code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import pickle
import numpy as np
from tqdm.auto import tqdm
import moses
from moses import CharVocab
class NGram:
    """Character-level n-gram generative model over token-id sequences.

    Raw counts of context-tuple -> next-token are stored in ``self._dict``
    for every context length from 1 to ``max_context_len``.
    """

    def __init__(self, max_context_len=10, verbose=False):
        self.max_context_len = max_context_len
        self._dict = dict()
        self.vocab = None
        self.default_probs = None
        self.zero_probs = None
        self.verbose = verbose

    def fit(self, data):
        """Build the vocabulary from ``data`` and accumulate n-gram counts."""
        self.vocab = CharVocab.from_data(data)
        # Smoothing prior: weight 1 for every regular token and for the
        # second of the four trailing special slots (presumably <eos> —
        # confirm against CharVocab's layout); the other special tokens get
        # 0 so smoothing alone can never emit them.
        self.default_probs = np.hstack([np.ones(len(self.vocab) - 4),
                                        np.array([0., 1., 0., 0.])])
        self.zero_probs = np.zeros(len(self.vocab))
        # The counting loop is shared with fit_update (previously the same
        # code was duplicated verbatim in both methods).
        self.fit_update(data)

    def fit_update(self, data):
        """Add counts from ``data`` to the existing statistics.

        Requires a prior call to ``fit`` (vocabulary and probability
        templates must already exist).
        """
        if self.verbose:
            print('fitting...')
            data = tqdm(data, total=len(data))
        for line in data:
            t_line = tuple(self.vocab.string2ids(line, True, True))
            # For each position, count the successor of every context of
            # length 1..max_context_len that fits inside the sequence.
            for i in range(len(t_line)):
                for shift in range(self.max_context_len):
                    if i + shift + 1 >= len(t_line):
                        break
                    context = t_line[i:i + shift + 1]
                    cid = t_line[i + shift + 1]
                    probs = self._dict.get(context, self.zero_probs.copy())
                    probs[cid] += 1.
                    self._dict[context] = probs

    def generate_one(self, l_smooth=0.01, context_len=None, max_len=100):
        """Sample a single string token-by-token with additive smoothing.

        Raises:
            RuntimeError: if the model has not been fitted yet.
        """
        if self.vocab is None:
            raise RuntimeError('Error: Fit the model before generating')
        if context_len is None:
            context_len = self.max_context_len
        elif context_len <= 0 or context_len > self.max_context_len:
            context_len = self.max_context_len
        res = [self.vocab.bos]
        while res[-1] != self.vocab.eos and len(res) < max_len:
            begin_index = max(len(res) - context_len, 0)
            context = tuple(res[begin_index:])
            # Back off to shorter contexts. Previously an entirely unseen
            # unigram context made this loop spin forever; now we stop at
            # the empty context and fall back to the smoothing prior alone.
            while context and context not in self._dict:
                context = context[1:]
            probs = self._dict.get(context, self.zero_probs)
            smoothed = probs + self.default_probs * l_smooth
            normed = smoothed / smoothed.sum()
            next_symbol = np.random.choice(len(self.vocab), p=normed)
            res.append(next_symbol)
        return self.vocab.ids2string(res)

    def nll(self, smiles, l_smooth=0.01, context_len=None):
        """Negative log-likelihood of ``smiles`` under the model.

        Bug fix: ``l_smooth`` was accepted but ignored (the full prior was
        always added); the smoothing weight is now honored, matching
        ``generate_one``.
        """
        if self.vocab is None:
            raise RuntimeError('Error: model is not trained')
        if context_len is None:
            context_len = self.max_context_len
        elif context_len <= 0 or context_len > self.max_context_len:
            context_len = self.max_context_len
        tokens = tuple(self.vocab.string2ids(smiles, True, True))
        likelihood = 0.
        for i in range(1, len(tokens)):
            begin_index = max(i - context_len, 0)
            context = tokens[begin_index:i]
            # Same back-off as generate_one, with the same empty-context
            # guard against tokens never seen during training.
            while context and context not in self._dict:
                context = context[1:]
            probs = self._dict.get(context, self.zero_probs) + self.default_probs * l_smooth
            normed = probs / probs.sum()
            prob = normed[tokens[i]]
            if prob == 0.:
                return np.inf
            likelihood -= np.log(prob)
        return likelihood

    def generate(self, n, l_smooth=0.01, context_len=None, max_len=100):
        """Sample ``n`` strings; see ``generate_one`` for the parameters."""
        generator = (self.generate_one(l_smooth,
                                       context_len,
                                       max_len) for i in range(n))
        if self.verbose:
            print('generating...')
            generator = tqdm(generator, total=n)
        return list(generator)

    def save(self, path):
        """
        Saves a model using pickle
        Arguments:
            path: path to .pkl file for saving
        """
        if self.vocab is None:
            raise RuntimeError("Can't save empty model."
                               " Fit the model first")
        data = {
            '_dict': self._dict,
            'vocab': self.vocab,
            'default_probs': self.default_probs,
            'zero_probs': self.zero_probs,
            'max_context_len': self.max_context_len
        }
        with open(path, 'wb') as f:
            pickle.dump(data, f)

    @classmethod
    def load(cls, path):
        """
        Loads saved model
        Arguments:
            path: path to saved .pkl file
        Returns:
            Loaded NGramGenerator
        """
        with open(path, "rb") as f:
            data = pickle.load(f)
        model = cls()
        model._dict = data['_dict']
        model.vocab = data['vocab']
        model.default_probs = data['default_probs']
        model.zero_probs = data['zero_probs']
        model.max_context_len = data['max_context_len']
        return model
def reproduce(seed, samples_path=None, metrics_path=None,
              n_jobs=1, device='cpu', verbose=False,
              samples=30000):
    """Train an NGram model on the MOSES train set, sample from it, and
    score the samples; optionally dump samples and metrics to disk."""
    train_set = moses.get_dataset('train')
    ngram = NGram(10, verbose=verbose)
    ngram.fit(train_set)
    np.random.seed(seed)
    generated = ngram.generate(samples, l_smooth=0.01)
    metrics = moses.get_all_metrics(generated, n_jobs=n_jobs, device=device)
    if samples_path is not None:
        with open(samples_path, 'w') as handle:
            handle.write('SMILES\n')
            handle.writelines(line + '\n' for line in generated)
    if metrics_path is not None:
        with open(metrics_path, 'w') as handle:
            handle.writelines("%s,%f\n" % (name, score)
                              for name, score in metrics.items())
    return generated, metrics
| [
"pickle.dump",
"moses.CharVocab.from_data",
"numpy.log",
"moses.get_dataset",
"pickle.load",
"numpy.array",
"numpy.random.seed",
"tqdm.auto.tqdm",
"moses.get_all_metrics"
] | [((5491, 5517), 'moses.get_dataset', 'moses.get_dataset', (['"""train"""'], {}), "('train')\n", (5508, 5517), False, 'import moses\n'), ((5581, 5601), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5595, 5601), True, 'import numpy as np\n'), ((5668, 5727), 'moses.get_all_metrics', 'moses.get_all_metrics', (['smiles'], {'n_jobs': 'n_jobs', 'device': 'device'}), '(smiles, n_jobs=n_jobs, device=device)\n', (5689, 5727), False, 'import moses\n'), ((419, 444), 'moses.CharVocab.from_data', 'CharVocab.from_data', (['data'], {}), '(data)\n', (438, 444), False, 'from moses import CharVocab\n'), ((3769, 3781), 'numpy.log', 'np.log', (['prob'], {}), '(prob)\n', (3775, 3781), True, 'import numpy as np\n'), ((4135, 4159), 'tqdm.auto.tqdm', 'tqdm', (['generator'], {'total': 'n'}), '(generator, total=n)\n', (4139, 4159), False, 'from tqdm.auto import tqdm\n'), ((4771, 4791), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (4782, 4791), False, 'import pickle\n'), ((5052, 5066), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5063, 5066), False, 'import pickle\n'), ((553, 583), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 0.0]'], {}), '([0.0, 1.0, 0.0, 0.0])\n', (561, 583), True, 'import numpy as np\n')] |
import numpy
from matplotlib import pyplot
from surrogates.kernels import MCMCSimulation
from surrogates.kernels.samplers.hmc import Hamiltonian
from surrogates.models.simple import UnconditionedModel
from surrogates.utils.distributions import Normal
from surrogates.utils.file import change_directory
from surrogates.utils.plotting import plot_corner, plot_log_p, plot_trace
def main():
    """Run a 3-parameter HMC simulation against known priors and write
    trace/corner/log-p plots plus summary statistics."""
    std_devs = {"a": 0.05, "b": 50.0, "c": 5000.0}
    prior_means = {"a": 0.0, "b": 100.0, "c": 0.0}
    priors = {
        name: Normal(numpy.array([prior_means[name]]),
                     numpy.array([std_devs[name]]))
        for name in ("a", "b", "c")
    }
    model = UnconditionedModel(priors)
    # Start every parameter at zero.
    initial_parameters = {name: numpy.array([0.0]) for name in ("a", "b", "c")}
    # Scale the HMC momenta by the inverse prior widths so all three
    # dimensions mix at comparable rates.
    momentum_scales = {
        name: numpy.array([1.0 / std_devs[name]]) for name in ("a", "b", "c")
    }
    sampler = Hamiltonian(
        model,
        momentum_scales=momentum_scales,
        step_size=1.0,
        n_steps=10,
    )
    with change_directory("3d_univariate"):
        simulation = MCMCSimulation(
            model, initial_parameters, sampler=sampler, random_seed=42
        )
        simulation.run(2000, 20000)
        # Persist the diagnostic figures and release them immediately.
        figure = plot_trace(simulation.trace, show=False)
        figure.savefig("trace.png")
        pyplot.close(figure)
        figure = plot_corner(
            simulation.trace, model.trainable_parameters, show=False
        )
        figure.savefig("corner.png")
        pyplot.close(figure)
        figure = plot_log_p(simulation.log_p_trace, show=False)
        figure.savefig("log_p.png")
        pyplot.close(figure)
        for name in std_devs:
            estimated_std = numpy.std(simulation.trace[name])
            estimated_mean = numpy.mean(simulation.trace[name])
            print(f"{name}: std estimated={estimated_std} real={std_devs[name]}")
            print(f"{name}: mean estimated={estimated_mean} real={0.0}")


if __name__ == "__main__":
    main()
| [
"numpy.mean",
"surrogates.utils.file.change_directory",
"surrogates.models.simple.UnconditionedModel",
"surrogates.utils.plotting.plot_trace",
"matplotlib.pyplot.close",
"numpy.array",
"surrogates.utils.plotting.plot_corner",
"surrogates.utils.plotting.plot_log_p",
"surrogates.kernels.MCMCSimulation... | [((692, 718), 'surrogates.models.simple.UnconditionedModel', 'UnconditionedModel', (['priors'], {}), '(priors)\n', (710, 718), False, 'from surrogates.models.simple import UnconditionedModel\n'), ((807, 825), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (818, 825), False, 'import numpy\n'), ((840, 858), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (851, 858), False, 'import numpy\n'), ((873, 891), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (884, 891), False, 'import numpy\n'), ((1247, 1280), 'surrogates.utils.file.change_directory', 'change_directory', (['"""3d_univariate"""'], {}), "('3d_univariate')\n", (1263, 1280), False, 'from surrogates.utils.file import change_directory\n'), ((1304, 1378), 'surrogates.kernels.MCMCSimulation', 'MCMCSimulation', (['model', 'initial_parameters'], {'sampler': 'sampler', 'random_seed': '(42)'}), '(model, initial_parameters, sampler=sampler, random_seed=42)\n', (1318, 1378), False, 'from surrogates.kernels import MCMCSimulation\n'), ((1488, 1528), 'surrogates.utils.plotting.plot_trace', 'plot_trace', (['simulation.trace'], {'show': '(False)'}), '(simulation.trace, show=False)\n', (1498, 1528), False, 'from surrogates.utils.plotting import plot_corner, plot_log_p, plot_trace\n'), ((1579, 1605), 'matplotlib.pyplot.close', 'pyplot.close', (['trace_figure'], {}), '(trace_figure)\n', (1591, 1605), False, 'from matplotlib import pyplot\n'), ((1631, 1700), 'surrogates.utils.plotting.plot_corner', 'plot_corner', (['simulation.trace', 'model.trainable_parameters'], {'show': '(False)'}), '(simulation.trace, model.trainable_parameters, show=False)\n', (1642, 1700), False, 'from surrogates.utils.plotting import plot_corner, plot_log_p, plot_trace\n'), ((1775, 1802), 'matplotlib.pyplot.close', 'pyplot.close', (['corner_figure'], {}), '(corner_figure)\n', (1787, 1802), False, 'from matplotlib import pyplot\n'), ((1827, 1873), 
'surrogates.utils.plotting.plot_log_p', 'plot_log_p', (['simulation.log_p_trace'], {'show': '(False)'}), '(simulation.log_p_trace, show=False)\n', (1837, 1873), False, 'from surrogates.utils.plotting import plot_corner, plot_log_p, plot_trace\n'), ((1924, 1950), 'matplotlib.pyplot.close', 'pyplot.close', (['log_p_figure'], {}), '(log_p_figure)\n', (1936, 1950), False, 'from matplotlib import pyplot\n'), ((479, 497), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (490, 497), False, 'import numpy\n'), ((499, 527), 'numpy.array', 'numpy.array', (["[std_devs['a']]"], {}), "([std_devs['a']])\n", (510, 527), False, 'import numpy\n'), ((550, 570), 'numpy.array', 'numpy.array', (['[100.0]'], {}), '([100.0])\n', (561, 570), False, 'import numpy\n'), ((572, 600), 'numpy.array', 'numpy.array', (["[std_devs['b']]"], {}), "([std_devs['b']])\n", (583, 600), False, 'import numpy\n'), ((623, 641), 'numpy.array', 'numpy.array', (['[0.0]'], {}), '([0.0])\n', (634, 641), False, 'import numpy\n'), ((643, 671), 'numpy.array', 'numpy.array', (["[std_devs['c']]"], {}), "([std_devs['c']])\n", (654, 671), False, 'import numpy\n'), ((1009, 1043), 'numpy.array', 'numpy.array', (["[1.0 / std_devs['a']]"], {}), "([1.0 / std_devs['a']])\n", (1020, 1043), False, 'import numpy\n'), ((1062, 1096), 'numpy.array', 'numpy.array', (["[1.0 / std_devs['b']]"], {}), "([1.0 / std_devs['b']])\n", (1073, 1096), False, 'import numpy\n'), ((1115, 1149), 'numpy.array', 'numpy.array', (["[1.0 / std_devs['c']]"], {}), "([1.0 / std_devs['c']])\n", (1126, 1149), False, 'import numpy\n'), ((2033, 2067), 'numpy.std', 'numpy.std', (['simulation.trace[label]'], {}), '(simulation.trace[label])\n', (2042, 2067), False, 'import numpy\n'), ((2173, 2208), 'numpy.mean', 'numpy.mean', (['simulation.trace[label]'], {}), '(simulation.trace[label])\n', (2183, 2208), False, 'import numpy\n')] |
import numpy as np
def gaussian_band(wn, A, s, m):
    """Area-normalized Gaussian band: integrates to A, width s, center m.

    Bug fix: the old expression ``A/s*np.sqrt(2*np.pi)`` *multiplied* by
    sqrt(2*pi) because of operator precedence, so the band integrated to
    2*pi*A instead of A and was inconsistent with ``lorentzian_band``,
    which is area-normalized.
    """
    return A / (s * np.sqrt(2 * np.pi)) * np.exp(-(wn - m) ** 2 / 2 / s ** 2)
def lorentzian_band(wn, A, w, m):
    """Lorentzian band scaled by A with half-width w, centered at m."""
    shape = 1 + (wn - m) ** 2 / w ** 2
    return A / shape / (w * np.pi)
def band(wn, band_params):
    """Evaluate one band profile over ``wn``.

    Arguments:
        wn: wavenumber grid (scalar or array).
        band_params: ("gauss"|"lorentz", *profile_args).

    Raises:
        ValueError: for an unknown profile name. (The original raised
        ``ArgumentError``, which is undefined and itself produced a
        NameError at the raise site.)
    """
    kind = band_params[0]
    if kind == "gauss":
        return gaussian_band(wn, *band_params[1:])
    if kind == "lorentz":
        return lorentzian_band(wn, *band_params[1:])
    raise ValueError('Unknown band {}'.format(kind))
def spectrum(wn, band_params, noise_level=0):
    """Synthesize a spectrum as the sum of the configured bands over ``wn``,
    optionally adding Gaussian noise scaled by ``noise_level``."""
    total = np.zeros_like(wn) + sum(band(wn, params) for params in band_params)
    if noise_level > 0:
        total = total + noise_level * np.random.randn(*total.shape)
    return total
| [
"numpy.exp",
"numpy.sqrt",
"numpy.random.randn",
"numpy.zeros_like"
] | [((540, 557), 'numpy.zeros_like', 'np.zeros_like', (['wn'], {}), '(wn)\n', (553, 557), True, 'import numpy as np\n'), ((85, 120), 'numpy.exp', 'np.exp', (['(-(wn - m) ** 2 / 2 / s ** 2)'], {}), '(-(wn - m) ** 2 / 2 / s ** 2)\n', (91, 120), True, 'import numpy as np\n'), ((68, 86), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (75, 86), True, 'import numpy as np\n'), ((696, 724), 'numpy.random.randn', 'np.random.randn', (['*spec.shape'], {}), '(*spec.shape)\n', (711, 724), True, 'import numpy as np\n')] |
import numpy as np
import torch
def stocks_train(num_training, trainprocess, algorithm, encoder=False):
    """Run ``trainprocess`` ``num_training`` times and return the
    checkpoint filename(s) it was given.

    With ``encoder=True`` each round trains an (encoder, head) pair and two
    name lists are returned; otherwise a single list of model names.
    """
    if encoder:
        encoder_names = ['{}_encoder{}.pt'.format(algorithm, i)
                         for i in range(num_training)]
        head_names = ['{}_head{}.pt'.format(algorithm, i)
                      for i in range(num_training)]
        for enc_name, head_name in zip(encoder_names, head_names):
            trainprocess(enc_name, head_name)
        return encoder_names, head_names
    model_names = ['{}{}.pt'.format(algorithm, i) for i in range(num_training)]
    for name in model_names:
        trainprocess(name)
    return model_names
def stocks_test(num_training, models, noise_range, testprocess, encoder=False):
    """Evaluate saved models over increasing noise levels and print the
    per-level standard deviation and mean of the losses."""
    losses = []
    print("Robustness testing:")
    if encoder:
        encoder_files, head_files = models[0], models[1]
        for idx in range(num_training):
            enc = torch.load(encoder_files[idx]).cuda()
            head = torch.load(head_files[idx]).cuda()
            per_level = []
            for level in range(noise_range):
                print("Noise level {}: ".format(level / 10))
                per_level.append(testprocess(enc, head, level))
            losses.append(np.array(per_level))
    else:
        for idx in range(num_training):
            net = torch.load(models[idx]).cuda()
            per_level = []
            for level in range(noise_range):
                print("Noise level {}: ".format(level / 10))
                per_level.append(testprocess(net, level))
            losses.append(np.array(per_level))
    # Aggregate across the independently-trained models.
    print("Standard deviation:", list(np.std(np.array(losses), axis=0)))
    print("Average loss of different noise levels:",
          list(np.mean(np.array(losses), axis=0)))
def general_train(trainprocess, algorithm, encoder=False):
    """Train once via ``trainprocess`` and return the checkpoint name(s):
    an (encoder, head) pair when ``encoder=True``, else a single name."""
    if encoder:
        names = ("{}_encoder.pt".format(algorithm), "{}_head.pt".format(algorithm))
        trainprocess(*names)
        return names
    checkpoint = "{}_best.pt".format(algorithm)
    trainprocess(checkpoint)
    return checkpoint
def general_test(testprocess, filename, robustdatasets, encoder=False, multi_measure=False):
    """Evaluate a saved model on every robustness dataset at all noise levels
    and print the per-level measures.

    ``filename`` is an (encoder, head) pair of paths when ``encoder=True``,
    else a single path. With ``multi_measure=True`` the per-level results are
    transposed so each metric's values across noise levels are grouped.
    """
    all_measures = []
    for dataset in robustdatasets:
        per_level = []
        if encoder:
            enc = torch.load(filename[0]).cuda()
            head = torch.load(filename[1]).cuda()
            print("Robustness testing:")
            for level in range(len(dataset)):
                print("Noise level {}: ".format(level / 10))
                per_level.append(testprocess(enc, head, dataset[level]))
        else:
            net = torch.load(filename).cuda()
            print("Robustness testing:")
            for level in range(len(dataset)):
                print("Noise level {}: ".format(level / 10))
                per_level.append(testprocess(net, dataset[level]))
        if multi_measure:
            # Transpose: group the i-th metric from every noise level.
            per_level = [[entry[i] for entry in per_level]
                         for i in range(len(per_level[0]))]
        all_measures.append(per_level)
        print("Different noise levels:", per_level)
| [
"numpy.array",
"torch.load"
] | [((1363, 1381), 'numpy.array', 'np.array', (['loss_tmp'], {}), '(loss_tmp)\n', (1371, 1381), True, 'import numpy as np\n'), ((1711, 1729), 'numpy.array', 'np.array', (['loss_tmp'], {}), '(loss_tmp)\n', (1719, 1729), True, 'import numpy as np\n'), ((1776, 1790), 'numpy.array', 'np.array', (['loss'], {}), '(loss)\n', (1784, 1790), True, 'import numpy as np\n'), ((1868, 1882), 'numpy.array', 'np.array', (['loss'], {}), '(loss)\n', (1876, 1882), True, 'import numpy as np\n'), ((1046, 1069), 'torch.load', 'torch.load', (['encoders[i]'], {}), '(encoders[i])\n', (1056, 1069), False, 'import torch\n'), ((1096, 1116), 'torch.load', 'torch.load', (['heads[i]'], {}), '(heads[i])\n', (1106, 1116), False, 'import torch\n'), ((1451, 1472), 'torch.load', 'torch.load', (['models[i]'], {}), '(models[i])\n', (1461, 1472), False, 'import torch\n'), ((2517, 2540), 'torch.load', 'torch.load', (['filename[0]'], {}), '(filename[0])\n', (2527, 2540), False, 'import torch\n'), ((2567, 2590), 'torch.load', 'torch.load', (['filename[1]'], {}), '(filename[1])\n', (2577, 2590), False, 'import torch\n'), ((2877, 2897), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (2887, 2897), False, 'import torch\n')] |
import tensorflow as tf
import h5py
import collections
import six
from syft.workers.websocket_server import WebsocketServerWorker
import torch
import sys
import syft
import sys
import argparse
from torchvision import datasets
from torchvision import transforms
import numpy as np
import tensorflow_federated as tff
import os.path
from tensorflow_federated.python.simulation.hdf5_client_data import HDF5ClientData
fractionToUse = 5
class TrainDataset:
    """One simulated host's shard of the federated EMNIST (digits-only)
    train split.

    Clients are assigned round-robin to the three hosts "h2"/"h3"/"h4",
    and only 1/fractionToUse of the clients is loaded.
    """

    def __init__(self, transform=None, id="h2"):
        fileprefix = "fed_emnist_digitsonly"
        dir_path = os.path.dirname("/home/mininet/")
        train = HDF5ClientData(os.path.join(dir_path, fileprefix + '_train.h5'))
        trainFile = h5py.File(os.path.join(dir_path, fileprefix + '_train.h5'), "r")
        _EXAMPLES_GROUP = "examples"
        numberofclients = len(train.client_ids)
        data = np.empty((0, 28, 28), np.float32)
        target = np.empty((0), np.int_)
        # Each host reads clients at a fixed offset in the round-robin order.
        host_offsets = {"h2": 0, "h3": 1, "h4": 2}
        if id not in host_offsets:
            # Bug fix: previously an unknown id left `offset` undefined and
            # crashed later with a NameError; fail fast instead.
            raise ValueError("Unknown host id: {}".format(id))
        offset = host_offsets[id]
        for i in range(int(numberofclients / (3 * fractionToUse))):
            clientdataset = collections.OrderedDict(
                (name, ds[()]) for name, ds in sorted(
                    six.iteritems(trainFile[HDF5ClientData._EXAMPLES_GROUP][
                        train.client_ids[i * 3 * fractionToUse + offset]])))
            data = np.concatenate((data, clientdataset['pixels']))
            target = np.concatenate((target, clientdataset['label']), axis=0)
        self.target = list(target)
        self.data = list(data)
        self.transform = transform

    def __getitem__(self, index):
        x = self.data[index]
        y = self.target[index]
        if self.transform:
            x = self.transform(x)
        return x, y

    def __len__(self):
        return len(self.target)
def main(_id, ip):
    """Start a PySyft websocket worker serving this host's EMNIST shard."""
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    mnist_dataset = TrainDataset(transform=transform, id=_id)
    hook = syft.TorchHook(torch)
    server = WebsocketServerWorker(id=_id, host=ip, port=8778, hook=hook, verbose=True)
    print("Worker:{}, Dataset contains {}".format(_id, str(len(mnist_dataset.data))))
    dataset = syft.BaseDataset(
        data=mnist_dataset.data,
        targets=mnist_dataset.target,
        transform=mnist_dataset.transform,
    )
    server.add_dataset(dataset, key="targeted")
    server.start()


main(sys.argv[1], sys.argv[2])
| [
"syft.BaseDataset",
"syft.workers.websocket_server.WebsocketServerWorker",
"numpy.empty",
"syft.TorchHook",
"numpy.concatenate",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor",
"six.iteritems"
] | [((2030, 2051), 'syft.TorchHook', 'syft.TorchHook', (['torch'], {}), '(torch)\n', (2044, 2051), False, 'import syft\n'), ((2065, 2139), 'syft.workers.websocket_server.WebsocketServerWorker', 'WebsocketServerWorker', ([], {'id': '_id', 'host': 'ip', 'port': '(8778)', 'hook': 'hook', 'verbose': '(True)'}), '(id=_id, host=ip, port=8778, hook=hook, verbose=True)\n', (2086, 2139), False, 'from syft.workers.websocket_server import WebsocketServerWorker\n'), ((2242, 2352), 'syft.BaseDataset', 'syft.BaseDataset', ([], {'data': 'mnist_dataset.data', 'targets': 'mnist_dataset.target', 'transform': 'mnist_dataset.transform'}), '(data=mnist_dataset.data, targets=mnist_dataset.target,\n transform=mnist_dataset.transform)\n', (2258, 2352), False, 'import syft\n'), ((868, 901), 'numpy.empty', 'np.empty', (['(0, 28, 28)', 'np.float32'], {}), '((0, 28, 28), np.float32)\n', (876, 901), True, 'import numpy as np\n'), ((917, 937), 'numpy.empty', 'np.empty', (['(0)', 'np.int_'], {}), '(0, np.int_)\n', (925, 937), True, 'import numpy as np\n'), ((1375, 1422), 'numpy.concatenate', 'np.concatenate', (["(data, clientdataset['pixels'])"], {}), "((data, clientdataset['pixels']))\n", (1389, 1422), True, 'import numpy as np\n'), ((1444, 1500), 'numpy.concatenate', 'np.concatenate', (["(target, clientdataset['label'])"], {'axis': '(0)'}), "((target, clientdataset['label']), axis=0)\n", (1458, 1500), True, 'import numpy as np\n'), ((1917, 1938), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1936, 1938), False, 'from torchvision import transforms\n'), ((1946, 1988), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (1966, 1988), False, 'from torchvision import transforms\n'), ((1253, 1363), 'six.iteritems', 'six.iteritems', (['trainFile[HDF5ClientData._EXAMPLES_GROUP][train.client_ids[i * 3 *\n fractionToUse + offset]]'], {}), '(trainFile[HDF5ClientData._EXAMPLES_GROUP][train.client_ids[i 
*\n 3 * fractionToUse + offset]])\n', (1266, 1363), False, 'import six\n')] |
import json
import numpy as np
import matplotlib.pyplot as plt
import os
import yaml
from sklearn.metrics import f1_score, roc_auc_score
from fcos_core.config.paths_catalog import DatasetCatalog
from Data.Preprocess import join_path
def compute_iou(box1, box2):
    """Compute IoU between two boxes given as [y1, x1, height, width].

    Note: the old docstring claimed corner format [y1, x1, y2, x2], but the
    code unpacks (y1, x1, h, w) and derives the far corners itself.

    Returns:
        float: intersection-over-union; 0.0 when the union is empty
        (both boxes degenerate), instead of raising ZeroDivisionError.
    """
    b1_y1, b1_x1, b1_h, b1_w = box1
    b2_y1, b2_x1, b2_h, b2_w = box2
    b1_y2, b1_x2 = b1_y1 + b1_h, b1_x1 + b1_w
    b2_y2, b2_x2 = b2_y1 + b2_h, b2_x1 + b2_w
    # Intersection rectangle, clamped to zero when the boxes are disjoint.
    y1 = max(b1_y1, b2_y1)
    x1 = max(b1_x1, b2_x1)
    y2 = min(b1_y2, b2_y2)
    x2 = min(b1_x2, b2_x2)
    intersection = max(x2 - x1, 0) * max(y2 - y1, 0)
    # Union of the two areas.
    b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)
    b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)
    union = b1_area + b2_area - intersection
    if union == 0:
        return 0.0
    return intersection / union
def iou_matrix(boxes1, boxes2):
    """Pairwise IoU matrix of shape (len(boxes1), len(boxes2))."""
    matrix = np.zeros([len(boxes1), len(boxes2)])
    for row, first in enumerate(boxes1):
        for col, second in enumerate(boxes2):
            matrix[row, col] = compute_iou(first, second)
    return matrix
def get_cls_results(gt_boxes, pred_boxes, iou_th):
    """Align detections with ground truth for per-class evaluation.

    Arguments:
        gt_boxes: COCO-style dicts with 'category_id' and 'bbox'.
        pred_boxes: COCO-style dicts with 'category_id', 'bbox', 'score'.
        iou_th: IoU threshold for a prediction to count as a match.

    Returns:
        (out_label, out_pred): equal-length lists of class ids, where a 0
        entry stands for "background"/no detection. Matched pairs give
        (gt class, pred class); unmatched predictions give (0, pred class);
        unmatched ground-truth boxes give (gt class, 0).
    """
    gt = [x['category_id'] for x in gt_boxes]
    gt_bbox = [x['bbox'] for x in gt_boxes]
    pred = [x['category_id'] for x in pred_boxes]
    pred_bbox = [x['bbox'] for x in pred_boxes]
    # pred_score is currently unused; the scoring lines below are commented out.
    pred_score = [x['score'] for x in pred_boxes]
    matrix = iou_matrix(pred_bbox, gt_bbox) # (n_pred, n_label)
    out_label = []
    # out_score = []
    out_pred = []
    # tp: every (prediction, label) pair whose IoU clears the threshold
    tp_index = np.nonzero(matrix>iou_th)
    for i in range(tp_index[0].size):
        pred_index = tp_index[0][i]
        label_index = tp_index[1][i]
        # best pred in duplicated preds
        if matrix[pred_index, label_index] == np.max(matrix[:,label_index]).item():
            out_label.append(gt[label_index])
            # out_score.append(pred_score[tp_index[0][i]])
            out_pred.append(pred[pred_index])
        # duplicate preds, taken as fp
        else:
            out_label.append(0)
            # out_score.append(pred_score[fp_index[i]])
            out_pred.append(pred[pred_index])
    # fp: predictions whose best IoU against any label is below threshold
    fp_index = np.nonzero(np.max(matrix, axis=1)<=iou_th)
    for i in range(fp_index[0].size):
        out_label.append(0)
        # out_score.append(pred_score[fp_index[i]])
        out_pred.append(pred[fp_index[0][i]])
    # fn: labels with no prediction above threshold (only meaningful when
    # there is at least one prediction; np.max over an empty axis would fail)
    if len(pred)>0:
        fn_index = np.nonzero(np.max(matrix, axis=0)<=iou_th)
        for i in range(fn_index[0].size):
            out_label.append(gt[fn_index[0][i]])
            # out_score.append()
            out_pred.append(0)
    else:
        # No predictions at all: every ground-truth box is a miss.
        out_label.extend(gt)
        # out_score.append()
        out_pred.extend([0,]*len(gt))
    return out_label, out_pred
def compute_auc(pred, label, negative_th):
    """ROC-AUC of scaled predictions against binarized labels.

    Predictions are divided by 4 to map them to scores (they presumably
    range over 0..4 — confirm with callers); labels above ``negative_th``
    count as positive.
    """
    scores = pred / 4
    positives = np.where(label > negative_th, 1, 0)
    return roc_auc_score(positives, scores)
def confusion_metrix(pred, label, negative_th):
    # Print multi-class and binarized classification metrics for aligned
    # prediction/label class lists (as produced by get_cls_results).
    # Class ids <= negative_th are treated as "negative" after binarization.
    pred = np.array(pred)
    label = np.array(label)
    f1 = f1_score(label, pred, average='macro')
    auc = compute_auc(pred, label, negative_th)
    # Exact-match accuracy over all classes, and accuracy tolerating an
    # off-by-one class error.
    acc_allclass = np.count_nonzero(pred==label)/pred.size
    acc_ap1 = np.count_nonzero(np.abs(pred-label)<2)/pred.size
    # Binarize for sensitivity/precision/specificity.
    pred = np.where(pred>negative_th, 1, 0)
    label = np.where(label>negative_th, 1, 0)
    tp = np.count_nonzero(pred*label)
    fp = np.count_nonzero(pred*(1-label))
    fn = np.count_nonzero((1-pred)*label)
    tn = np.count_nonzero((1-pred)*(1-label))
    # NOTE(review): these divide by zero when a binarized class is entirely
    # absent from pred/label — confirm inputs always contain both classes.
    sen = tp/(tp+fn)
    ppv = tp/(tp+fp)
    spe = tn/(tn+fp)
    acc = (tp+tn)/pred.size
    print(f'sen:{round(sen,2)}, '
          f'ppv:{round(ppv,2)}, '
          f'spe:{round(spe,2)}, '
          f'acc:{round(acc,2)}, '
          f'f1:{round(f1,2)}, '
          f'auc:{round(auc,2)}, '
          f'acc:{round(acc_allclass,2)}, '
          f'acc+-1:{round(acc_ap1,2)}')
def assess(datasets, output_dir, negative_th=1, iou_th=0.3):
    """Match saved detections against COCO-style annotations per image and
    report confusion metrics for each dataset."""
    for dataset in datasets:
        bbox_path = join_path(output_dir, 'inference', dataset, 'bbox.json')
        with open(bbox_path) as handle:
            detections = json.load(handle)
        data_args = DatasetCatalog().get(dataset)['args']
        img_dir = data_args['root']
        with open(data_args['ann_file']) as handle:
            annotations = json.load(handle)
        labels, preds = [], []
        for img_dict in annotations['images']:
            img_id = img_dict['id']
            img_name = img_dict['file_name']
            gt_box = [a for a in annotations['annotations'] if a['image_id'] == img_id]
            pred_box = [d for d in detections if d['image_id'] == img_id]
            img_labels, img_preds = get_cls_results(gt_box, pred_box, iou_th)
            labels.extend(img_labels)
            preds.extend(img_preds)
        confusion_metrix(preds, labels, negative_th)
if __name__ == '__main__':
    negative_th = 1
    iou_th = 0.3
    # Evaluate every trained model directory with the same thresholds.
    run_dirs = [
        '/homes/rqyu/PycharmProjects/FCOS/training_dir/fcos_R_50_FPN_1x/',
        '/homes/rqyu/PycharmProjects/FCOS/training_dir/fcos_R_50_FPN_1x_allclsnms/',
        '/homes/rqyu/PycharmProjects/FCOS/training_dir/udm/',
        '/homes/rqyu/PycharmProjects/FCOS/training_dir/udm2/',
        '/homes/rqyu/PycharmProjects/FCOS/training_dir/udm2_cls1th/',
    ]
    for run_dir in run_dirs:
        assess(("jsph_test_coco_style",), run_dir, negative_th, iou_th)
| [
"numpy.abs",
"sklearn.metrics.f1_score",
"numpy.where",
"Data.Preprocess.join_path",
"sklearn.metrics.roc_auc_score",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros",
"numpy.max",
"fcos_core.config.paths_catalog.DatasetCatalog",
"numpy.nonzero",
"json.load"
] | [((1096, 1112), 'numpy.zeros', 'np.zeros', (['[n, m]'], {}), '([n, m])\n', (1104, 1112), True, 'import numpy as np\n'), ((1708, 1735), 'numpy.nonzero', 'np.nonzero', (['(matrix > iou_th)'], {}), '(matrix > iou_th)\n', (1718, 1735), True, 'import numpy as np\n'), ((3042, 3077), 'numpy.where', 'np.where', (['(label > negative_th)', '(1)', '(0)'], {}), '(label > negative_th, 1, 0)\n', (3050, 3077), True, 'import numpy as np\n'), ((3087, 3113), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['label', 'pred'], {}), '(label, pred)\n', (3100, 3113), False, 'from sklearn.metrics import f1_score, roc_auc_score\n'), ((3195, 3209), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (3203, 3209), True, 'import numpy as np\n'), ((3223, 3238), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (3231, 3238), True, 'import numpy as np\n'), ((3249, 3287), 'sklearn.metrics.f1_score', 'f1_score', (['label', 'pred'], {'average': '"""macro"""'}), "(label, pred, average='macro')\n", (3257, 3287), False, 'from sklearn.metrics import f1_score, roc_auc_score\n'), ((3475, 3509), 'numpy.where', 'np.where', (['(pred > negative_th)', '(1)', '(0)'], {}), '(pred > negative_th, 1, 0)\n', (3483, 3509), True, 'import numpy as np\n'), ((3521, 3556), 'numpy.where', 'np.where', (['(label > negative_th)', '(1)', '(0)'], {}), '(label > negative_th, 1, 0)\n', (3529, 3556), True, 'import numpy as np\n'), ((3567, 3597), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred * label)'], {}), '(pred * label)\n', (3583, 3597), True, 'import numpy as np\n'), ((3606, 3642), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred * (1 - label))'], {}), '(pred * (1 - label))\n', (3622, 3642), True, 'import numpy as np\n'), ((3649, 3685), 'numpy.count_nonzero', 'np.count_nonzero', (['((1 - pred) * label)'], {}), '((1 - pred) * label)\n', (3665, 3685), True, 'import numpy as np\n'), ((3692, 3734), 'numpy.count_nonzero', 'np.count_nonzero', (['((1 - pred) * (1 - label))'], {}), '((1 - pred) * (1 - 
label))\n', (3708, 3734), True, 'import numpy as np\n'), ((3357, 3388), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred == label)'], {}), '(pred == label)\n', (3373, 3388), True, 'import numpy as np\n'), ((2363, 2385), 'numpy.max', 'np.max', (['matrix'], {'axis': '(1)'}), '(matrix, axis=1)\n', (2369, 2385), True, 'import numpy as np\n'), ((4323, 4335), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4332, 4335), False, 'import json\n'), ((4529, 4541), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4538, 4541), False, 'import json\n'), ((2627, 2649), 'numpy.max', 'np.max', (['matrix'], {'axis': '(0)'}), '(matrix, axis=0)\n', (2633, 2649), True, 'import numpy as np\n'), ((3429, 3449), 'numpy.abs', 'np.abs', (['(pred - label)'], {}), '(pred - label)\n', (3435, 3449), True, 'import numpy as np\n'), ((4234, 4290), 'Data.Preprocess.join_path', 'join_path', (['output_dir', '"""inference"""', 'dataset', '"""bbox.json"""'], {}), "(output_dir, 'inference', dataset, 'bbox.json')\n", (4243, 4290), False, 'from Data.Preprocess import join_path\n'), ((1938, 1968), 'numpy.max', 'np.max', (['matrix[:, label_index]'], {}), '(matrix[:, label_index])\n', (1944, 1968), True, 'import numpy as np\n'), ((4359, 4375), 'fcos_core.config.paths_catalog.DatasetCatalog', 'DatasetCatalog', ([], {}), '()\n', (4373, 4375), False, 'from fcos_core.config.paths_catalog import DatasetCatalog\n')] |
import torch
from torch.utils.data import DataLoader,Dataset
import random
import numpy as np
from torch.utils.data.sampler import Sampler
# Draw one episode's support set and query set from the dataset.
class get_SQ_set(object):
    """One few-shot episode: randomly pick ``number_class`` classes, then
    split each class's shuffled samples into support and query shots."""

    def __init__(self, data_classes, number_class, support_sample_num, query_sample_num):
        self.data_classes = data_classes
        self.num_class = number_class
        self.support_shot_num = support_sample_num
        self.query_shot_num = query_sample_num

        class_names = [name for name in self.data_classes.keys()]
        np.random.shuffle(class_names)
        episode_classes = class_names[:self.num_class]
        # Re-label the sampled classes as 0..num_class-1 (used later for
        # one-hot encoding).
        label_of = dict(zip(episode_classes, np.array(range(len(episode_classes)))))

        self.support_data = []
        self.query_data = []
        self.support_labels = []
        self.query_labels = []
        for name in episode_classes:
            # Shuffle the class samples and cap at 200 of them.
            shuffled = random.sample(list(data_classes[name]), len(data_classes[name]))[0:200]
            n_support = self.support_shot_num
            self.support_data.extend(shuffled[:n_support])
            self.support_labels.extend([label_of[name]] * n_support)
            self.query_data.extend(shuffled[n_support:n_support + self.query_shot_num])
            self.query_labels.extend([label_of[name]] * self.query_shot_num)
class FewShotDataset(Dataset):
    """Abstract dataset over one split of a few-shot task.

    ``task`` must expose support_data/support_labels/query_data/query_labels;
    ``split`` ('support' or 'query') selects which pair is served.
    """

    def __init__(self, task, split='support'):
        self.task = task
        self.split = split
        self.data_roots = self.task.support_data if self.split == 'support' else self.task.query_data
        self.labels = self.task.support_labels if self.split == 'support' else self.task.query_labels

    def __len__(self):
        # Bug fix: this read `self.image_roots`, an attribute that never
        # existed (the constructor sets `data_roots`), so len() always
        # raised AttributeError.
        return len(self.data_roots)

    def __getitem__(self, idx):
        raise NotImplementedError("This is an abstract class. Subclass this class for your particular dataset.")
class Hsi_Dataset(FewShotDataset):
    """Concrete few-shot dataset yielding (data, label) pairs."""

    def __init__(self, *args, **kwargs):
        super(Hsi_Dataset, self).__init__(*args, **kwargs)

    def __getitem__(self, idx):
        return self.data_roots[idx], self.labels[idx]
#进行类的平衡
class ClassBalancedSampler(Sampler):
    """Samples 'num_per_class' examples each from 'num_cl' pools
    of examples of size 'num_per_class'.

    Indices are produced assuming the underlying dataset stores its items
    grouped by class, each group holding ``num_inst`` consecutive examples.
    """
    def __init__(self, num_per_class, num_cl, num_inst, shuffle=False):
        self.num_per_class = num_per_class
        self.num_cl = num_cl
        self.num_inst = num_inst
        self.shuffle = shuffle
    def __iter__(self):
        # Emit one flat list of indices, class by class.
        indices = []
        for cls in range(self.num_cl):
            base = cls * self.num_inst
            if self.shuffle:
                picks = torch.randperm(self.num_inst)[:self.num_per_class]
            else:
                picks = range(self.num_inst)[:self.num_per_class]
            for i in picks:
                indices.append(i + base)
        if self.shuffle:
            random.shuffle(indices)
        return iter(indices)
    def __len__(self):
        return 1
#获取dataset和dataloader
def get_data_loader(task, Number_way, num_per_class=1, split='train', shuffle=False):
    """Build a DataLoader over one split of a few-shot task.

    A ClassBalancedSampler is used so that every class contributes exactly
    ``num_per_class`` examples per batch; any split other than 'support'
    (including the default 'train') is served from the query split sizes.
    """
    shots = task.support_shot_num if split == 'support' else task.query_shot_num
    sampler = ClassBalancedSampler(num_per_class, Number_way, shots, shuffle=shuffle)
    dataset = Hsi_Dataset(task, split=split)
    return DataLoader(dataset, batch_size=num_per_class * Number_way, sampler=sampler)
"torch.randperm",
"numpy.random.shuffle",
"random.shuffle",
"torch.utils.data.DataLoader"
] | [((3750, 3825), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(num_per_class * Number_way)', 'sampler': 'sampler'}), '(dataset, batch_size=num_per_class * Number_way, sampler=sampler)\n', (3760, 3825), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((549, 580), 'numpy.random.shuffle', 'np.random.shuffle', (['classes_name'], {}), '(classes_name)\n', (566, 580), True, 'import numpy as np\n'), ((3205, 3226), 'random.shuffle', 'random.shuffle', (['batch'], {}), '(batch)\n', (3219, 3226), False, 'import random\n'), ((2881, 2910), 'torch.randperm', 'torch.randperm', (['self.num_inst'], {}), '(self.num_inst)\n', (2895, 2910), False, 'import torch\n')] |
import json
import logging
import os
import threading
import numpy as np
import pandas as pd
import tiledb
from server_timing import Timing as ServerTiming
from server.common.constants import Axis, XApproximateDistribution
from server.common.errors import DatasetAccessError, ConfigurationError
from server.common.immutable_kvcache import ImmutableKVCache
from server.common.utils.type_conversion_utils import get_schema_type_hint_from_dtype
from server.common.utils.utils import path_join
from server.compute import diffexp_cxg
from server.data_common.data_adaptor import DataAdaptor
from server.common.fbs.matrix import encode_matrix_fbs
from server.data_cxg.cxg_util import pack_selector_from_mask
class CxgAdaptor(DataAdaptor):
    # DataAdaptor implementation backed by a CXG object: a TileDB group
    # containing the X matrix, obs/var annotation arrays and embeddings,
    # on local disk or S3.
    # TODO: The tiledb context parameters should be a configuration option
    tiledb_ctx = tiledb.Ctx(
        {"sm.tile_cache_size": 8 * 1024 * 1024 * 1024, "sm.num_reader_threads": 32, "vfs.s3.region": "us-east-1"}
    )
    def __init__(self, data_locator, app_config=None):
        """Open the CXG at data_locator and validate/cache its metadata."""
        super().__init__(data_locator, app_config)
        self.lock = threading.Lock()
        self.url = data_locator.uri_or_path
        if self.url[-1] != "/":
            self.url += "/"
        # caching immutable state
        self.lsuri_results = ImmutableKVCache(lambda key: self._lsuri(uri=key, tiledb_ctx=self.tiledb_ctx))
        self.arrays = ImmutableKVCache(lambda key: self._open_array(uri=key, tiledb_ctx=self.tiledb_ctx))
        self.schema = None
        self.X_approximate_distribution = None
        self._validate_and_initialize()
    def cleanup(self):
        """close all the open tiledb arrays"""
        for array in self.arrays.values():
            array.close()
        self.arrays.clear()
    @staticmethod
    def set_tiledb_context(context_params):
        """Set the tiledb context. This should be set before any instances of CxgAdaptor are created"""
        try:
            CxgAdaptor.tiledb_ctx = tiledb.Ctx(context_params)
            tiledb.default_ctx(context_params)
        except tiledb.libtiledb.TileDBError as e:
            # tiledb's global default context can only be set once per process;
            # a second call with an identical config is tolerated.
            if e.message == "Global context already initialized!":
                if tiledb.default_ctx().config().dict() != CxgAdaptor.tiledb_ctx.config().dict():
                    raise ConfigurationError("Cannot change tiledb configuration once it is set")
            else:
                raise ConfigurationError(f"Invalid tiledb context: {str(e)}")
    @staticmethod
    def pre_load_validation(data_locator):
        """Raise DatasetAccessError if the locator does not point at a valid CXG."""
        location = data_locator.uri_or_path
        if not CxgAdaptor.isvalid(location):
            logging.error(f"cxg matrix is not valid: {location}")
            raise DatasetAccessError("cxg matrix is not valid")
    @staticmethod
    def file_size(data_locator):
        # Size reporting is not implemented for CXG; always reports 0.
        return 0
    @staticmethod
    def open(data_locator, app_config):
        """Factory entry point used by the data-adaptor framework."""
        return CxgAdaptor(data_locator, app_config)
    def get_about(self):
        return self.about if self.about else super().get_about()
    def get_title(self):
        return self.title if self.title else super().get_title()
    def get_corpora_props(self):
        return self.corpora_props if self.corpora_props else super().get_corpora_props()
    def get_name(self):
        return "cellxgene cxg adaptor version"
    def get_library_versions(self):
        return dict(tiledb=tiledb.__version__)
    def get_path(self, *urls):
        """Join path components onto the CXG base URL."""
        return path_join(self.url, *urls)
    @staticmethod
    def _lsuri(uri, tiledb_ctx):
        # Raw tiledb.ls wrapper; results are cached via self.lsuri_results.
        def _cleanpath(p):
            # strip the trailing slash S3 listings include
            if p[-1] == "/":
                return p[:-1]
            else:
                return p
        result = []
        tiledb.ls(uri, lambda path, type: result.append((_cleanpath(path), type)), ctx=tiledb_ctx)
        return result
    def lsuri(self, uri):
        """
        given a URI, do a tiledb.ls but normalizing for all path weirdness:
        * S3 URIs require trailing slash. file: doesn't care.
        * results on S3 *have* a trailing slash, Posix does not.
        returns list of (absolute paths, type) *without* trailing slash
        in the path.
        """
        if uri[-1] != "/":
            uri += "/"
        return self.lsuri_results[uri]
    @staticmethod
    def isvalid(url):
        """
        Return True if this looks like a valid CXG, False if not. Just a quick/cheap
        test, not to be fully trusted.
        """
        # a CXG is a tiledb group containing obs/var/X arrays and an emb group
        if not tiledb.object_type(url, ctx=CxgAdaptor.tiledb_ctx) == "group":
            return False
        if not tiledb.object_type(path_join(url, "obs"), ctx=CxgAdaptor.tiledb_ctx) == "array":
            return False
        if not tiledb.object_type(path_join(url, "var"), ctx=CxgAdaptor.tiledb_ctx) == "array":
            return False
        if not tiledb.object_type(path_join(url, "X"), ctx=CxgAdaptor.tiledb_ctx) == "array":
            return False
        if not tiledb.object_type(path_join(url, "emb"), ctx=CxgAdaptor.tiledb_ctx) == "group":
            return False
        return True
    def has_array(self, name):
        """Return True if `name` is a tiledb array inside this CXG."""
        a_type = tiledb.object_type(path_join(self.url, name), ctx=self.tiledb_ctx)
        return a_type == "array"
    def _validate_and_initialize(self):
        """
        remember, preload_validation() has already been called, so
        no need to repeat anything it has done.

        Load the CXG "group" metadata and cache instance values.

        Be very aware of multiple versions of the CXG object.

        CXG versions in the wild:
        * version 0, aka "no version" -- can be detected by the lack
          of a cxg_group_metadata array.
        * version 0.1 -- metadata attache to cxg_group_metadata array.
          Same as 0, except it adds group metadata.
        """
        title = None
        about = None
        corpora_props = None
        if self.has_array("cxg_group_metadata"):
            # version >0
            gmd = self.open_array("cxg_group_metadata")
            cxg_version = gmd.meta["cxg_version"]
            # version 0.1 used a malformed/shorthand semver string.
            if cxg_version == "0.1" or cxg_version == "0.2.0":
                cxg_properties = json.loads(gmd.meta["cxg_properties"])
                title = cxg_properties.get("title", None)
                about = cxg_properties.get("about", None)
                if cxg_version == "0.2.0":
                    corpora_props = json.loads(gmd.meta["corpora"]) if "corpora" in gmd.meta else None
        else:
            # version 0
            cxg_version = "0.0"
        if cxg_version not in ["0.0", "0.1", "0.2.0"]:
            raise DatasetAccessError(f"cxg matrix is not valid: {self.url}")
        if self.dataset_config.X_approximate_distribution == "auto":
            # "auto" requires inspecting the data, which is unsupported here
            raise ConfigurationError("X-approximate-distribution 'auto' mode unsupported.")
        self.X_approximate_distribution = self.dataset_config.X_approximate_distribution
        self.title = title
        self.about = about
        self.cxg_version = cxg_version
        self.corpora_props = corpora_props
    @staticmethod
    def _open_array(uri, tiledb_ctx):
        # Open once to inspect the schema, then reopen with the concrete
        # sparse/dense array type. Cached via self.arrays.
        with tiledb.Array(uri, mode="r", ctx=tiledb_ctx) as array:
            if array.schema.sparse:
                return tiledb.SparseArray(uri, mode="r", ctx=tiledb_ctx)
            else:
                return tiledb.DenseArray(uri, mode="r", ctx=tiledb_ctx)
    def open_array(self, name):
        """Return the (cached) open tiledb array `name` within this CXG."""
        try:
            p = self.get_path(name)
            return self.arrays[p]
        except tiledb.libtiledb.TileDBError:
            raise DatasetAccessError(name)
    def get_embedding_array(self, ename, dims=2):
        """Return the first `dims` columns of the embedding named `ename`."""
        array = self.open_array(f"emb/{ename}")
        return array[:, 0:dims]
    def compute_diffexp_ttest(self, maskA, maskB, top_n=None, lfc_cutoff=None):
        """Differential expression (t-test) between the obs masked by maskA
        and maskB, falling back to configured defaults for top_n/lfc_cutoff."""
        if top_n is None:
            top_n = self.dataset_config.diffexp__top_n
        if lfc_cutoff is None:
            lfc_cutoff = self.dataset_config.diffexp__lfc_cutoff
        return diffexp_cxg.diffexp_ttest(
            adaptor=self, maskA=maskA, maskB=maskB, top_n=top_n, diffexp_lfc_cutoff=lfc_cutoff
        )
    def get_colors(self):
        """Return the category color table stored in group metadata (CXG >= 0.1)."""
        if self.cxg_version == "0.0":
            return dict()
        meta = self.open_array("cxg_group_metadata").meta
        return json.loads(meta["cxg_category_colors"]) if "cxg_category_colors" in meta else dict()
    def __remap_indices(self, coord_range, coord_mask, coord_data):
        """
        This function maps the indices in coord_data, which could be in the range [0,coord_range), to
        a range that only includes the number of indices encoded in coord_mask.

        coord_range is the maxinum size of the range (e.g. get_shape()[0] or get_shape()[1])
        coord_mask is a mask passed into the get_X_array, of size coord_range
        coord_data are indices representing locations of non-zero values, in the range [0,coord_range).

        For example, say
        coord_mask = [1,0,1,0,0,1]
        coord_data = [2,0,2,2,5]

        The function computes the following:
        indices = [0,2,5]
        ncoord = 3
        maprange = [0,1,2]
        mapindex = [0,0,1,0,0,2]
        coordindices = [1,0,1,1,2]
        """
        if coord_mask is None:
            return coord_range, coord_data
        indices = np.where(coord_mask)[0]
        ncoord = indices.shape[0]
        maprange = np.arange(ncoord)
        # scatter the compacted positions back into the original coordinate space
        mapindex = np.zeros(indices[-1] + 1, dtype=int)
        mapindex[indices] = maprange
        coordindices = mapindex[coord_data]
        return ncoord, coordindices
    def get_X_array(self, obs_mask=None, var_mask=None):
        """Return the dense X submatrix selected by the obs/var boolean masks
        (None selects the full axis)."""
        obs_items = pack_selector_from_mask(obs_mask)
        var_items = pack_selector_from_mask(var_mask)
        if obs_items is None or var_items is None:
            # If either zero rows or zero columns were selected, return an empty 2d array.
            shape = self.get_shape()
            obs_size = 0 if obs_items is None else shape[0] if obs_mask is None else np.count_nonzero(obs_mask)
            var_size = 0 if var_items is None else shape[1] if var_mask is None else np.count_nonzero(var_mask)
            return np.ndarray((obs_size, var_size))
        X = self.open_array("X")
        if X.schema.sparse:
            if obs_items == slice(None) and var_items == slice(None):
                data = X[:, :]
            else:
                data = X.multi_index[obs_items, var_items]
            # densify the sparse result: remap global coords to the selection
            nrows, obsindices = self.__remap_indices(X.shape[0], obs_mask, data.get("coords", data)["obs"])
            ncols, varindices = self.__remap_indices(X.shape[1], var_mask, data.get("coords", data)["var"])
            densedata = np.zeros((nrows, ncols), dtype=self.get_X_array_dtype())
            densedata[obsindices, varindices] = data[""]
            # X_col_shift, if present, holds per-column offsets removed to
            # sparsify X; add them back to recover the true values.
            if self.has_array("X_col_shift"):
                X_col_shift = self.open_array("X_col_shift")
                if var_items == slice(None):
                    densedata += X_col_shift[:]
                else:
                    densedata += X_col_shift.multi_index[var_items][""]
            return densedata
        else:
            if obs_items == slice(None) and var_items == slice(None):
                data = X[:, :]
            else:
                data = X.multi_index[obs_items, var_items][""]
            return data
    def get_X_approximate_distribution(self) -> XApproximateDistribution:
        return self.X_approximate_distribution
    def get_shape(self):
        X = self.open_array("X")
        return X.shape
    def get_X_array_dtype(self):
        X = self.open_array("X")
        return X.dtype
    def query_var_array(self, term_name):
        """Return the values of the var annotation column `term_name`."""
        var = self.open_array("var")
        data = var.query(attrs=[term_name])[:][term_name]
        return data
    def query_obs_array(self, term_name):
        """Return the values of the obs annotation column `term_name`."""
        var = self.open_array("obs")
        try:
            data = var.query(attrs=[term_name])[:][term_name]
        except tiledb.libtiledb.TileDBError:
            raise DatasetAccessError("query_obs")
        return data
    def get_obs_names(self):
        # get the index from the meta data
        obs = self.open_array("obs")
        meta = json.loads(obs.meta["cxg_schema"])
        index_name = meta["index"]
        return index_name
    def get_obs_index(self):
        """Return the values of the obs index column named in cxg_schema."""
        obs = self.open_array("obs")
        meta = json.loads(obs.meta["cxg_schema"])
        index_name = meta["index"]
        data = obs.query(attrs=[index_name])[:][index_name]
        return data
    def get_obs_columns(self):
        """Return the obs annotation column names as a pandas Index."""
        obs = self.open_array("obs")
        schema = obs.schema
        col_names = [attr.name for attr in schema]
        return pd.Index(col_names)
    def get_obs_keys(self):
        obs = self.open_array("obs")
        schema = obs.schema
        return [attr.name for attr in schema]
    def get_var_keys(self):
        var = self.open_array("var")
        schema = var.schema
        return [attr.name for attr in schema]
    # function to get the embedding
    # this function to iterate through embeddings.
    def get_embedding_names(self):
        """List the embedding array names stored under emb/."""
        with ServerTiming.time("layout.lsuri"):
            pemb = self.get_path("emb")
            embeddings = [os.path.basename(p) for (p, t) in self.lsuri(pemb) if t == "array"]
        if len(embeddings) == 0:
            raise DatasetAccessError("cxg matrix missing embeddings")
        return embeddings
    def _get_schema(self):
        # Build the client-facing schema: X dataframe dims/type, obs/var
        # column types (optionally overridden by cxg_schema hints), and the
        # embedding layouts. Cached by get_schema().
        if self.schema:
            return self.schema
        shape = self.get_shape()
        dtype = self.get_X_array_dtype()
        dataframe = {"nObs": shape[0], "nVar": shape[1], **get_schema_type_hint_from_dtype(dtype)}
        annotations = {}
        for ax in ("obs", "var"):
            A = self.open_array(ax)
            schema_hints = json.loads(A.meta["cxg_schema"]) if "cxg_schema" in A.meta else {}
            if type(schema_hints) is not dict:
                raise TypeError("Array schema was malformed.")
            cols = []
            for attr in A.schema:
                schema = dict(name=attr.name, writable=False)
                type_hint = schema_hints.get(attr.name, {})
                # type hints take precedence
                if "type" in type_hint:
                    schema["type"] = type_hint["type"]
                    if schema["type"] == "categorical" and "categories" in type_hint:
                        schema["categories"] = type_hint["categories"]
                else:
                    schema.update(get_schema_type_hint_from_dtype(attr.dtype))
                cols.append(schema)
            annotations[ax] = dict(columns=cols)
            if "index" in schema_hints:
                annotations[ax].update({"index": schema_hints["index"]})
        obs_layout = []
        embeddings = self.get_embedding_names()
        for ename in embeddings:
            A = self.open_array(f"emb/{ename}")
            obs_layout.append({"name": ename, "type": "float32", "dims": [f"{ename}_{d}" for d in range(0, A.ndim)]})
        schema = {"dataframe": dataframe, "annotations": annotations, "layout": {"obs": obs_layout}}
        return schema
    def get_schema(self):
        """Thread-safe, cached accessor for the dataset schema."""
        if self.schema is None:
            with self.lock:
                self.schema = self._get_schema()
        return self.schema
    def _annotations_field_split(self, axis, fields, A, labels):
        """
        fields: requested fields, may be None (all)
        labels: writable user annotations dataframe, if any

        Remove redundant fields, raise KeyError on non-existent fields,
        and split into three lists:
            fields_to_fetch_from_cxg
            fields_to_fetch_from_labels
            fields_to_return

        if we have to return from labels, the fetch fields will contain the index
        to join on, which may not be in fields_to_return
        """
        need_labels = axis == Axis.OBS and labels is not None and not labels.empty
        index_key = self.get_obs_names() if need_labels else None
        if not fields:
            return (None, None, None, index_key)
        cxg_keys = frozenset([a.name for a in A.schema])
        user_anno_keys = frozenset(labels.columns.tolist()) if need_labels else frozenset()
        return_keys = frozenset(fields)
        # the obs index is needed to join user labels even if not returned
        label_join_index = frozenset([index_key]) if need_labels and (return_keys & user_anno_keys) else frozenset()
        unknown_fields = return_keys - (cxg_keys | user_anno_keys)
        if unknown_fields:
            raise KeyError("_".join(unknown_fields))
        return (
            list((return_keys & cxg_keys) | label_join_index),
            list(return_keys & user_anno_keys),
            list(return_keys),
            index_key,
        )
    def annotation_to_fbs_matrix(self, axis, fields=None, labels=None):
        """Fetch the requested annotation columns (joined with user labels
        where applicable) and encode them as a flatbuffers matrix."""
        with ServerTiming.time(f"annotations.{axis}.query"):
            A = self.open_array(str(axis))
            # may raise if fields contains unknown key
            cxg_fields, anno_fields, return_fields, index_field = self._annotations_field_split(axis, fields, A, labels)
            if cxg_fields is None:
                data = A[:]
            elif cxg_fields:
                data = A.query(attrs=cxg_fields)[:]
            else:
                data = {}
            df = pd.DataFrame.from_dict(data)
            if axis == Axis.OBS and labels is not None and not labels.empty:
                if anno_fields is None:
                    assert index_field
                    df = df.join(labels, index_field)
                elif anno_fields:
                    assert index_field
                    df = df.join(labels[anno_fields], index_field)
            if return_fields:
                df = df[return_fields]
        with ServerTiming.time(f"annotations.{axis}.encode"):
            fbs = encode_matrix_fbs(df, col_idx=df.columns)
        return fbs
| [
"tiledb.Array",
"server.common.errors.DatasetAccessError",
"server.compute.diffexp_cxg.diffexp_ttest",
"pandas.Index",
"numpy.count_nonzero",
"logging.error",
"numpy.arange",
"server.common.utils.utils.path_join",
"server.common.utils.type_conversion_utils.get_schema_type_hint_from_dtype",
"numpy.... | [((829, 950), 'tiledb.Ctx', 'tiledb.Ctx', (["{'sm.tile_cache_size': 8 * 1024 * 1024 * 1024, 'sm.num_reader_threads': 32,\n 'vfs.s3.region': 'us-east-1'}"], {}), "({'sm.tile_cache_size': 8 * 1024 * 1024 * 1024,\n 'sm.num_reader_threads': 32, 'vfs.s3.region': 'us-east-1'})\n", (839, 950), False, 'import tiledb\n'), ((1088, 1104), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1102, 1104), False, 'import threading\n'), ((3412, 3438), 'server.common.utils.utils.path_join', 'path_join', (['self.url', '*urls'], {}), '(self.url, *urls)\n', (3421, 3438), False, 'from server.common.utils.utils import path_join\n'), ((7949, 8063), 'server.compute.diffexp_cxg.diffexp_ttest', 'diffexp_cxg.diffexp_ttest', ([], {'adaptor': 'self', 'maskA': 'maskA', 'maskB': 'maskB', 'top_n': 'top_n', 'diffexp_lfc_cutoff': 'lfc_cutoff'}), '(adaptor=self, maskA=maskA, maskB=maskB, top_n=\n top_n, diffexp_lfc_cutoff=lfc_cutoff)\n', (7974, 8063), False, 'from server.compute import diffexp_cxg\n'), ((9330, 9347), 'numpy.arange', 'np.arange', (['ncoord'], {}), '(ncoord)\n', (9339, 9347), True, 'import numpy as np\n'), ((9367, 9403), 'numpy.zeros', 'np.zeros', (['(indices[-1] + 1)'], {'dtype': 'int'}), '(indices[-1] + 1, dtype=int)\n', (9375, 9403), True, 'import numpy as np\n'), ((9599, 9632), 'server.data_cxg.cxg_util.pack_selector_from_mask', 'pack_selector_from_mask', (['obs_mask'], {}), '(obs_mask)\n', (9622, 9632), False, 'from server.data_cxg.cxg_util import pack_selector_from_mask\n'), ((9653, 9686), 'server.data_cxg.cxg_util.pack_selector_from_mask', 'pack_selector_from_mask', (['var_mask'], {}), '(var_mask)\n', (9676, 9686), False, 'from server.data_cxg.cxg_util import pack_selector_from_mask\n'), ((12130, 12164), 'json.loads', 'json.loads', (["obs.meta['cxg_schema']"], {}), "(obs.meta['cxg_schema'])\n", (12140, 12164), False, 'import json\n'), ((12308, 12342), 'json.loads', 'json.loads', (["obs.meta['cxg_schema']"], {}), "(obs.meta['cxg_schema'])\n", (12318, 12342), 
False, 'import json\n'), ((12621, 12640), 'pandas.Index', 'pd.Index', (['col_names'], {}), '(col_names)\n', (12629, 12640), True, 'import pandas as pd\n'), ((1959, 1985), 'tiledb.Ctx', 'tiledb.Ctx', (['context_params'], {}), '(context_params)\n', (1969, 1985), False, 'import tiledb\n'), ((1998, 2032), 'tiledb.default_ctx', 'tiledb.default_ctx', (['context_params'], {}), '(context_params)\n', (2016, 2032), False, 'import tiledb\n'), ((2606, 2659), 'logging.error', 'logging.error', (['f"""cxg matrix is not valid: {location}"""'], {}), "(f'cxg matrix is not valid: {location}')\n", (2619, 2659), False, 'import logging\n'), ((2678, 2723), 'server.common.errors.DatasetAccessError', 'DatasetAccessError', (['"""cxg matrix is not valid"""'], {}), "('cxg matrix is not valid')\n", (2696, 2723), False, 'from server.common.errors import DatasetAccessError, ConfigurationError\n'), ((5063, 5088), 'server.common.utils.utils.path_join', 'path_join', (['self.url', 'name'], {}), '(self.url, name)\n', (5072, 5088), False, 'from server.common.utils.utils import path_join\n'), ((6572, 6630), 'server.common.errors.DatasetAccessError', 'DatasetAccessError', (['f"""cxg matrix is not valid: {self.url}"""'], {}), "(f'cxg matrix is not valid: {self.url}')\n", (6590, 6630), False, 'from server.common.errors import DatasetAccessError, ConfigurationError\n'), ((6718, 6791), 'server.common.errors.ConfigurationError', 'ConfigurationError', (['"""X-approximate-distribution \'auto\' mode unsupported."""'], {}), '("X-approximate-distribution \'auto\' mode unsupported.")\n', (6736, 6791), False, 'from server.common.errors import DatasetAccessError, ConfigurationError\n'), ((7088, 7131), 'tiledb.Array', 'tiledb.Array', (['uri'], {'mode': '"""r"""', 'ctx': 'tiledb_ctx'}), "(uri, mode='r', ctx=tiledb_ctx)\n", (7100, 7131), False, 'import tiledb\n'), ((8245, 8284), 'json.loads', 'json.loads', (["meta['cxg_category_colors']"], {}), "(meta['cxg_category_colors'])\n", (8255, 8284), False, 'import json\n'), 
((9253, 9273), 'numpy.where', 'np.where', (['coord_mask'], {}), '(coord_mask)\n', (9261, 9273), True, 'import numpy as np\n'), ((10109, 10141), 'numpy.ndarray', 'np.ndarray', (['(obs_size, var_size)'], {}), '((obs_size, var_size))\n', (10119, 10141), True, 'import numpy as np\n'), ((13057, 13090), 'server_timing.Timing.time', 'ServerTiming.time', (['"""layout.lsuri"""'], {}), "('layout.lsuri')\n", (13074, 13090), True, 'from server_timing import Timing as ServerTiming\n'), ((13277, 13328), 'server.common.errors.DatasetAccessError', 'DatasetAccessError', (['"""cxg matrix missing embeddings"""'], {}), "('cxg matrix missing embeddings')\n", (13295, 13328), False, 'from server.common.errors import DatasetAccessError, ConfigurationError\n'), ((13573, 13611), 'server.common.utils.type_conversion_utils.get_schema_type_hint_from_dtype', 'get_schema_type_hint_from_dtype', (['dtype'], {}), '(dtype)\n', (13604, 13611), False, 'from server.common.utils.type_conversion_utils import get_schema_type_hint_from_dtype\n'), ((16763, 16809), 'server_timing.Timing.time', 'ServerTiming.time', (['f"""annotations.{axis}.query"""'], {}), "(f'annotations.{axis}.query')\n", (16780, 16809), True, 'from server_timing import Timing as ServerTiming\n'), ((17238, 17266), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (17260, 17266), True, 'import pandas as pd\n'), ((17702, 17749), 'server_timing.Timing.time', 'ServerTiming.time', (['f"""annotations.{axis}.encode"""'], {}), "(f'annotations.{axis}.encode')\n", (17719, 17749), True, 'from server_timing import Timing as ServerTiming\n'), ((17769, 17810), 'server.common.fbs.matrix.encode_matrix_fbs', 'encode_matrix_fbs', (['df'], {'col_idx': 'df.columns'}), '(df, col_idx=df.columns)\n', (17786, 17810), False, 'from server.common.fbs.matrix import encode_matrix_fbs\n'), ((4405, 4455), 'tiledb.object_type', 'tiledb.object_type', (['url'], {'ctx': 'CxgAdaptor.tiledb_ctx'}), '(url, ctx=CxgAdaptor.tiledb_ctx)\n', (4423, 
4455), False, 'import tiledb\n'), ((6135, 6173), 'json.loads', 'json.loads', (["gmd.meta['cxg_properties']"], {}), "(gmd.meta['cxg_properties'])\n", (6145, 6173), False, 'import json\n'), ((7201, 7250), 'tiledb.SparseArray', 'tiledb.SparseArray', (['uri'], {'mode': '"""r"""', 'ctx': 'tiledb_ctx'}), "(uri, mode='r', ctx=tiledb_ctx)\n", (7219, 7250), False, 'import tiledb\n'), ((7292, 7340), 'tiledb.DenseArray', 'tiledb.DenseArray', (['uri'], {'mode': '"""r"""', 'ctx': 'tiledb_ctx'}), "(uri, mode='r', ctx=tiledb_ctx)\n", (7309, 7340), False, 'import tiledb\n'), ((7520, 7544), 'server.common.errors.DatasetAccessError', 'DatasetAccessError', (['name'], {}), '(name)\n', (7538, 7544), False, 'from server.common.errors import DatasetAccessError, ConfigurationError\n'), ((11953, 11984), 'server.common.errors.DatasetAccessError', 'DatasetAccessError', (['"""query_obs"""'], {}), "('query_obs')\n", (11971, 11984), False, 'from server.common.errors import DatasetAccessError, ConfigurationError\n'), ((13158, 13177), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (13174, 13177), False, 'import os\n'), ((13736, 13768), 'json.loads', 'json.loads', (["A.meta['cxg_schema']"], {}), "(A.meta['cxg_schema'])\n", (13746, 13768), False, 'import json\n'), ((4527, 4548), 'server.common.utils.utils.path_join', 'path_join', (['url', '"""obs"""'], {}), "(url, 'obs')\n", (4536, 4548), False, 'from server.common.utils.utils import path_join\n'), ((4648, 4669), 'server.common.utils.utils.path_join', 'path_join', (['url', '"""var"""'], {}), "(url, 'var')\n", (4657, 4669), False, 'from server.common.utils.utils import path_join\n'), ((4769, 4788), 'server.common.utils.utils.path_join', 'path_join', (['url', '"""X"""'], {}), "(url, 'X')\n", (4778, 4788), False, 'from server.common.utils.utils import path_join\n'), ((4888, 4909), 'server.common.utils.utils.path_join', 'path_join', (['url', '"""emb"""'], {}), "(url, 'emb')\n", (4897, 4909), False, 'from server.common.utils.utils import 
path_join\n'), ((6361, 6392), 'json.loads', 'json.loads', (["gmd.meta['corpora']"], {}), "(gmd.meta['corpora'])\n", (6371, 6392), False, 'import json\n'), ((9951, 9977), 'numpy.count_nonzero', 'np.count_nonzero', (['obs_mask'], {}), '(obs_mask)\n', (9967, 9977), True, 'import numpy as np\n'), ((10063, 10089), 'numpy.count_nonzero', 'np.count_nonzero', (['var_mask'], {}), '(var_mask)\n', (10079, 10089), True, 'import numpy as np\n'), ((2275, 2346), 'server.common.errors.ConfigurationError', 'ConfigurationError', (['"""Cannot change tiledb configuration once it is set"""'], {}), "('Cannot change tiledb configuration once it is set')\n", (2293, 2346), False, 'from server.common.errors import DatasetAccessError, ConfigurationError\n'), ((14445, 14488), 'server.common.utils.type_conversion_utils.get_schema_type_hint_from_dtype', 'get_schema_type_hint_from_dtype', (['attr.dtype'], {}), '(attr.dtype)\n', (14476, 14488), False, 'from server.common.utils.type_conversion_utils import get_schema_type_hint_from_dtype\n'), ((2170, 2190), 'tiledb.default_ctx', 'tiledb.default_ctx', ([], {}), '()\n', (2188, 2190), False, 'import tiledb\n')] |
import random
import logging
import numpy as np
class MetropolisHastingsSampler(object):
    """Metropolis-Hastings sampler over tree structures.

    Each step applies one randomly chosen move — reattaching a subtree
    ('parent move') or resampling latent variables — and records the tree's
    marginal log-likelihood.
    """
    def __init__(self, tree, X):
        self.tree = tree
        self.X = X
        self.last_move = None
        self.likelihoods = []
    def initialize_assignments(self):
        self.tree.initialize_from_data(self.X)
    def add_constraint(self, constraint):
        self.tree.add_constraint(constraint, self.X)
    def parent_move(self):
        """Propose detaching a random node's subtree and reattaching it at a
        sampled assignment; accept or reject via the MH ratio."""
        logging.debug("Copying tree...")
        tree = self.tree.copy()
        old_likelihood = self.tree.marg_log_likelihood()
        logging.debug("Old Marginal Likelihood: %f", old_likelihood)
        node = tree.choice()
        old_assignment = tree.get_assignment(node.parent)
        old_index, old_state = old_assignment
        subtree = node.detach()
        backward_likelihood = tree.log_prob_assignment(old_assignment)
        logging.debug("Backward Likelihood: %f", backward_likelihood)
        points = set()
        if len(tree.constraints) > 0:
            points = subtree.points()
        time = float('inf')
        try_counter = 0
        # Rejection-sample an attachment whose time is above the subtree's
        # own time; give up (no move) after 500 failed proposals.
        while time > subtree.get_state('time'):
            (assignment, forward_likelihood) = tree.sample_assignment(constraints=tree.constraints,
                                                                      points=points,
                                                                      state=old_state)
            logging.debug("Candidate assignment: %s", str(assignment))
            (index, state) = assignment
            time = state['time']
            try_counter += 1
            if try_counter > 500:
                return
        tree.assign_node(subtree, assignment)
        new_likelihood = tree.marg_log_likelihood()
        # BUG FIX: previously logged old_likelihood under the "New" label
        # (SPRSampler logs new_likelihood here, as intended).
        logging.debug("New Marginal Likelihood: %f", new_likelihood)
        logging.debug("Forward Likelihood: %f", forward_likelihood)
        # MH acceptance probability, computed in log space.
        a = min(1, np.exp(new_likelihood + backward_likelihood - old_likelihood - forward_likelihood))
        if np.random.random() < a:
            logging.debug("Accepted new tree with probability: %f", a)
            self.tree = tree
            return
        logging.debug("Rejected new tree with probability: %f", a)
    def update_latent(self):
        self.tree.sample_latent()
    def sample(self):
        """Perform one randomly chosen MCMC move and record the likelihood."""
        self.tree = self.tree.copy()
        random.choice([self.parent_move, self.update_latent])()
        self.likelihoods.append(self.tree.marg_log_likelihood())
class SPRSampler(object):
    """Subtree-prune-and-regraft sampler: every step detaches a random
    subtree, regrafts it at a sampled location, and accepts or rejects the
    proposal with the Metropolis-Hastings criterion."""
    def __init__(self, tree, X):
        self.tree = tree
        self.X = X
        self.last_move = None
        self.likelihoods = []
    def initialize_assignments(self):
        self.tree.initialize_assignments(np.arange(self.X.shape[0]))
    def add_constraint(self, constraint):
        self.tree.add_constraint(constraint, self.X)
    def parent_move(self):
        """One prune-and-regraft proposal, accepted via the MH ratio."""
        logging.debug("Copying tree...")
        candidate = self.tree.copy()
        prev_ll = self.tree.marg_log_likelihood()
        logging.debug("Old Marginal Likelihood: %f" % prev_ll)
        logging.debug("Old Cost: %f" % self.tree.cost())
        chosen = candidate.choice()
        prev_assignment = candidate.get_assignment(chosen.parent)
        prev_index, prev_state = prev_assignment
        pruned = chosen.detach()
        reverse_ll = candidate.log_prob_assignment(prev_assignment)
        logging.debug("Backward Likelihood: %f" % reverse_ll)
        anchor_points = pruned.points() if len(candidate.constraints) > 0 else set()
        (placement, proposal_ll) = candidate.sample_assignment(constraints=candidate.constraints,
                                                               points=anchor_points,
                                                               state=prev_state)
        logging.debug("Candidate assignment: %s", str(placement))
        (index, state) = placement
        candidate.assign_node(pruned, placement)
        next_ll = candidate.marg_log_likelihood()
        logging.debug("New Marginal Likelihood: %f" % next_ll)
        logging.debug("New Cost: %f" % candidate.cost())
        logging.debug("Forward Likelihood: %f" % proposal_ll)
        # MH acceptance probability, computed from log-likelihood differences.
        accept_prob = min(1, np.exp(next_ll + reverse_ll - prev_ll - proposal_ll))
        if np.random.random() < accept_prob:
            logging.debug("Accepted new tree with probability: %f" % accept_prob)
            self.tree = candidate
            return
        logging.debug("Rejected new tree with probability: %f" % accept_prob)
    def sample(self):
        """Take one SPR step and record the resulting marginal likelihood."""
        self.tree = self.tree.copy()
        self.parent_move()
        self.likelihoods.append(self.tree.marg_log_likelihood())
| [
"random.choice",
"logging.debug",
"numpy.random.random",
"numpy.exp",
"numpy.arange"
] | [((446, 478), 'logging.debug', 'logging.debug', (['"""Copying tree..."""'], {}), "('Copying tree...')\n", (459, 478), False, 'import logging\n'), ((577, 638), 'logging.debug', 'logging.debug', (["('Old Marginal Likelihood: %f' % old_likelihood)"], {}), "('Old Marginal Likelihood: %f' % old_likelihood)\n", (590, 638), False, 'import logging\n'), ((885, 947), 'logging.debug', 'logging.debug', (["('Backward Likelihood: %f' % backward_likelihood)"], {}), "('Backward Likelihood: %f' % backward_likelihood)\n", (898, 947), False, 'import logging\n'), ((1760, 1821), 'logging.debug', 'logging.debug', (["('New Marginal Likelihood: %f' % old_likelihood)"], {}), "('New Marginal Likelihood: %f' % old_likelihood)\n", (1773, 1821), False, 'import logging\n'), ((1830, 1890), 'logging.debug', 'logging.debug', (["('Forward Likelihood: %f' % forward_likelihood)"], {}), "('Forward Likelihood: %f' % forward_likelihood)\n", (1843, 1890), False, 'import logging\n'), ((2158, 2217), 'logging.debug', 'logging.debug', (["('Rejected new tree with probability: %f' % a)"], {}), "('Rejected new tree with probability: %f' % a)\n", (2171, 2217), False, 'import logging\n'), ((2876, 2908), 'logging.debug', 'logging.debug', (['"""Copying tree..."""'], {}), "('Copying tree...')\n", (2889, 2908), False, 'import logging\n'), ((3007, 3068), 'logging.debug', 'logging.debug', (["('Old Marginal Likelihood: %f' % old_likelihood)"], {}), "('Old Marginal Likelihood: %f' % old_likelihood)\n", (3020, 3068), False, 'import logging\n'), ((3372, 3434), 'logging.debug', 'logging.debug', (["('Backward Likelihood: %f' % backward_likelihood)"], {}), "('Backward Likelihood: %f' % backward_likelihood)\n", (3385, 3434), False, 'import logging\n'), ((4011, 4072), 'logging.debug', 'logging.debug', (["('New Marginal Likelihood: %f' % new_likelihood)"], {}), "('New Marginal Likelihood: %f' % new_likelihood)\n", (4024, 4072), False, 'import logging\n'), ((4133, 4193), 'logging.debug', 'logging.debug', (["('Forward 
Likelihood: %f' % forward_likelihood)"], {}), "('Forward Likelihood: %f' % forward_likelihood)\n", (4146, 4193), False, 'import logging\n'), ((4461, 4520), 'logging.debug', 'logging.debug', (["('Rejected new tree with probability: %f' % a)"], {}), "('Rejected new tree with probability: %f' % a)\n", (4474, 4520), False, 'import logging\n'), ((1911, 1997), 'numpy.exp', 'np.exp', (['(new_likelihood + backward_likelihood - old_likelihood - forward_likelihood)'], {}), '(new_likelihood + backward_likelihood - old_likelihood -\n forward_likelihood)\n', (1917, 1997), True, 'import numpy as np\n'), ((2006, 2024), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2022, 2024), True, 'import numpy as np\n'), ((2042, 2101), 'logging.debug', 'logging.debug', (["('Accepted new tree with probability: %f' % a)"], {}), "('Accepted new tree with probability: %f' % a)\n", (2055, 2101), False, 'import logging\n'), ((2350, 2403), 'random.choice', 'random.choice', (['[self.parent_move, self.update_latent]'], {}), '([self.parent_move, self.update_latent])\n', (2363, 2403), False, 'import random\n'), ((2716, 2742), 'numpy.arange', 'np.arange', (['self.X.shape[0]'], {}), '(self.X.shape[0])\n', (2725, 2742), True, 'import numpy as np\n'), ((4214, 4300), 'numpy.exp', 'np.exp', (['(new_likelihood + backward_likelihood - old_likelihood - forward_likelihood)'], {}), '(new_likelihood + backward_likelihood - old_likelihood -\n forward_likelihood)\n', (4220, 4300), True, 'import numpy as np\n'), ((4309, 4327), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4325, 4327), True, 'import numpy as np\n'), ((4345, 4404), 'logging.debug', 'logging.debug', (["('Accepted new tree with probability: %f' % a)"], {}), "('Accepted new tree with probability: %f' % a)\n", (4358, 4404), False, 'import logging\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import unittest
import numpy
class SimpleNet(paddle.nn.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
self.conv = paddle.nn.Conv2D(1, 2, (3, 3))
def forward(self, image, label=None):
return self.conv(image)
def train_dygraph(net, data):
out = net(data)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam(parameters=net.parameters())
out.backward()
adam.step()
adam.clear_grad()
def static_program(net, data):
out = net(data)
loss = paddle.mean(out)
adam = paddle.optimizer.Adam()
adam.minimize(loss)
return loss
def set_flags(enable_autotune):
if paddle.is_compiled_with_cuda():
if enable_autotune:
paddle.set_flags({'FLAGS_conv_workspace_size_limit': -1})
paddle.set_flags({'FLAGS_cudnn_exhaustive_search': 1})
else:
paddle.set_flags({'FLAGS_conv_workspace_size_limit': 512})
paddle.set_flags({'FLAGS_cudnn_exhaustive_search': 0})
class TestAutoTune(unittest.TestCase):
def test_autotune(self):
paddle.fluid.core.disable_autotune()
status = paddle.fluid.core.autotune_status()
self.assertEqual(status["use_autotune"], False)
paddle.fluid.core.enable_autotune()
status = paddle.fluid.core.autotune_status()
self.assertEqual(status["use_autotune"], True)
def check_status(self, expected_res):
status = paddle.fluid.core.autotune_status()
for key in status.keys():
self.assertEqual(status[key], expected_res[key])
class TestDygraphAutoTuneStatus(TestAutoTune):
def run_program(self, enable_autotune):
set_flags(enable_autotune)
if enable_autotune:
paddle.fluid.core.enable_autotune()
else:
paddle.fluid.core.disable_autotune()
paddle.fluid.core.autotune_range(1, 2)
x_var = paddle.uniform((1, 1, 8, 8), dtype='float32', min=-1., max=1.)
net = SimpleNet()
for i in range(3):
train_dygraph(net, x_var)
if i >= 1 and i < 2:
expected_res = {
"step_id": i,
"use_autotune": enable_autotune,
"cache_size": 0,
"cache_hit_rate": 0
}
self.check_status(expected_res)
else:
expected_res = {
"step_id": i,
"use_autotune": False,
"cache_size": 0,
"cache_hit_rate": 0
}
self.check_status(expected_res)
def func_enable_autotune(self):
self.run_program(enable_autotune=True)
def test_enable_autotune(self):
with paddle.fluid.framework._test_eager_guard():
self.func_enable_autotune()
self.func_enable_autotune()
def func_disable_autotune(self):
self.run_program(enable_autotune=False)
def test_disable_autotune(self):
with paddle.fluid.framework._test_eager_guard():
self.func_disable_autotune()
self.func_disable_autotune()
class TestStaticAutoTuneStatus(TestAutoTune):
def run_program(self, enable_autotune):
paddle.enable_static()
set_flags(enable_autotune)
if enable_autotune:
paddle.fluid.core.enable_autotune()
else:
paddle.fluid.core.disable_autotune()
paddle.fluid.core.autotune_range(1, 2)
data_shape = [1, 1, 8, 8]
data = paddle.static.data(name='X', shape=data_shape, dtype='float32')
net = SimpleNet()
loss = static_program(net, data)
place = paddle.CUDAPlace(0) if paddle.fluid.core.is_compiled_with_cuda(
) else paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
x = numpy.random.random(size=data_shape).astype('float32')
for i in range(3):
exe.run(feed={'X': x}, fetch_list=[loss])
status = paddle.fluid.core.autotune_status()
# In static mode, the startup_program will run at first.
# The expected step_id will be increased by 1.
if i >= 0 and i < 1:
expected_res = {
"step_id": i + 1,
"use_autotune": enable_autotune,
"cache_size": 0,
"cache_hit_rate": 0
}
self.check_status(expected_res)
else:
expected_res = {
"step_id": i + 1,
"use_autotune": False,
"cache_size": 0,
"cache_hit_rate": 0
}
self.check_status(expected_res)
paddle.disable_static()
def func_enable_autotune(self):
self.run_program(enable_autotune=True)
def test_enable_autotune(self):
with paddle.fluid.framework._test_eager_guard():
self.func_enable_autotune()
self.func_enable_autotune()
def func_disable_autotune(self):
self.run_program(enable_autotune=False)
def test_disable_autotune(self):
with paddle.fluid.framework._test_eager_guard():
self.func_disable_autotune()
self.func_disable_autotune()
if __name__ == '__main__':
unittest.main()
| [
"paddle.mean",
"paddle.disable_static",
"unittest.main",
"paddle.fluid.core.disable_autotune",
"paddle.CPUPlace",
"numpy.random.random",
"paddle.static.default_startup_program",
"paddle.enable_static",
"paddle.fluid.core.is_compiled_with_cuda",
"paddle.fluid.core.autotune_status",
"paddle.fluid.... | [((948, 964), 'paddle.mean', 'paddle.mean', (['out'], {}), '(out)\n', (959, 964), False, 'import paddle\n'), ((1148, 1164), 'paddle.mean', 'paddle.mean', (['out'], {}), '(out)\n', (1159, 1164), False, 'import paddle\n'), ((1176, 1199), 'paddle.optimizer.Adam', 'paddle.optimizer.Adam', ([], {}), '()\n', (1197, 1199), False, 'import paddle\n'), ((1281, 1311), 'paddle.is_compiled_with_cuda', 'paddle.is_compiled_with_cuda', ([], {}), '()\n', (1309, 1311), False, 'import paddle\n'), ((5988, 6003), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6001, 6003), False, 'import unittest\n'), ((779, 809), 'paddle.nn.Conv2D', 'paddle.nn.Conv2D', (['(1)', '(2)', '(3, 3)'], {}), '(1, 2, (3, 3))\n', (795, 809), False, 'import paddle\n'), ((1708, 1744), 'paddle.fluid.core.disable_autotune', 'paddle.fluid.core.disable_autotune', ([], {}), '()\n', (1742, 1744), False, 'import paddle\n'), ((1762, 1797), 'paddle.fluid.core.autotune_status', 'paddle.fluid.core.autotune_status', ([], {}), '()\n', (1795, 1797), False, 'import paddle\n'), ((1863, 1898), 'paddle.fluid.core.enable_autotune', 'paddle.fluid.core.enable_autotune', ([], {}), '()\n', (1896, 1898), False, 'import paddle\n'), ((1916, 1951), 'paddle.fluid.core.autotune_status', 'paddle.fluid.core.autotune_status', ([], {}), '()\n', (1949, 1951), False, 'import paddle\n'), ((2067, 2102), 'paddle.fluid.core.autotune_status', 'paddle.fluid.core.autotune_status', ([], {}), '()\n', (2100, 2102), False, 'import paddle\n'), ((2473, 2511), 'paddle.fluid.core.autotune_range', 'paddle.fluid.core.autotune_range', (['(1)', '(2)'], {}), '(1, 2)\n', (2505, 2511), False, 'import paddle\n'), ((2528, 2592), 'paddle.uniform', 'paddle.uniform', (['(1, 1, 8, 8)'], {'dtype': '"""float32"""', 'min': '(-1.0)', 'max': '(1.0)'}), "((1, 1, 8, 8), dtype='float32', min=-1.0, max=1.0)\n", (2542, 2592), False, 'import paddle\n'), ((3862, 3884), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (3882, 3884), False, 'import 
paddle\n'), ((4067, 4105), 'paddle.fluid.core.autotune_range', 'paddle.fluid.core.autotune_range', (['(1)', '(2)'], {}), '(1, 2)\n', (4099, 4105), False, 'import paddle\n'), ((4156, 4219), 'paddle.static.data', 'paddle.static.data', ([], {'name': '"""X"""', 'shape': 'data_shape', 'dtype': '"""float32"""'}), "(name='X', shape=data_shape, dtype='float32')\n", (4174, 4219), False, 'import paddle\n'), ((4414, 4443), 'paddle.static.Executor', 'paddle.static.Executor', (['place'], {}), '(place)\n', (4436, 4443), False, 'import paddle\n'), ((5418, 5441), 'paddle.disable_static', 'paddle.disable_static', ([], {}), '()\n', (5439, 5441), False, 'import paddle\n'), ((1353, 1410), 'paddle.set_flags', 'paddle.set_flags', (["{'FLAGS_conv_workspace_size_limit': -1}"], {}), "({'FLAGS_conv_workspace_size_limit': -1})\n", (1369, 1410), False, 'import paddle\n'), ((1423, 1477), 'paddle.set_flags', 'paddle.set_flags', (["{'FLAGS_cudnn_exhaustive_search': 1}"], {}), "({'FLAGS_cudnn_exhaustive_search': 1})\n", (1439, 1477), False, 'import paddle\n'), ((1504, 1562), 'paddle.set_flags', 'paddle.set_flags', (["{'FLAGS_conv_workspace_size_limit': 512}"], {}), "({'FLAGS_conv_workspace_size_limit': 512})\n", (1520, 1562), False, 'import paddle\n'), ((1575, 1629), 'paddle.set_flags', 'paddle.set_flags', (["{'FLAGS_cudnn_exhaustive_search': 0}"], {}), "({'FLAGS_cudnn_exhaustive_search': 0})\n", (1591, 1629), False, 'import paddle\n'), ((2366, 2401), 'paddle.fluid.core.enable_autotune', 'paddle.fluid.core.enable_autotune', ([], {}), '()\n', (2399, 2401), False, 'import paddle\n'), ((2428, 2464), 'paddle.fluid.core.disable_autotune', 'paddle.fluid.core.disable_autotune', ([], {}), '()\n', (2462, 2464), False, 'import paddle\n'), ((3383, 3425), 'paddle.fluid.framework._test_eager_guard', 'paddle.fluid.framework._test_eager_guard', ([], {}), '()\n', (3423, 3425), False, 'import paddle\n'), ((3640, 3682), 'paddle.fluid.framework._test_eager_guard', 'paddle.fluid.framework._test_eager_guard', ([], 
{}), '()\n', (3680, 3682), False, 'import paddle\n'), ((3960, 3995), 'paddle.fluid.core.enable_autotune', 'paddle.fluid.core.enable_autotune', ([], {}), '()\n', (3993, 3995), False, 'import paddle\n'), ((4022, 4058), 'paddle.fluid.core.disable_autotune', 'paddle.fluid.core.disable_autotune', ([], {}), '()\n', (4056, 4058), False, 'import paddle\n'), ((4326, 4367), 'paddle.fluid.core.is_compiled_with_cuda', 'paddle.fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (4365, 4367), False, 'import paddle\n'), ((4303, 4322), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['(0)'], {}), '(0)\n', (4319, 4322), False, 'import paddle\n'), ((4382, 4399), 'paddle.CPUPlace', 'paddle.CPUPlace', ([], {}), '()\n', (4397, 4399), False, 'import paddle\n'), ((4460, 4499), 'paddle.static.default_startup_program', 'paddle.static.default_startup_program', ([], {}), '()\n', (4497, 4499), False, 'import paddle\n'), ((4671, 4706), 'paddle.fluid.core.autotune_status', 'paddle.fluid.core.autotune_status', ([], {}), '()\n', (4704, 4706), False, 'import paddle\n'), ((5576, 5618), 'paddle.fluid.framework._test_eager_guard', 'paddle.fluid.framework._test_eager_guard', ([], {}), '()\n', (5616, 5618), False, 'import paddle\n'), ((5833, 5875), 'paddle.fluid.framework._test_eager_guard', 'paddle.fluid.framework._test_eager_guard', ([], {}), '()\n', (5873, 5875), False, 'import paddle\n'), ((4513, 4549), 'numpy.random.random', 'numpy.random.random', ([], {'size': 'data_shape'}), '(size=data_shape)\n', (4532, 4549), False, 'import numpy\n')] |
import cv2
import numpy as np
from keras import Model
from keras.applications import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
class FeatureExtractor:
def __init__(self):
self.model = VGG16(weights='imagenet', include_top=True)
def extract_features(self, image_path):
img_array = image.img_to_array(cv2.resize(cv2.imread(image_path), (224, 224)))
img_array = np.expand_dims(img_array, axis=0)
img_array = preprocess_input(img_array)
# Get pre-last layer
model_extract_features = Model(inputs=self.model.inputs, outputs=self.model.get_layer('fc2').output)
# Extract features
fc2_features = model_extract_features.predict(img_array)
# Reshape the output
fc2_features = [f[0] for f in fc2_features.reshape((4096, 1))]
return fc2_features
| [
"cv2.imread",
"keras.applications.vgg16.preprocess_input",
"numpy.expand_dims",
"keras.applications.VGG16"
] | [((255, 298), 'keras.applications.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (260, 298), False, 'from keras.applications import VGG16\n'), ((451, 484), 'numpy.expand_dims', 'np.expand_dims', (['img_array'], {'axis': '(0)'}), '(img_array, axis=0)\n', (465, 484), True, 'import numpy as np\n'), ((505, 532), 'keras.applications.vgg16.preprocess_input', 'preprocess_input', (['img_array'], {}), '(img_array)\n', (521, 532), False, 'from keras.applications.vgg16 import preprocess_input\n'), ((394, 416), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (404, 416), False, 'import cv2\n')] |
import numpy as np
import os
import matplotlib.pyplot as plt
import SNN
import data
SAVE_PATH = os.getcwd() + '/weight_mnist'
mnist = data.MNIST(path=["MNIST/t10k-images.idx3-ubyte", "MNIST/t10k-labels.idx1-ubyte"])
w1 = np.load(SAVE_PATH + '1.npy')
w2 = np.load(SAVE_PATH + '2.npy')
Ts = 1e-3
scale = 2
view_max = 2
l1 = SNN.SNNDiscrete(w1, Ts, scale)
l2 = SNN.SNNDiscrete(w2, Ts, scale)
correct = 0
for i in range(mnist.datasize):
xs, ys = mnist.next_batch(1, shuffle=True)
xs = (1-xs[0, :])/Ts
input_mat = np.zeros([784, int(1/Ts*view_max)])
input_mat[range(784), xs.astype(int)] = 1
l1out = l1.forward(input_mat)
l2out = l2.forward(l1out)
peak = np.argmax(l2out, axis=1)
prediction = np.argmin(peak)
label = np.argmax(ys[0])
if prediction == label:
correct += 1
print("test %d" % (i+1))
accuracy = correct / mnist.datasize
print("accuracy = %.4f" % accuracy)
| [
"numpy.argmax",
"os.getcwd",
"SNN.SNNDiscrete",
"data.MNIST",
"numpy.argmin",
"numpy.load"
] | [((135, 220), 'data.MNIST', 'data.MNIST', ([], {'path': "['MNIST/t10k-images.idx3-ubyte', 'MNIST/t10k-labels.idx1-ubyte']"}), "(path=['MNIST/t10k-images.idx3-ubyte',\n 'MNIST/t10k-labels.idx1-ubyte'])\n", (145, 220), False, 'import data\n'), ((223, 251), 'numpy.load', 'np.load', (["(SAVE_PATH + '1.npy')"], {}), "(SAVE_PATH + '1.npy')\n", (230, 251), True, 'import numpy as np\n'), ((257, 285), 'numpy.load', 'np.load', (["(SAVE_PATH + '2.npy')"], {}), "(SAVE_PATH + '2.npy')\n", (264, 285), True, 'import numpy as np\n'), ((326, 356), 'SNN.SNNDiscrete', 'SNN.SNNDiscrete', (['w1', 'Ts', 'scale'], {}), '(w1, Ts, scale)\n', (341, 356), False, 'import SNN\n'), ((362, 392), 'SNN.SNNDiscrete', 'SNN.SNNDiscrete', (['w2', 'Ts', 'scale'], {}), '(w2, Ts, scale)\n', (377, 392), False, 'import SNN\n'), ((97, 108), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (106, 108), False, 'import os\n'), ((687, 711), 'numpy.argmax', 'np.argmax', (['l2out'], {'axis': '(1)'}), '(l2out, axis=1)\n', (696, 711), True, 'import numpy as np\n'), ((729, 744), 'numpy.argmin', 'np.argmin', (['peak'], {}), '(peak)\n', (738, 744), True, 'import numpy as np\n'), ((758, 774), 'numpy.argmax', 'np.argmax', (['ys[0]'], {}), '(ys[0])\n', (767, 774), True, 'import numpy as np\n')] |
"""
Lidar
"""
# requies glob to be installed: "pip3 install glob2"
# requires rplidar to be installed: "pip3 install rplidar"
import time
import math
import pickle
import serial
import numpy as np
from donkeycar.utils import norm_deg, dist, deg2rad, arr_to_img
from PIL import Image, ImageDraw
class RPLidar(object):
'''
https://github.com/SkoltechRobotics/rplidar
'''
def __init__(self, lower_limit = 0, upper_limit = 360, debug=False):
from rplidar import RPLidar
import glob
port_found = False
self.lower_limit = lower_limit
self.upper_limit = upper_limit
temp_list = glob.glob ('/dev/ttyUSB*')
result = []
for a_port in temp_list:
try:
s = serial.Serial(a_port)
s.close()
result.append(a_port)
port_found = True
except serial.SerialException:
pass
if port_found:
self.port = result[0]
self.distances = [] #a list of distance measurements
self.angles = [] # a list of angles corresponding to dist meas above
self.lidar = RPLidar(self.port, baudrate=115200)
self.lidar.clear_input()
time.sleep(1)
self.on = True
#print(self.lidar.get_info())
#print(self.lidar.get_health())
else:
print("No Lidar found")
def update(self):
scans = self.lidar.iter_scans(550)
while self.on:
try:
for scan in scans:
self.distances = [item[2] for item in scan]
self.angles = [item[1] for item in scan]
except serial.serialutil.SerialException:
print('serial.serialutil.SerialException from Lidar. common when shutting down.')
def run_threaded(self):
sorted_distances = []
if (self.angles != []) and (self.distances != []):
angs = np.copy(self.angles)
dists = np.copy(self.distances)
filter_angs = angs[(angs > self.lower_limit) & (angs < self.upper_limit)]
filter_dist = dists[(angs > self.lower_limit) & (angs < self.upper_limit)] #sorts distances based on angle values
angles_ind = np.argsort(filter_angs) # returns the indexes that sorts filter_angs
if angles_ind != []:
sorted_distances = np.argsort(filter_dist) # sorts distances based on angle indexes
return sorted_distances
def shutdown(self):
self.on = False
time.sleep(2)
self.lidar.stop()
self.lidar.stop_motor()
self.lidar.disconnect()
class YDLidar(object):
'''
https://pypi.org/project/PyLidar3/
'''
def __init__(self, port='/dev/ttyUSB0'):
import PyLidar3
self.port = port
self.distances = [] #a list of distance measurements
self.angles = [] # a list of angles corresponding to dist meas above
self.lidar = PyLidar3.YdLidarX4(port)
if(self.lidar.Connect()):
print(self.lidar.GetDeviceInfo())
self.gen = self.lidar.StartScanning()
else:
print("Error connecting to lidar")
self.on = True
def init(self, port='/dev/ttyUSB0'):
import PyLidar3
print("Starting lidar...")
self.port = port
self.distances = [] #a list of distance measurements
self.angles = [] # a list of angles corresponding to dist meas above
self.lidar = PyLidar3.YdLidarX4(port)
if(self.lidar.Connect()):
print(self.lidar.GetDeviceInfo())
gen = self.lidar.StartScanning()
return gen
else:
print("Error connecting to lidar")
self.on = True
#print(self.lidar.get_info())
#print(self.lidar.get_health())
def update(self, lidar, debug = False):
while self.on:
try:
self.data = next(lidar)
for angle in range(0,360):
if(self.data[angle]>1000):
self.angles = [angle]
self.distances = [self.data[angle]]
if debug:
return self.distances, self.angles
except serial.serialutil.SerialException:
print('serial.serialutil.SerialException from Lidar. common when shutting down.')
def run_threaded(self):
return self.distances, self.angles
def shutdown(self):
self.on = False
time.sleep(2)
self.lidar.StopScanning()
self.lidar.Disconnect()
class LidarPlot(object):
'''
takes the raw lidar measurements and plots it to an image
'''
PLOT_TYPE_LINE = 0
PLOT_TYPE_CIRC = 1
def __init__(self, resolution=(500,500),
max_dist=1000, #mm
radius_plot=3,
plot_type=PLOT_TYPE_CIRC):
self.frame = Image.new('RGB', resolution)
self.max_dist = max_dist
self.rad = radius_plot
self.resolution = resolution
if plot_type == self.PLOT_TYPE_CIRC:
self.plot_fn = self.plot_circ
else:
self.plot_fn = self.plot_line
def plot_line(self, img, dist, theta, max_dist, draw):
'''
scale dist so that max_dist is edge of img (mm)
and img is PIL Image, draw the line using the draw ImageDraw object
'''
center = (img.width / 2, img.height / 2)
max_pixel = min(center[0], center[1])
dist = dist / max_dist * max_pixel
if dist < 0 :
dist = 0
elif dist > max_pixel:
dist = max_pixel
theta = np.radians(theta)
sx = math.cos(theta) * dist + center[0]
sy = math.sin(theta) * dist + center[1]
ex = math.cos(theta) * (dist + self.rad) + center[0]
ey = math.sin(theta) * (dist + self.rad) + center[1]
fill = 128
draw.line((sx,sy, ex, ey), fill=(fill, fill, fill), width=1)
def plot_circ(self, img, dist, theta, max_dist, draw):
'''
scale dist so that max_dist is edge of img (mm)
and img is PIL Image, draw the circle using the draw ImageDraw object
'''
center = (img.width / 2, img.height / 2)
max_pixel = min(center[0], center[1])
dist = dist / max_dist * max_pixel
if dist < 0 :
dist = 0
elif dist > max_pixel:
dist = max_pixel
theta = np.radians(theta)
sx = int(math.cos(theta) * dist + center[0])
sy = int(math.sin(theta) * dist + center[1])
ex = int(math.cos(theta) * (dist + 2 * self.rad) + center[0])
ey = int(math.sin(theta) * (dist + 2 * self.rad) + center[1])
fill = 128
draw.ellipse((min(sx, ex), min(sy, ey), max(sx, ex), max(sy, ey)), fill=(fill, fill, fill))
def plot_scan(self, img, distances, angles, max_dist, draw):
for dist, angle in zip(distances, angles):
self.plot_fn(img, dist, angle, max_dist, draw)
def run(self, distances, angles):
'''
takes two lists of equal length, one of distance values, the other of angles corresponding to the dist meas
'''
self.frame = Image.new('RGB', self.resolution, (255, 255, 255))
draw = ImageDraw.Draw(self.frame)
self.plot_scan(self.frame, distances, angles, self.max_dist, draw)
return self.frame
def shutdown(self):
pass
class BreezySLAM(object):
'''
https://github.com/simondlevy/BreezySLAM
'''
def __init__(self, MAP_SIZE_PIXELS=500, MAP_SIZE_METERS=10):
from breezyslam.algorithms import RMHC_SLAM
from breezyslam.sensors import Laser
laser_model = Laser(scan_size=360, scan_rate_hz=10., detection_angle_degrees=360, distance_no_detection_mm=12000)
MAP_QUALITY=5
self.slam = RMHC_SLAM(laser_model, MAP_SIZE_PIXELS, MAP_SIZE_METERS, MAP_QUALITY)
def run(self, distances, angles, map_bytes):
self.slam.update(distances, scan_angles_degrees=angles)
x, y, theta = self.slam.getpos()
if map_bytes is not None:
self.slam.getmap(map_bytes)
#print('x', x, 'y', y, 'theta', norm_deg(theta))
return x, y, deg2rad(norm_deg(theta))
def shutdown(self):
pass
class BreezyMap(object):
'''
bitmap that may optionally be constructed by BreezySLAM
'''
def __init__(self, MAP_SIZE_PIXELS=500):
self.mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
def run(self):
return self.mapbytes
def shutdown(self):
pass
class MapToImage(object):
def __init__(self, resolution=(500, 500)):
self.resolution = resolution
def run(self, map_bytes):
np_arr = np.array(map_bytes).reshape(self.resolution)
return arr_to_img(np_arr)
def shutdown(self):
pass
| [
"PyLidar3.YdLidarX4",
"numpy.radians",
"breezyslam.sensors.Laser",
"numpy.copy",
"donkeycar.utils.norm_deg",
"PIL.Image.new",
"breezyslam.algorithms.RMHC_SLAM",
"rplidar.RPLidar",
"time.sleep",
"numpy.argsort",
"math.cos",
"PIL.ImageDraw.Draw",
"numpy.array",
"serial.Serial",
"math.sin",... | [((638, 663), 'glob.glob', 'glob.glob', (['"""/dev/ttyUSB*"""'], {}), "('/dev/ttyUSB*')\n", (647, 663), False, 'import glob\n'), ((2591, 2604), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2601, 2604), False, 'import time\n'), ((3028, 3052), 'PyLidar3.YdLidarX4', 'PyLidar3.YdLidarX4', (['port'], {}), '(port)\n', (3046, 3052), False, 'import PyLidar3\n'), ((3554, 3578), 'PyLidar3.YdLidarX4', 'PyLidar3.YdLidarX4', (['port'], {}), '(port)\n', (3572, 3578), False, 'import PyLidar3\n'), ((4573, 4586), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (4583, 4586), False, 'import time\n'), ((4954, 4982), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'resolution'], {}), "('RGB', resolution)\n", (4963, 4982), False, 'from PIL import Image, ImageDraw\n'), ((5713, 5730), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (5723, 5730), True, 'import numpy as np\n'), ((6520, 6537), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (6530, 6537), True, 'import numpy as np\n'), ((7293, 7343), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'self.resolution', '(255, 255, 255)'], {}), "('RGB', self.resolution, (255, 255, 255))\n", (7302, 7343), False, 'from PIL import Image, ImageDraw\n'), ((7359, 7385), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self.frame'], {}), '(self.frame)\n', (7373, 7385), False, 'from PIL import Image, ImageDraw\n'), ((7799, 7903), 'breezyslam.sensors.Laser', 'Laser', ([], {'scan_size': '(360)', 'scan_rate_hz': '(10.0)', 'detection_angle_degrees': '(360)', 'distance_no_detection_mm': '(12000)'}), '(scan_size=360, scan_rate_hz=10.0, detection_angle_degrees=360,\n distance_no_detection_mm=12000)\n', (7804, 7903), False, 'from breezyslam.sensors import Laser\n'), ((7941, 8010), 'breezyslam.algorithms.RMHC_SLAM', 'RMHC_SLAM', (['laser_model', 'MAP_SIZE_PIXELS', 'MAP_SIZE_METERS', 'MAP_QUALITY'], {}), '(laser_model, MAP_SIZE_PIXELS, MAP_SIZE_METERS, MAP_QUALITY)\n', (7950, 8010), False, 'from breezyslam.algorithms 
import RMHC_SLAM\n'), ((8921, 8939), 'donkeycar.utils.arr_to_img', 'arr_to_img', (['np_arr'], {}), '(np_arr)\n', (8931, 8939), False, 'from donkeycar.utils import norm_deg, dist, deg2rad, arr_to_img\n'), ((1168, 1203), 'rplidar.RPLidar', 'RPLidar', (['self.port'], {'baudrate': '(115200)'}), '(self.port, baudrate=115200)\n', (1175, 1203), False, 'from rplidar import RPLidar\n'), ((1253, 1266), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1263, 1266), False, 'import time\n'), ((1987, 2007), 'numpy.copy', 'np.copy', (['self.angles'], {}), '(self.angles)\n', (1994, 2007), True, 'import numpy as np\n'), ((2028, 2051), 'numpy.copy', 'np.copy', (['self.distances'], {}), '(self.distances)\n', (2035, 2051), True, 'import numpy as np\n'), ((2291, 2314), 'numpy.argsort', 'np.argsort', (['filter_angs'], {}), '(filter_angs)\n', (2301, 2314), True, 'import numpy as np\n'), ((755, 776), 'serial.Serial', 'serial.Serial', (['a_port'], {}), '(a_port)\n', (768, 776), False, 'import serial\n'), ((2436, 2459), 'numpy.argsort', 'np.argsort', (['filter_dist'], {}), '(filter_dist)\n', (2446, 2459), True, 'import numpy as np\n'), ((5744, 5759), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (5752, 5759), False, 'import math\n'), ((5792, 5807), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (5800, 5807), False, 'import math\n'), ((5840, 5855), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (5848, 5855), False, 'import math\n'), ((5901, 5916), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (5909, 5916), False, 'import math\n'), ((8341, 8356), 'donkeycar.utils.norm_deg', 'norm_deg', (['theta'], {}), '(theta)\n', (8349, 8356), False, 'from donkeycar.utils import norm_deg, dist, deg2rad, arr_to_img\n'), ((8861, 8880), 'numpy.array', 'np.array', (['map_bytes'], {}), '(map_bytes)\n', (8869, 8880), True, 'import numpy as np\n'), ((6555, 6570), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (6563, 6570), False, 'import math\n'), ((6608, 6623), 'math.sin', 
'math.sin', (['theta'], {}), '(theta)\n', (6616, 6623), False, 'import math\n'), ((6661, 6676), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (6669, 6676), False, 'import math\n'), ((6731, 6746), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (6739, 6746), False, 'import math\n')] |
"""By: Xiaochi (<NAME>: github.com/XC-Li"""
from gensim.models.doc2vec import Doc2Vec
import numpy as np
from scipy.sparse import hstack as sparse_hstack
class D2V(object):
def __init__(self, file):
self.model = Doc2Vec.load(file)
def fit(self, X):
pass
def transform(self, X):
temp = []
for speech in X:
temp.append(self.model.infer_vector(speech))
return np.vstack(temp)
class StackedD2V(object):
def __init__(self, file, vectorizer):
self.d2v = Doc2Vec.load(file)
self.vectorizer = vectorizer
def fit(self, X):
self.vectorizer.fit(X)
def d2v_transform(self, X):
temp = []
for speech in X:
temp.append(self.d2v.infer_vector(speech))
return np.vstack(temp)
def transform(self, X):
bow = self.vectorizer.transform(X)
d2v_emb = self.d2v_transform(X)
combined_emb = sparse_hstack((bow, d2v_emb))
return combined_emb
| [
"scipy.sparse.hstack",
"numpy.vstack",
"gensim.models.doc2vec.Doc2Vec.load"
] | [((226, 244), 'gensim.models.doc2vec.Doc2Vec.load', 'Doc2Vec.load', (['file'], {}), '(file)\n', (238, 244), False, 'from gensim.models.doc2vec import Doc2Vec\n'), ((425, 440), 'numpy.vstack', 'np.vstack', (['temp'], {}), '(temp)\n', (434, 440), True, 'import numpy as np\n'), ((530, 548), 'gensim.models.doc2vec.Doc2Vec.load', 'Doc2Vec.load', (['file'], {}), '(file)\n', (542, 548), False, 'from gensim.models.doc2vec import Doc2Vec\n'), ((786, 801), 'numpy.vstack', 'np.vstack', (['temp'], {}), '(temp)\n', (795, 801), True, 'import numpy as np\n'), ((937, 966), 'scipy.sparse.hstack', 'sparse_hstack', (['(bow, d2v_emb)'], {}), '((bow, d2v_emb))\n', (950, 966), True, 'from scipy.sparse import hstack as sparse_hstack\n')] |
from collections.abc import Sequence
import random
import cv2
import torch
import numpy as np
def set_all_randomness(seed, set_for_cuda=True):
"""Sets the random seed for numpy, pytorch, python.random
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if set_for_cuda:
torch.cuda.manual_seed_all(seed)
def bgr2rgb(frame):
    """Reorder the channels of a BGR numpy image to RGB and return the result."""
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
def float_to_uint8(img):
    """Scale an image from the float range [0, 1] to [0, 255] and cast to uint8."""
    scaled = 255. * img
    return scaled.astype("uint8")
def polygon_to_mask(polygon_list, img_height, img_width):
    """Rasterize a polygon into a binary uint8 mask.

    Arguments:
        polygon_list (list): vertices as integer (x, y) pairs,
            i.e. [(x1, y1), (x2, y2), ...]
        img_height (int): height of the target mask
        img_width (int): width of the target mask
    Returns:
        A binary mask of uint8 type with shape HxW
    """
    canvas = np.zeros((img_height, img_width), dtype=np.uint8)
    vertices = np.array([polygon_list], dtype=np.int32)
    # fillPoly writes the value 1 into every pixel inside the polygon.
    cv2.fillPoly(canvas, vertices, 1)
    return canvas
def np_to_tensor(img):
    """Convert a HxWxC numpy image into a CxHxW PyTorch tensor (shares memory)."""
    tensor = torch.from_numpy(img)
    return tensor.permute(-1, 0, 1)
def tensor_to_np(tensor):
    """Convert a CxHxW PyTorch tensor back into a HxWxC numpy array."""
    hwc = tensor.permute(1, 2, 0)
    return hwc.numpy()
def validate_fraction_extra(fraction_extra):
    """Validate fraction_extra: it must be a number or a length-2 sequence.

    Raises:
        TypeError: if fraction_extra is neither a number nor a Sequence.
        ValueError: if fraction_extra is a Sequence whose length is not 2.
    """
    if not isinstance(fraction_extra, (int, float, Sequence)):
        message = ("fraction_extra must be a int or float or sequence."
                   " Got {}".format(type(fraction_extra)))
        raise TypeError(message)
    if isinstance(fraction_extra, Sequence) and len(fraction_extra) != 2:
        message = ("If fraction_extra is a sequence it "
                   "should have length of 2. Got length of {}".format(
                       len(fraction_extra)))
        raise ValueError(message)
def sample_fraction_extra(fraction_extra):
    """Return fraction_extra unchanged when it is a number; otherwise draw a
    value uniformly between its first and second elements.
    """
    if isinstance(fraction_extra, (int, float)):
        return fraction_extra
    return random.uniform(fraction_extra[0], fraction_extra[1])
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"cv2.fillPoly",
"random.uniform",
"random.seed",
"torch.from_numpy",
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"cv2.cvtColor"
] | [((219, 236), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (230, 236), False, 'import random\n'), ((241, 261), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (255, 261), True, 'import numpy as np\n'), ((266, 289), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (283, 289), False, 'import torch\n'), ((473, 511), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (485, 511), False, 'import cv2\n'), ((1187, 1236), 'numpy.zeros', 'np.zeros', (['(img_height, img_width)'], {'dtype': 'np.uint8'}), '((img_height, img_width), dtype=np.uint8)\n', (1195, 1236), True, 'import numpy as np\n'), ((1247, 1287), 'numpy.array', 'np.array', (['[polygon_list]'], {'dtype': 'np.int32'}), '([polygon_list], dtype=np.int32)\n', (1255, 1287), True, 'import numpy as np\n'), ((1292, 1318), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'pts', '(1)'], {}), '(mask, pts, 1)\n', (1304, 1318), False, 'import cv2\n'), ((319, 351), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (345, 351), False, 'import torch\n'), ((2589, 2641), 'random.uniform', 'random.uniform', (['fraction_extra[0]', 'fraction_extra[1]'], {}), '(fraction_extra[0], fraction_extra[1])\n', (2603, 2641), False, 'import random\n'), ((1465, 1486), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (1481, 1486), False, 'import torch\n')] |
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import time
import unittest
from rlgraph.environments import OpenAIGymEnv
from rlgraph.agents import IMPALAAgent
from rlgraph.spaces import FloatBox
from rlgraph.utils import root_logger
from rlgraph.tests.test_util import config_from_path
class TestIMPALAAgentLongTaskLearning(unittest.TestCase):
    """
    Tests whether the DQNAgent can learn in tough environments.
    """
    root_logger.setLevel(level=logging.INFO)

    def test_impala_on_outbreak(self):
        """
        Creates a DQNAgent and runs it via a Runner on an openAI Pong Env.
        """
        # Breakout with DeepMind-style settings (frameskip, random no-ops,
        # episodic life) and no rendering.
        env = OpenAIGymEnv("Breakout-v0", frameskip=4, max_num_noops=30, episodic_life=True, visualize=False)
        agent_config = config_from_path("configs/impala_agent_for_breakout.json")
        agent = IMPALAAgent.from_spec(
            agent_config,
            state_space=env.state_space,
            action_space=env.action_space,
        )

        num_updates = 4000000
        mean_returns = []
        for update_idx in range(num_updates):
            result = agent.update()
            avg_return = self._calc_mean_return(result)
            mean_returns.append(avg_return)
            print("i={} Loss={:.4} Avg-reward={:.2}".format(update_idx, float(result[1]), avg_return))

        # Give the distributed workers a moment to settle before/after teardown.
        time.sleep(3)
        agent.terminate()
        time.sleep(3)

    @staticmethod
    def _calc_mean_return(records):
        """Average the per-episode returns contained in one update's sample records."""
        num_samples = records[3]["rewards"].size
        rewards = records[3]["rewards"].reshape((num_samples,))
        terminals = records[3]["terminals"].reshape((num_samples,))
        returns = []
        episode_return = 0.0
        for reward, terminal in zip(rewards, terminals):
            episode_return += reward
            if terminal:
                returns.append(episode_return)
                episode_return = 0.0
        return np.mean(returns)
| [
"numpy.mean",
"rlgraph.agents.IMPALAAgent.from_spec",
"time.sleep",
"rlgraph.tests.test_util.config_from_path",
"rlgraph.utils.root_logger.setLevel",
"rlgraph.environments.OpenAIGymEnv"
] | [((1221, 1261), 'rlgraph.utils.root_logger.setLevel', 'root_logger.setLevel', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1241, 1261), False, 'from rlgraph.utils import root_logger\n'), ((1842, 1942), 'rlgraph.environments.OpenAIGymEnv', 'OpenAIGymEnv', (['"""Breakout-v0"""'], {'frameskip': '(4)', 'max_num_noops': '(30)', 'episodic_life': '(True)', 'visualize': '(False)'}), "('Breakout-v0', frameskip=4, max_num_noops=30, episodic_life=\n True, visualize=False)\n", (1854, 1942), False, 'from rlgraph.environments import OpenAIGymEnv\n'), ((1956, 2014), 'rlgraph.tests.test_util.config_from_path', 'config_from_path', (['"""configs/impala_agent_for_breakout.json"""'], {}), "('configs/impala_agent_for_breakout.json')\n", (1972, 2014), False, 'from rlgraph.tests.test_util import config_from_path\n'), ((2031, 2126), 'rlgraph.agents.IMPALAAgent.from_spec', 'IMPALAAgent.from_spec', (['config_'], {'state_space': 'env.state_space', 'action_space': 'env.action_space'}), '(config_, state_space=env.state_space, action_space=\n env.action_space)\n', (2052, 2126), False, 'from rlgraph.agents import IMPALAAgent\n'), ((2500, 2513), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2510, 2513), False, 'import time\n'), ((2548, 2561), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2558, 2561), False, 'import time\n'), ((2998, 3014), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (3005, 3014), True, 'import numpy as np\n')] |
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
"""
Simulate the lid driven cavity
dt rho + dx qx + dy qy = 0
dt qx + dx (qx^2/rho + c^2 rho) + dy (qx*qy/rho) = 0
dt qy + dx (qx*qy/rho) + dy (qy^2/rho + c^2 rho) = 0
"""
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
import pylbm
# pylint: disable=redefined-outer-name
X, Y = sp.symbols('X, Y')
RHO, QX, QY = sp.symbols('rho, qx, qy')
LA = sp.symbols('lambda', constants=True)
# pylint: disable=unused-argument
def bc_up(f, m, x, y, rho_o, driven_velocity):
    """
    Boundary values on the top bound: fixed density and a horizontal
    driven velocity (the moving lid), no vertical momentum.
    """
    m[QY] = 0.
    m[QX] = rho_o * driven_velocity
    m[RHO] = rho_o
def vorticity(sol):
    """
    Compute (the absolute value of) the vorticity of the solution on the
    interior cells, using centered differences of the velocity u = q / rho.
    """
    density = sol.m[RHO]
    ux = sol.m[QX] / density
    uy = sol.m[QY] / density
    return np.abs(
        ux[1:-1, 2:] - ux[1:-1, :-2] -
        uy[2:, 1:-1] + uy[:-2, 1:-1]
    )
def flow_lines(sol, nlines, time_length, scale=0.5):
    """
    compute the flow lines of the solution
    Parameters
    ----------
    sol : :py:class:`Simulation<pylbm.simulation.Simulation>`
        the solution given by pylbm
    nlines : int (number of flow lines)
    time_length : double (time during which we follow the lines)
    scale : double (velocity scale (default 0.5))
    Returns
    -------
    list
        lines
    """
    # Pointwise velocity field u = q / rho.
    u_x = sol.m[QX] / sol.m[RHO]
    u_y = sol.m[QY] / sol.m[RHO]
    # if scale is None:
    #     scale = max(np.linalg.norm(u_x, np.inf), np.linalg.norm(u_y, np.inf))
    lines = []
    xmin, xmax = sol.domain.geom.bounds[0]
    ymin, ymax = sol.domain.geom.bounds[1]
    dx = sol.domain.dx
    nx, ny = sol.domain.shape_in
    for _ in range(nlines):
        # begin a new line at a uniformly random seed point in the domain
        cont = True # boolean to continue the line
        x = xmin + (xmax-xmin) * np.random.rand()
        y = ymin + (ymax-ymin) * np.random.rand()
        line_x, line_y = [x], [y]
        t = 0
        while cont:
            # indices of the cell containing the current position
            i, j = int((x-xmin)/(xmax-xmin)*nx), int((y-ymin)/(ymax-ymin)*ny)
            uxij, uyij = u_x[i, j], u_y[i, j]
            if uxij == 0 and uyij == 0:
                # stagnation point: the line cannot advance any further
                cont = False
            else:
                # explicit Euler step; dt is chosen so the displacement
                # has magnitude scale*dx regardless of the local speed
                dt = dx*scale / np.sqrt(uxij**2+uyij**2)
                x += uxij*dt
                y += uyij*dt
                t += dt
                if x < xmin or x >= xmax or y < ymin or y >= ymax:
                    # the line left the domain: stop it
                    cont = False
                else:
                    line_x.append(x)
                    line_y.append(y)
            if t >= time_length:
                cont = False
        lines.append([np.array(line_x), np.array(line_y)])
    return lines
# pylint: disable=invalid-name
def run(space_step,
        final_time,
        generator="cython",
        sorder=None,
        with_plot=True):
    """
    Simulate the lid driven cavity with a D2Q9 lattice Boltzmann scheme.

    Parameters
    ----------
    space_step: double
        spatial step
    final_time: double
        final time
    generator: string
        pylbm generator
    sorder: list
        storage order
    with_plot: boolean
        if True plot the solution otherwise just compute the solution
    Returns
    -------
    sol
        <class 'pylbm.simulation.Simulation'>
    """
    # parameters
    scheme_name = 'Geier'
    xmin, xmax, ymin, ymax = 0., 1., 0., 1. # bounds of the domain
    la = 1. # velocity of the scheme
    rho_o = 1. # reference value of the mass
    driven_velocity = 0.05 # boundary value of the velocity
    mu = 5.e-6 # bulk viscosity
    zeta = 100*mu # shear viscosity

    def moments_choice(scheme_name, mu, zeta):
        # Return (polynomials, equilibria, relaxation parameters) for the
        # chosen D2Q9 moment basis. The relaxation rates s_1/s_2 are derived
        # from the viscosities with the usual half-way correction 1/(.5+sigma).
        if scheme_name == 'dHumiere':
            dummy = 1./rho_o
            QX2 = dummy*QX**2
            QY2 = dummy*QY**2
            Q2 = QX2+QY2
            QXY = dummy*QX*QY
            polynomials = [
                1,
                X, Y,
                3*(X**2+Y**2)-4*LA**2,
                0.5*(9*(X**2+Y**2)**2-21*(X**2+Y**2)*LA**2+8*LA**4),
                3*X*(X**2+Y**2)-5*X*LA**2, 3*Y*(X**2+Y**2)-5*Y*LA**2,
                X**2-Y**2, X*Y
            ]
            equilibrium = [
                RHO,
                QX, QY,
                -2*RHO*LA**2 + 3*Q2,
                RHO*LA**2 - 3*Q2,
                -QX*LA**2, -QY*LA**2,
                QX2 - QY2, QXY
            ]
            dummy = 3.0/(la*rho_o*space_step)
            sigma_1 = dummy*zeta
            sigma_2 = dummy*mu
            s_1 = 1/(.5+sigma_1)
            s_2 = 1/(.5+sigma_2)
        if scheme_name == 'Geier':
            UX, UY = QX/RHO, QY/RHO
            RHOU2 = RHO * (UX**2 + UY**2)
            polynomials = [
                1, X, Y,
                X**2 + Y**2,
                X*Y**2,
                Y*X**2,
                X**2*Y**2,
                X**2 - Y**2,
                X*Y,
            ]
            equilibrium = [
                RHO, QX, QY,
                RHOU2 + 2/3*RHO*LA**2,
                QX*(LA**2/3+UY**2),
                QY*(LA**2/3+UX**2),
                RHO*(LA**2/3+UX**2)*(LA**2/3+UY**2),
                RHO*(UX**2 - UY**2),
                RHO*UX*UY,
            ]
            dummy = 3.0/(la*rho_o*space_step)
            sigma_1 = dummy*(zeta - 2*mu/3)
            sigma_2 = dummy*mu
            s_1 = 1/(.5+sigma_1)
            s_2 = 1/(.5+sigma_2)
        if scheme_name == 'Lallemand':
            dummy = 1./rho_o
            QX2 = dummy*QX**2
            QY2 = dummy*QY**2
            Q2 = QX2+QY2
            QXY = dummy*QX*QY
            polynomials = [
                1, X, Y,
                X**2 + Y**2,
                X*(X**2+Y**2),
                Y*(X**2+Y**2),
                (X**2+Y**2)**2,
                X**2 - Y**2,
                X*Y,
            ]
            equilibrium = [
                RHO,
                QX, QY,
                Q2+2/3*LA**2*RHO,
                4/3*QX*LA**2,
                4/3*QY*LA**2,
                ((21*Q2+6*RHO*LA**2)*LA**2 - (6*Q2-2*RHO*LA**2))/9,
                QX2-QY2,
                QXY,
            ]
            dummy = 3.0/(la*rho_o*space_step)
            sigma_1 = dummy*zeta
            sigma_2 = dummy*mu
            s_1 = 1/(.5+sigma_1)
            s_2 = 1/(.5+sigma_2)
        # conserved moments are not relaxed (s = 0)
        s = [0., 0., 0., s_1, s_1, s_1, s_1, s_2, s_2]
        return polynomials, equilibrium, s

    polynomials, equilibrium, s = moments_choice(scheme_name, mu, zeta)

    simu_cfg = {
        'parameters': {LA: la},
        'box': {
            'x': [xmin, xmax],
            'y': [ymin, ymax],
            'label': [0, 0, 0, 1]  # label 1 is the driven (top) boundary
        },
        'space_step': space_step,
        'scheme_velocity': LA,
        'schemes': [
            {
                'velocities': list(range(9)),
                'polynomials': polynomials,
                'relaxation_parameters': s,
                'equilibrium': equilibrium,
                'conserved_moments': [RHO, QX, QY],
            },
        ],
        'init': {RHO: rho_o,
                 QX: 0.,
                 QY: 0.},
        'boundary_conditions': {
            0: {'method': {0: pylbm.bc.BouzidiBounceBack}},
            1: {
                'method': {0: pylbm.bc.BouzidiBounceBack},
                'value': (bc_up, (rho_o, driven_velocity))
            }
        },
        'generator': generator,
        'relative_velocity': [QX/RHO, QY/RHO],
        # 'show_code': True,
    }

    sol = pylbm.Simulation(simu_cfg, sorder=sorder)

    while sol.t < final_time:
        sol.one_time_step()

    # BUG FIX: `with_plot` was documented but previously ignored -- the
    # solution was plotted unconditionally. Plot only when requested.
    if with_plot:
        viewer = pylbm.viewer.matplotlib_viewer
        fig = viewer.Fig()
        axe = fig[0]
        axe.grid(visible=False)
        axe.xaxis_set_visible(False)
        axe.yaxis_set_visible(False)
        axe.SurfaceImage(
            vorticity(sol),
            cmap='jet', clim=[0, .1], alpha=0.25,
        )
        lines = flow_lines(sol, 10, 2)
        for linek in lines:
            axe.CurveLine(linek[0], linek[1], alpha=1)
        plt.show()
    return sol
if __name__ == '__main__':
    # pylint: disable=invalid-name
    # Run the lid driven cavity on a 128x128 grid up to t = 100.
    space_step = 1./128
    final_time = 100
    run(space_step, final_time)
| [
"numpy.abs",
"numpy.sqrt",
"numpy.random.rand",
"pylbm.Simulation",
"sympy.symbols",
"numpy.array",
"matplotlib.pyplot.show"
] | [((389, 407), 'sympy.symbols', 'sp.symbols', (['"""X, Y"""'], {}), "('X, Y')\n", (399, 407), True, 'import sympy as sp\n'), ((422, 447), 'sympy.symbols', 'sp.symbols', (['"""rho, qx, qy"""'], {}), "('rho, qx, qy')\n", (432, 447), True, 'import sympy as sp\n'), ((453, 489), 'sympy.symbols', 'sp.symbols', (['"""lambda"""'], {'constants': '(True)'}), "('lambda', constants=True)\n", (463, 489), True, 'import sympy as sp\n'), ((855, 930), 'numpy.abs', 'np.abs', (['(qx_n[1:-1, 2:] - qx_n[1:-1, :-2] - qy_n[2:, 1:-1] + qy_n[:-2, 1:-1])'], {}), '(qx_n[1:-1, 2:] - qx_n[1:-1, :-2] - qy_n[2:, 1:-1] + qy_n[:-2, 1:-1])\n', (861, 930), True, 'import numpy as np\n'), ((7505, 7546), 'pylbm.Simulation', 'pylbm.Simulation', (['simu_cfg'], {'sorder': 'sorder'}), '(simu_cfg, sorder=sorder)\n', (7521, 7546), False, 'import pylbm\n'), ((7998, 8008), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8006, 8008), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1898), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1896, 1898), True, 'import numpy as np\n'), ((1932, 1948), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1946, 1948), True, 'import numpy as np\n'), ((2647, 2663), 'numpy.array', 'np.array', (['line_x'], {}), '(line_x)\n', (2655, 2663), True, 'import numpy as np\n'), ((2665, 2681), 'numpy.array', 'np.array', (['line_y'], {}), '(line_y)\n', (2673, 2681), True, 'import numpy as np\n'), ((2260, 2290), 'numpy.sqrt', 'np.sqrt', (['(uxij ** 2 + uyij ** 2)'], {}), '(uxij ** 2 + uyij ** 2)\n', (2267, 2290), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Author: <NAME> <<EMAIL>>
License: MIT
"""
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Parameters
    fkind = "float32"  # dtype the binary files were written with

    # Initialize figure
    fig = plt.figure(figsize = (10, 5), facecolor = "white")
    fig.patch.set_alpha(0.)
    ax1 = fig.add_subplot(1, 2, 1)
    ax2 = fig.add_subplot(1, 2, 2)
    ax1.patch.set_alpha(1.)
    ax2.patch.set_alpha(1.)

    # 1D Kernel Density Estimation
    data1d = np.fromfile("../examples/rand/data1d.bin", dtype = fkind)
    data1d_kde = np.fromfile("../examples/rand/data1d_kde.bin", dtype = fkind)
    xi = np.fromfile("../examples/rand/data1d_kde_xaxis.bin", dtype = fkind)
    n = 1500  # presumably the number of 1D samples -- TODO confirm against generator
    nx = len(xi)
    # BUG FIX: the 'normed' keyword was deprecated in Matplotlib 2.1 and
    # removed in 3.1; 'density' is the drop-in replacement.
    ax1.hist(data1d, 30, density = True, color = "black", alpha = 0.8)
    ax1.plot(xi, data1d_kde, color = "red", linewidth = 2)
    ax1.set_title("1D KDE")
    ax1.set_xlabel("X")
    ax1.set_ylabel("PDF")
    ax1.set_xlim(np.min(xi), np.max(xi))
    ax1.grid(True, linestyle = ":")

    # 2D Kernel Density Estimation
    data2d = np.fromfile("../examples/rand/data2d.bin", dtype = fkind)
    data2d_kde = np.fromfile("../examples/rand/data2d_kde.bin", dtype = fkind)
    xi = np.fromfile("../examples/rand/data2d_kde_xaxis.bin", dtype = fkind)
    yi = np.fromfile("../examples/rand/data2d_kde_yaxis.bin", dtype = fkind)
    n = 750  # presumably the number of 2D samples -- TODO confirm against generator
    nx, ny = len(xi), len(yi)
    # The flat binary files are Fortran-ordered, hence order = "F".
    data2d = np.reshape(data2d, (n, 2), order = "F")
    data2d_kde = np.reshape(data2d_kde, (nx, ny), order = "F")
    ax2.contour(xi, yi, data2d_kde.T, 30)
    ax2.plot(data2d[:,0], data2d[:,1], color = "black", linestyle = "none", marker = "o", markersize = 6, alpha = 0.33)
    ax2.set_title("2D KDE")
    ax2.set_xlabel("X")
    ax2.set_ylabel("Y")
    ax2.set_xlim(np.min(xi), np.max(xi))
    ax2.set_ylim(np.min(yi), np.max(yi))
    ax2.grid(True, linestyle = ":")

    fig.tight_layout()
"numpy.fromfile",
"numpy.reshape",
"numpy.max",
"matplotlib.pyplot.figure",
"numpy.min"
] | [((230, 276), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)', 'facecolor': '"""white"""'}), "(figsize=(10, 5), facecolor='white')\n", (240, 276), True, 'import matplotlib.pyplot as plt\n'), ((488, 543), 'numpy.fromfile', 'np.fromfile', (['"""../examples/rand/data1d.bin"""'], {'dtype': 'fkind'}), "('../examples/rand/data1d.bin', dtype=fkind)\n", (499, 543), True, 'import numpy as np\n'), ((563, 622), 'numpy.fromfile', 'np.fromfile', (['"""../examples/rand/data1d_kde.bin"""'], {'dtype': 'fkind'}), "('../examples/rand/data1d_kde.bin', dtype=fkind)\n", (574, 622), True, 'import numpy as np\n'), ((634, 699), 'numpy.fromfile', 'np.fromfile', (['"""../examples/rand/data1d_kde_xaxis.bin"""'], {'dtype': 'fkind'}), "('../examples/rand/data1d_kde_xaxis.bin', dtype=fkind)\n", (645, 699), True, 'import numpy as np\n'), ((1075, 1130), 'numpy.fromfile', 'np.fromfile', (['"""../examples/rand/data2d.bin"""'], {'dtype': 'fkind'}), "('../examples/rand/data2d.bin', dtype=fkind)\n", (1086, 1130), True, 'import numpy as np\n'), ((1150, 1209), 'numpy.fromfile', 'np.fromfile', (['"""../examples/rand/data2d_kde.bin"""'], {'dtype': 'fkind'}), "('../examples/rand/data2d_kde.bin', dtype=fkind)\n", (1161, 1209), True, 'import numpy as np\n'), ((1221, 1286), 'numpy.fromfile', 'np.fromfile', (['"""../examples/rand/data2d_kde_xaxis.bin"""'], {'dtype': 'fkind'}), "('../examples/rand/data2d_kde_xaxis.bin', dtype=fkind)\n", (1232, 1286), True, 'import numpy as np\n'), ((1298, 1363), 'numpy.fromfile', 'np.fromfile', (['"""../examples/rand/data2d_kde_yaxis.bin"""'], {'dtype': 'fkind'}), "('../examples/rand/data2d_kde_yaxis.bin', dtype=fkind)\n", (1309, 1363), True, 'import numpy as np\n'), ((1426, 1463), 'numpy.reshape', 'np.reshape', (['data2d', '(n, 2)'], {'order': '"""F"""'}), "(data2d, (n, 2), order='F')\n", (1436, 1463), True, 'import numpy as np\n'), ((1483, 1526), 'numpy.reshape', 'np.reshape', (['data2d_kde', '(nx, ny)'], {'order': '"""F"""'}), "(data2d_kde, (nx, ny), 
order='F')\n", (1493, 1526), True, 'import numpy as np\n'), ((962, 972), 'numpy.min', 'np.min', (['xi'], {}), '(xi)\n', (968, 972), True, 'import numpy as np\n'), ((974, 984), 'numpy.max', 'np.max', (['xi'], {}), '(xi)\n', (980, 984), True, 'import numpy as np\n'), ((1789, 1799), 'numpy.min', 'np.min', (['xi'], {}), '(xi)\n', (1795, 1799), True, 'import numpy as np\n'), ((1801, 1811), 'numpy.max', 'np.max', (['xi'], {}), '(xi)\n', (1807, 1811), True, 'import numpy as np\n'), ((1830, 1840), 'numpy.min', 'np.min', (['yi'], {}), '(yi)\n', (1836, 1840), True, 'import numpy as np\n'), ((1842, 1852), 'numpy.max', 'np.max', (['yi'], {}), '(yi)\n', (1848, 1852), True, 'import numpy as np\n')] |
from numpy.testing import TestCase, run_module_suite, assert_allclose
from scipy.linalg import cython_lapack as cython_lapack
from scipy.linalg import lapack
class test_lamch(TestCase):
    """Check the cython LAPACK machine-parameter wrappers against f2py lapack."""

    def test_slamch(self):
        # Query every documented slamch code (eps, sfmin, base, ...).
        for code in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']:
            assert_allclose(cython_lapack._test_slamch(code),
                            lapack.slamch(code))

    def test_dlamch(self):
        # Same check for the double-precision variant.
        for code in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']:
            assert_allclose(cython_lapack._test_dlamch(code),
                            lapack.dlamch(code))
if __name__ == "__main__":
    # Discover and run this module's tests with numpy's legacy runner.
    run_module_suite()
| [
"scipy.linalg.cython_lapack._test_slamch",
"scipy.linalg.lapack.slamch",
"scipy.linalg.cython_lapack._test_dlamch",
"numpy.testing.run_module_suite",
"scipy.linalg.lapack.dlamch"
] | [((664, 682), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (680, 682), False, 'from numpy.testing import TestCase, run_module_suite, assert_allclose\n'), ((332, 361), 'scipy.linalg.cython_lapack._test_slamch', 'cython_lapack._test_slamch', (['c'], {}), '(c)\n', (358, 361), True, 'from scipy.linalg import cython_lapack as cython_lapack\n'), ((392, 408), 'scipy.linalg.lapack.slamch', 'lapack.slamch', (['c'], {}), '(c)\n', (405, 408), False, 'from scipy.linalg import lapack\n'), ((549, 578), 'scipy.linalg.cython_lapack._test_dlamch', 'cython_lapack._test_dlamch', (['c'], {}), '(c)\n', (575, 578), True, 'from scipy.linalg import cython_lapack as cython_lapack\n'), ((609, 625), 'scipy.linalg.lapack.dlamch', 'lapack.dlamch', (['c'], {}), '(c)\n', (622, 625), False, 'from scipy.linalg import lapack\n')] |
import matplotlib.pyplot as plt
import numpy as np
import torch
import cv2
def draw_figure(fig):
    """Force a redraw of the figure and process pending GUI events."""
    fig.canvas.draw()
    fig.canvas.flush_events()
    # A tiny pause lets the matplotlib event loop actually repaint.
    plt.pause(0.001)
def show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None):
    """Display a 2D tensor.
    args:
        fig_num: Figure number.
        title: Title of figure.
        range: (vmin, vmax) color limits passed to imshow.
        ax: optional existing axes to draw into instead of a new figure.

    NOTE: the parameter named `range` shadows the builtin; kept for
    backward compatibility with existing callers.
    """
    # Detach and move to CPU; squeeze drops singleton batch/channel dims.
    a_np = a.squeeze().cpu().clone().detach().numpy()
    if a_np.ndim == 3:
        # CxHxW -> HxWxC for imshow.
        a_np = np.transpose(a_np, (1, 2, 0))
    if ax is None:
        fig = plt.figure(fig_num)
        plt.tight_layout()
        plt.cla()
        plt.imshow(a_np, vmin=range[0], vmax=range[1])
        plt.axis('off')
        plt.axis('equal')
        if title is not None:
            plt.title(title)
        draw_figure(fig)
    else:
        # Reuse the caller-provided axes (e.g. a subplot grid).
        ax.cla()
        ax.imshow(a_np, vmin=range[0], vmax=range[1])
        ax.set_axis_off()
        ax.axis('equal')
        if title is not None:
            ax.set_title(title)
        draw_figure(plt.gcf())
def plot_graph(a: torch.Tensor, fig_num = None, title = None):
    """Plot a 1D tensor as a curve.
    args:
        fig_num: Figure number.
        title: Title of figure.
    """
    values = a.squeeze().cpu().clone().detach().numpy()
    if values.ndim > 1:
        raise ValueError
    fig = plt.figure(fig_num)
    plt.cla()
    plt.plot(values)
    if title is not None:
        plt.title(title)
    draw_figure(fig)
def show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None):
    """Draw boxes (and optional IoU labels) onto an image tensor.

    im: CxHxW torch image tensor (uint8-valued).
    boxes: tensor reshaped to (-1, 4); assumed (x, y, w, h) given how the
        corners are computed below -- TODO confirm against callers.
    iou_pred: optional per-box scores printed next to each box.
    disp_ids: optional boolean mask selecting which boxes to draw.
    Returns a float CxHxW tensor with the annotations burned in.
    """
    im_np = im.clone().cpu().squeeze().numpy()
    # CxHxW -> HxWxC; OpenCV needs a contiguous uint8 array.
    im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8))
    boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int)
    # Draw proposals
    for i_ in range(boxes.shape[0]):
        if disp_ids is None or disp_ids[i_]:
            bb = boxes[i_, :]
            # Deterministic pseudo-random color per box index.
            disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256)
            cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]),
                          disp_color, 1)
            if iou_pred is not None:
                # Place the label just above the box's top-left corner.
                text_pos = (bb[0], bb[1] - 5)
                cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos,
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False)
    # Back to CxHxW float tensor.
    im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float()
    return im_tensor
def _pascal_color_map(N=256, normalized=False):
"""
Python implementation of the color map function for the PASCAL VOC data set.
Official Matlab version can be found in the PASCAL VOC devkit
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
"""
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7 - j)
g = g | (bitget(c, 1) << 7 - j)
b = b | (bitget(c, 2) << 7 - j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap / 255 if normalized else cmap
return cmap
def overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):
    """ Overlay mask over image.
    Source: https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py
    This function allows you to overlay a mask over an image with some
    transparency.
    # Arguments
        im: Numpy Array. Array with the image. The shape must be (H, W, 3) and
            the pixels must be represented as `np.uint8` data type.
        ann: Numpy Array. Array with the mask. The shape must be (H, W) and the
            values must be intergers
        alpha: Float. Proportion of alpha to apply at the overlaid mask.
        colors: Numpy Array. Optional custom colormap. It must have shape (N, 3)
            being N the maximum number of colors to represent.
        contour_thickness: Integer. Thickness of each object index contour draw
            over the overlay. This function requires to have installed the
            package `opencv-python`.
    # Returns
        Numpy Array: Image of the overlay with shape (H, W, 3) and data type
            `np.uint8`.
    """
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented drop-in replacement.
    im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=int)
    if im.shape[:-1] != ann.shape:
        raise ValueError('First two dimensions of `im` and `ann` must match')
    if im.shape[-1] != 3:
        raise ValueError('im must have three channels at the 3 dimension')
    # NOTE(review): `colors or ...` raises for a multi-element ndarray
    # (ambiguous truth value); pass custom colormaps as lists of triples.
    colors = colors or _pascal_color_map()
    colors = np.asarray(colors, dtype=np.uint8)
    # Per-pixel color lookup, then alpha-blend the mask into the image.
    mask = colors[ann]
    fg = im * alpha + (1 - alpha) * mask
    img = im.copy()
    img[ann > 0] = fg[ann > 0]
    if contour_thickness:  # pragma: no cover
        import cv2
        for obj_id in np.unique(ann[ann > 0]):
            contours = cv2.findContours((ann == obj_id).astype(
                np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
            cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),
                             contour_thickness)
    return img
| [
"matplotlib.pyplot.imshow",
"cv2.rectangle",
"matplotlib.pyplot.title",
"numpy.unique",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.pause",
"matplotlib.pyplo... | [((164, 180), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (173, 180), True, 'import matplotlib.pyplot as plt\n'), ((1361, 1380), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (1371, 1380), True, 'import matplotlib.pyplot as plt\n'), ((1412, 1421), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1419, 1421), True, 'import matplotlib.pyplot as plt\n'), ((1427, 1441), 'matplotlib.pyplot.plot', 'plt.plot', (['a_np'], {}), '(a_np)\n', (1435, 1441), True, 'import matplotlib.pyplot as plt\n'), ((2935, 2964), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {'dtype': 'dtype'}), '((N, 3), dtype=dtype)\n', (2943, 2964), True, 'import numpy as np\n'), ((4825, 4859), 'numpy.asarray', 'np.asarray', (['colors'], {'dtype': 'np.uint8'}), '(colors, dtype=np.uint8)\n', (4835, 4859), True, 'import numpy as np\n'), ((489, 518), 'numpy.transpose', 'np.transpose', (['a_np', '(1, 2, 0)'], {}), '(a_np, (1, 2, 0))\n', (501, 518), True, 'import numpy as np\n'), ((556, 575), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (566, 575), True, 'import matplotlib.pyplot as plt\n'), ((585, 603), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (601, 603), True, 'import matplotlib.pyplot as plt\n'), ((613, 622), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (620, 622), True, 'import matplotlib.pyplot as plt\n'), ((632, 678), 'matplotlib.pyplot.imshow', 'plt.imshow', (['a_np'], {'vmin': 'range[0]', 'vmax': 'range[1]'}), '(a_np, vmin=range[0], vmax=range[1])\n', (642, 678), True, 'import matplotlib.pyplot as plt\n'), ((688, 703), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (696, 703), True, 'import matplotlib.pyplot as plt\n'), ((713, 730), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (721, 730), True, 'import matplotlib.pyplot as plt\n'), ((1478, 1494), 'matplotlib.pyplot.title', 'plt.title', (['title'], 
{}), '(title)\n', (1487, 1494), True, 'import matplotlib.pyplot as plt\n'), ((3235, 3254), 'numpy.array', 'np.array', (['[r, g, b]'], {}), '([r, g, b])\n', (3243, 3254), True, 'import numpy as np\n'), ((4485, 4515), 'numpy.asarray', 'np.asarray', (['im'], {'dtype': 'np.uint8'}), '(im, dtype=np.uint8)\n', (4495, 4515), True, 'import numpy as np\n'), ((4517, 4546), 'numpy.asarray', 'np.asarray', (['ann'], {'dtype': 'np.int'}), '(ann, dtype=np.int)\n', (4527, 4546), True, 'import numpy as np\n'), ((5075, 5098), 'numpy.unique', 'np.unique', (['ann[ann > 0]'], {}), '(ann[ann > 0])\n', (5084, 5098), True, 'import numpy as np\n'), ((775, 791), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (784, 791), True, 'import matplotlib.pyplot as plt\n'), ((1040, 1049), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1047, 1049), True, 'import matplotlib.pyplot as plt\n'), ((2016, 2103), 'cv2.rectangle', 'cv2.rectangle', (['im_np', '(bb[0], bb[1])', '(bb[0] + bb[2], bb[1] + bb[3])', 'disp_color', '(1)'], {}), '(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]),\n disp_color, 1)\n', (2029, 2103), False, 'import cv2\n')] |
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D
def create_and_fit_dense_model(batch_size, epochs, callbacks, X_train, X_test, y_train, y_test):
    """Build, train and persist a fully-connected 3-class classifier.

    The trained model is written to 'Auswertung/model' and its training
    history to 'Auswertung/history.npy'; the history object is returned.
    """
    # BUILD THE MODEL: 40 -> 100 -> 200 -> 100 -> 3 with ReLU + dropout.
    model = Sequential()
    model.add(Dense(100, input_shape=(40,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    for units in (200, 100):
        model.add(Dense(units))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
    model.add(Dense(3, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
    # TRAIN THE MODEL
    history = model.fit(X_train, y_train, batch_size=batch_size, callbacks=callbacks, epochs=epochs,
                        validation_data=(X_test, y_test))
    # SAVE MODEL AND HISTORY_DATA
    np.save('Auswertung/history.npy', history.history)
    model.save('Auswertung/model')
    return history
def create_and_fit_cnn_model(batch_size, epochs, callbacks, X_train, X_test, y_train, y_test):
    """Build, train and persist a 1D-CNN 3-class classifier.

    The trained model is written to 'Auswertung/model_CNN' and its training
    history to 'Auswertung/history_CNN.npy'; the history object is returned.
    """
    # BUILD THE MODEL: two conv blocks, global pooling, sigmoid head.
    model = Sequential()
    model.add(Conv1D(64, 3, activation='relu', input_shape=(40, 1)))
    model.add(Conv1D(64, 3, activation='relu'))
    model.add(MaxPooling1D(3))
    for _ in range(2):
        model.add(Conv1D(128, 3, activation='relu'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(0.5))
    model.add(Dense(3, activation='sigmoid'))
    model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
    # TRAIN THE MODEL
    history_CNN = model.fit(X_train, y_train, batch_size=batch_size, callbacks=callbacks, epochs=epochs,
                            validation_data=(X_test, y_test))
    # SAVE MODEL AND HISTORY_DATA
    np.save('Auswertung/history_CNN.npy', history_CNN.history)
    model.save('Auswertung/model_CNN')
    return history_CNN
"keras.layers.MaxPooling1D",
"keras.layers.GlobalAveragePooling1D",
"keras.layers.Conv1D",
"keras.models.Sequential",
"keras.layers.Activation",
"keras.layers.Dense",
"keras.layers.Dropout",
"numpy.save"
] | [((310, 322), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (320, 322), False, 'from keras.models import Sequential\n'), ((964, 1014), 'numpy.save', 'np.save', (['"""Auswertung/history.npy"""', 'history.history'], {}), "('Auswertung/history.npy', history.history)\n", (971, 1014), True, 'import numpy as np\n'), ((1201, 1213), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1211, 1213), False, 'from keras.models import Sequential\n'), ((1894, 1952), 'numpy.save', 'np.save', (['"""Auswertung/history_CNN.npy"""', 'history_CNN.history'], {}), "('Auswertung/history_CNN.npy', history_CNN.history)\n", (1901, 1952), True, 'import numpy as np\n'), ((337, 366), 'keras.layers.Dense', 'Dense', (['(100)'], {'input_shape': '(40,)'}), '(100, input_shape=(40,))\n', (342, 366), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((382, 400), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (392, 400), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((416, 428), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (423, 428), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((444, 454), 'keras.layers.Dense', 'Dense', (['(200)'], {}), '(200)\n', (449, 454), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((470, 488), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (480, 488), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((504, 516), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (511, 516), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((532, 542), 'keras.layers.Dense', 'Dense', (['(100)'], {}), '(100)\n', (537, 542), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((558, 576), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (568, 576), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((592, 604), 
'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (599, 604), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((620, 650), 'keras.layers.Dense', 'Dense', (['(3)'], {'activation': '"""softmax"""'}), "(3, activation='softmax')\n", (625, 650), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((1228, 1281), 'keras.layers.Conv1D', 'Conv1D', (['(64)', '(3)'], {'activation': '"""relu"""', 'input_shape': '(40, 1)'}), "(64, 3, activation='relu', input_shape=(40, 1))\n", (1234, 1281), False, 'from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((1297, 1329), 'keras.layers.Conv1D', 'Conv1D', (['(64)', '(3)'], {'activation': '"""relu"""'}), "(64, 3, activation='relu')\n", (1303, 1329), False, 'from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((1345, 1360), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(3)'], {}), '(3)\n', (1357, 1360), False, 'from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((1376, 1409), 'keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'activation': '"""relu"""'}), "(128, 3, activation='relu')\n", (1382, 1409), False, 'from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((1425, 1458), 'keras.layers.Conv1D', 'Conv1D', (['(128)', '(3)'], {'activation': '"""relu"""'}), "(128, 3, activation='relu')\n", (1431, 1458), False, 'from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((1474, 1498), 'keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (1496, 1498), False, 'from keras.layers import Conv1D, MaxPooling1D, GlobalAveragePooling1D\n'), ((1514, 1526), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1521, 1526), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((1542, 1572), 'keras.layers.Dense', 'Dense', (['(3)'], {'activation': '"""sigmoid"""'}), "(3, activation='sigmoid')\n", (1547, 1572), False, 'from keras.layers import Dense, 
Dropout, Activation\n')] |
# Copyright (C) <NAME> 2020.
# Distributed under the MIT License (see the accompanying README.md and LICENSE files).
import numpy as np
import policies.plackettluce as pl
import utils.variance as var
def optimize_logging_policy(n_update_steps,
logging_policy,
data_split,
additional_feat,
doc_value_diff,
cutoff,
obs_prob,
rel_prob,
expected_value):
update_i = 0
for update_i in range(n_update_steps):
qid = np.random.choice(data_split.num_queries())
q_n_docs = data_split.query_size(qid)
s_i, e_i = data_split.query_range(qid)
q_value_diff = doc_value_diff[s_i:e_i]
update_i += 1
if np.all(np.equal(q_value_diff, 0.)):
continue
policy_query_scores = logging_policy.score_query(
qid,
data_split,
additional_feat=additional_feat,
)
policy_query_scores += 18 - np.amax(policy_query_scores)
(sampled_rankings,
sampled_inv_rankings,
sampled_ranking_prob,
prob_per_rank) = pl.sample_rankings(
policy_query_scores,
10**3,
cutoff=cutoff,
prob_per_rank=True,
)
doc_prop_scores = np.sum(prob_per_rank*obs_prob[:prob_per_rank.shape[0], None], axis=0)
(list_variance,
list_var_score_grad,
list_var_policy_grad) = var.oracle_list_variance(
expected_value,
doc_value_diff[s_i:e_i],
rel_prob[s_i:e_i],
obs_prob,
doc_prop_scores,
policy_query_scores,
sampled_rankings,
sampled_inv_rankings,
sampled_ranking_prob,
cutoff=cutoff,
# compute_gradient=False,
)
policy_grad = pl.gradient_based_on_samples(
sampled_rankings,
obs_prob,
policy_query_scores,
sampled_ranking_prob,
cutoff=cutoff,
)
score_grad = np.mean(list_var_policy_grad[:, None]*policy_grad, axis=0)
score_grad += list_var_score_grad
logging_policy.gradient_update(
-score_grad,
data_split.query_feat(qid),
learning_rate=10**-2,
additional_feat=additional_feat[s_i:e_i, :],
) | [
"numpy.mean",
"numpy.equal",
"numpy.sum",
"policies.plackettluce.gradient_based_on_samples",
"utils.variance.oracle_list_variance",
"policies.plackettluce.sample_rankings",
"numpy.amax"
] | [((1277, 1364), 'policies.plackettluce.sample_rankings', 'pl.sample_rankings', (['policy_query_scores', '(10 ** 3)'], {'cutoff': 'cutoff', 'prob_per_rank': '(True)'}), '(policy_query_scores, 10 ** 3, cutoff=cutoff,\n prob_per_rank=True)\n', (1295, 1364), True, 'import policies.plackettluce as pl\n'), ((1553, 1624), 'numpy.sum', 'np.sum', (['(prob_per_rank * obs_prob[:prob_per_rank.shape[0], None])'], {'axis': '(0)'}), '(prob_per_rank * obs_prob[:prob_per_rank.shape[0], None], axis=0)\n', (1559, 1624), True, 'import numpy as np\n'), ((1699, 1922), 'utils.variance.oracle_list_variance', 'var.oracle_list_variance', (['expected_value', 'doc_value_diff[s_i:e_i]', 'rel_prob[s_i:e_i]', 'obs_prob', 'doc_prop_scores', 'policy_query_scores', 'sampled_rankings', 'sampled_inv_rankings', 'sampled_ranking_prob'], {'cutoff': 'cutoff'}), '(expected_value, doc_value_diff[s_i:e_i], rel_prob[\n s_i:e_i], obs_prob, doc_prop_scores, policy_query_scores,\n sampled_rankings, sampled_inv_rankings, sampled_ranking_prob, cutoff=cutoff\n )\n', (1723, 1922), True, 'import utils.variance as var\n'), ((2461, 2579), 'policies.plackettluce.gradient_based_on_samples', 'pl.gradient_based_on_samples', (['sampled_rankings', 'obs_prob', 'policy_query_scores', 'sampled_ranking_prob'], {'cutoff': 'cutoff'}), '(sampled_rankings, obs_prob,\n policy_query_scores, sampled_ranking_prob, cutoff=cutoff)\n', (2489, 2579), True, 'import policies.plackettluce as pl\n'), ((2739, 2799), 'numpy.mean', 'np.mean', (['(list_var_policy_grad[:, None] * policy_grad)'], {'axis': '(0)'}), '(list_var_policy_grad[:, None] * policy_grad, axis=0)\n', (2746, 2799), True, 'import numpy as np\n'), ((847, 874), 'numpy.equal', 'np.equal', (['q_value_diff', '(0.0)'], {}), '(q_value_diff, 0.0)\n', (855, 874), True, 'import numpy as np\n'), ((1148, 1176), 'numpy.amax', 'np.amax', (['policy_query_scores'], {}), '(policy_query_scores)\n', (1155, 1176), True, 'import numpy as np\n')] |
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import kaggle_environments.envs.halite.helpers as hh
from gym_halite.envs.halite_env import get_scalar_features, get_feature_maps
NETWORKS = {'CartPole-v1': models.get_q_mlp,
'CartPole-v1_duel': models.get_dueling_q_mlp,
'gym_halite:halite-v0': models.get_halite_q_mlp,
'gym_halite:halite-v0_duel': models.get_halite_dueling_q_mlp}
# halite = 'gym_halite:halite-v0'
# assume halite if there is not input shape
# if not self._input_shape:
# space = self._train_env.observation_space
# feature_maps_shape = space['feature_maps'].shape
# scalar_features_shape = space['scalar_features'].shape
# self._input_shape = (feature_maps_shape, scalar_features_shape)
def get_halite_q_mlp(input_shape, n_outputs):
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.layers as layers
feature_maps_shape, scalar_features_shape = input_shape
# create inputs
feature_maps_input = layers.Input(shape=feature_maps_shape, name="feature_maps")
flatten_feature_maps = layers.Flatten()(feature_maps_input)
scalar_feature_input = layers.Input(shape=scalar_features_shape, name="scalar_features")
# concatenate inputs
x = layers.Concatenate(axis=-1)([flatten_feature_maps, scalar_feature_input])
# the stem
stem_kernel_initializer = tf.keras.initializers.variance_scaling(
scale=2.0, mode='fan_in', distribution='truncated_normal'
)
output_kernel_initializer = tf.keras.initializers.random_uniform(
minval=-0.03, maxval=0.03
)
output_bias_initializer = tf.keras.initializers.constant(-0.2)
x = keras.layers.Dense(512, activation="relu", kernel_initializer=stem_kernel_initializer)(x)
x = keras.layers.Dense(512, activation="relu", kernel_initializer=stem_kernel_initializer)(x)
x = keras.layers.Dense(512, activation="relu", kernel_initializer=stem_kernel_initializer)(x)
output = keras.layers.Dense(n_outputs, name="output",
kernel_initializer=output_kernel_initializer,
bias_initializer=output_bias_initializer)(x)
# the model
model = keras.Model(inputs=[feature_maps_input, scalar_feature_input],
outputs=[output])
return model
def get_halite_dueling_q_mlp(input_shape, n_outputs):
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.layers as layers
feature_maps_shape, scalar_features_shape = input_shape
# create inputs
feature_maps_input = layers.Input(shape=feature_maps_shape, name="feature_maps")
flatten_feature_maps = layers.Flatten()(feature_maps_input)
scalar_feature_input = layers.Input(shape=scalar_features_shape, name="scalar_features")
# concatenate inputs
x = layers.Concatenate(axis=-1)([flatten_feature_maps, scalar_feature_input])
# the stem
stem_kernel_initializer = tf.keras.initializers.variance_scaling(
scale=2.0, mode='fan_in', distribution='truncated_normal'
)
output_kernel_initializer = tf.keras.initializers.random_uniform(
minval=-0.03, maxval=0.03
)
output_bias_initializer = tf.keras.initializers.constant(-0.2)
x = keras.layers.Dense(512, activation="relu", kernel_initializer=stem_kernel_initializer)(x)
x = keras.layers.Dense(512, activation="relu", kernel_initializer=stem_kernel_initializer)(x)
x = keras.layers.Dense(512, activation="relu", kernel_initializer=stem_kernel_initializer)(x)
state_values = keras.layers.Dense(1,
kernel_initializer=output_kernel_initializer,
bias_initializer=output_bias_initializer)(x)
raw_advantages = keras.layers.Dense(n_outputs,
kernel_initializer=output_kernel_initializer,
bias_initializer=output_bias_initializer)(x)
advantages = raw_advantages - tf.reduce_max(raw_advantages, axis=1, keepdims=True)
Q_values = state_values + advantages
# the model
model = keras.Model(inputs=[feature_maps_input, scalar_feature_input],
outputs=[Q_values])
return model
def get_halite_sparse(weights_in, mask_in):
from tensorflow import keras
import tensorflow.keras.layers as layers
class HaliteSparseMLP(keras.Model):
def __init__(self, weights_in, mask_in):
super(HaliteSparseMLP, self).__init__()
self._model = get_sparse(weights_in, mask_in)
def call(self, inputs, **kwargs):
feature_maps, scalar_features = inputs['feature_maps'], inputs['scalar_features']
flatten_feature_maps = layers.Flatten()(feature_maps)
x = layers.Concatenate(axis=-1)([flatten_feature_maps, scalar_features])
Z = self._model(x)
return Z
model = HaliteSparseMLP(weights_in, mask_in)
return model
def process_experiences(experiences):
observations, actions, rewards, next_observations, dones = experiences
try:
observations = tf.nest.map_structure(
lambda *x: tf.convert_to_tensor(x, dtype=tf.float32), *observations)
next_observations = tf.nest.map_structure(
lambda *x: tf.convert_to_tensor(x, dtype=tf.float32), *next_observations)
except ValueError:
observations = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(x, dtype=tf.float32), observations)
next_observations = tf.nest.map_structure(
lambda x: tf.convert_to_tensor(x, dtype=tf.float32), next_observations)
actions = tf.convert_to_tensor(actions, dtype=tf.int32)
rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
dones = tf.convert_to_tensor(dones, dtype=tf.float32)
return observations, actions, rewards, next_observations, dones
def check_halite_agent(model):
from kaggle_environments import make
board_size = 5
starting_halite = 5000
env = make('halite',
configuration={"size": board_size,
"startingHalite": starting_halite},
debug=True)
trainer = env.train([None])
obs = trainer.reset()
halite_agent = get_halite_agent(model)
return halite_agent(obs, env.configuration)
def halite_env_fake_step(self, step):
one = np.random.uniform(low=0., high=1., size=(5, 5, 3))
two = np.random.uniform(low=0., high=1., size=(11,))
done = 1. if step == 400 else 0.
return OrderedDict({'feature_maps': one, 'scalar_features': two}), 1., done, 'info'
def get_halite_agent(policy):
"""halite agent """
def halite_agent(obs, config):
from collections import OrderedDict
directions = [hh.ShipAction.NORTH,
hh.ShipAction.SOUTH,
hh.ShipAction.WEST,
hh.ShipAction.EAST]
board = hh.Board(obs, config)
me = board.current_player
scalar_features = get_scalar_features(board)
scalar_features = scalar_features[np.newaxis, ...]
scalar_features = tf.convert_to_tensor(scalar_features, dtype=tf.float32)
feature_maps = get_feature_maps(board)
feature_maps = feature_maps[np.newaxis, ...]
feature_maps = tf.convert_to_tensor(feature_maps, dtype=tf.float32)
obs = OrderedDict({'feature_maps': feature_maps, 'scalar_features': scalar_features})
Q_values = policy(obs)
action_number = np.argmax(Q_values.numpy()[0])
try:
me.ships[0].next_action = directions[action_number]
except IndexError:
pass
return me.next_actions
return halite_agent
| [
"tensorflow.keras.layers.Input",
"collections.OrderedDict",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.initializers.random_uniform",
"tensorflow.keras.initializers.constant",
"tensorflow.convert_to_tensor",
"tensorflow.keras.initializers.variance_scaling",
"tensorflow.reduce_max",
"gym... | [((1052, 1111), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'feature_maps_shape', 'name': '"""feature_maps"""'}), "(shape=feature_maps_shape, name='feature_maps')\n", (1064, 1111), True, 'import tensorflow.keras.layers as layers\n'), ((1203, 1268), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'scalar_features_shape', 'name': '"""scalar_features"""'}), "(shape=scalar_features_shape, name='scalar_features')\n", (1215, 1268), True, 'import tensorflow.keras.layers as layers\n'), ((1421, 1522), 'tensorflow.keras.initializers.variance_scaling', 'tf.keras.initializers.variance_scaling', ([], {'scale': '(2.0)', 'mode': '"""fan_in"""', 'distribution': '"""truncated_normal"""'}), "(scale=2.0, mode='fan_in',\n distribution='truncated_normal')\n", (1459, 1522), True, 'import tensorflow as tf\n'), ((1565, 1628), 'tensorflow.keras.initializers.random_uniform', 'tf.keras.initializers.random_uniform', ([], {'minval': '(-0.03)', 'maxval': '(0.03)'}), '(minval=-0.03, maxval=0.03)\n', (1601, 1628), True, 'import tensorflow as tf\n'), ((1673, 1709), 'tensorflow.keras.initializers.constant', 'tf.keras.initializers.constant', (['(-0.2)'], {}), '(-0.2)\n', (1703, 1709), True, 'import tensorflow as tf\n'), ((2245, 2330), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[feature_maps_input, scalar_feature_input]', 'outputs': '[output]'}), '(inputs=[feature_maps_input, scalar_feature_input], outputs=[output]\n )\n', (2256, 2330), False, 'from tensorflow import keras\n'), ((2635, 2694), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'feature_maps_shape', 'name': '"""feature_maps"""'}), "(shape=feature_maps_shape, name='feature_maps')\n", (2647, 2694), True, 'import tensorflow.keras.layers as layers\n'), ((2786, 2851), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'scalar_features_shape', 'name': '"""scalar_features"""'}), "(shape=scalar_features_shape, name='scalar_features')\n", (2798, 2851), True, 
'import tensorflow.keras.layers as layers\n'), ((3004, 3105), 'tensorflow.keras.initializers.variance_scaling', 'tf.keras.initializers.variance_scaling', ([], {'scale': '(2.0)', 'mode': '"""fan_in"""', 'distribution': '"""truncated_normal"""'}), "(scale=2.0, mode='fan_in',\n distribution='truncated_normal')\n", (3042, 3105), True, 'import tensorflow as tf\n'), ((3148, 3211), 'tensorflow.keras.initializers.random_uniform', 'tf.keras.initializers.random_uniform', ([], {'minval': '(-0.03)', 'maxval': '(0.03)'}), '(minval=-0.03, maxval=0.03)\n', (3184, 3211), True, 'import tensorflow as tf\n'), ((3256, 3292), 'tensorflow.keras.initializers.constant', 'tf.keras.initializers.constant', (['(-0.2)'], {}), '(-0.2)\n', (3286, 3292), True, 'import tensorflow as tf\n'), ((4173, 4260), 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': '[feature_maps_input, scalar_feature_input]', 'outputs': '[Q_values]'}), '(inputs=[feature_maps_input, scalar_feature_input], outputs=[\n Q_values])\n', (4184, 4260), False, 'from tensorflow import keras\n'), ((5715, 5760), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['actions'], {'dtype': 'tf.int32'}), '(actions, dtype=tf.int32)\n', (5735, 5760), True, 'import tensorflow as tf\n'), ((5775, 5822), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['rewards'], {'dtype': 'tf.float32'}), '(rewards, dtype=tf.float32)\n', (5795, 5822), True, 'import tensorflow as tf\n'), ((5835, 5880), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['dones'], {'dtype': 'tf.float32'}), '(dones, dtype=tf.float32)\n', (5855, 5880), True, 'import tensorflow as tf\n'), ((6080, 6181), 'kaggle_environments.make', 'make', (['"""halite"""'], {'configuration': "{'size': board_size, 'startingHalite': starting_halite}", 'debug': '(True)'}), "('halite', configuration={'size': board_size, 'startingHalite':\n starting_halite}, debug=True)\n", (6084, 6181), False, 'from kaggle_environments import make\n'), ((6438, 6490), 'numpy.random.uniform', 
'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': '(5, 5, 3)'}), '(low=0.0, high=1.0, size=(5, 5, 3))\n', (6455, 6490), True, 'import numpy as np\n'), ((6499, 6547), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)', 'size': '(11,)'}), '(low=0.0, high=1.0, size=(11,))\n', (6516, 6547), True, 'import numpy as np\n'), ((1139, 1155), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (1153, 1155), True, 'import tensorflow.keras.layers as layers\n'), ((1302, 1329), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (1320, 1329), True, 'import tensorflow.keras.layers as layers\n'), ((1718, 1809), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(512)'], {'activation': '"""relu"""', 'kernel_initializer': 'stem_kernel_initializer'}), "(512, activation='relu', kernel_initializer=\n stem_kernel_initializer)\n", (1736, 1809), False, 'from tensorflow import keras\n'), ((1816, 1907), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(512)'], {'activation': '"""relu"""', 'kernel_initializer': 'stem_kernel_initializer'}), "(512, activation='relu', kernel_initializer=\n stem_kernel_initializer)\n", (1834, 1907), False, 'from tensorflow import keras\n'), ((1914, 2005), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(512)'], {'activation': '"""relu"""', 'kernel_initializer': 'stem_kernel_initializer'}), "(512, activation='relu', kernel_initializer=\n stem_kernel_initializer)\n", (1932, 2005), False, 'from tensorflow import keras\n'), ((2017, 2154), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['n_outputs'], {'name': '"""output"""', 'kernel_initializer': 'output_kernel_initializer', 'bias_initializer': 'output_bias_initializer'}), "(n_outputs, name='output', kernel_initializer=\n output_kernel_initializer, bias_initializer=output_bias_initializer)\n", (2035, 2154), False, 'from tensorflow import keras\n'), ((2722, 2738), 
'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2736, 2738), True, 'import tensorflow.keras.layers as layers\n'), ((2885, 2912), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (2903, 2912), True, 'import tensorflow.keras.layers as layers\n'), ((3301, 3392), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(512)'], {'activation': '"""relu"""', 'kernel_initializer': 'stem_kernel_initializer'}), "(512, activation='relu', kernel_initializer=\n stem_kernel_initializer)\n", (3319, 3392), False, 'from tensorflow import keras\n'), ((3399, 3490), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(512)'], {'activation': '"""relu"""', 'kernel_initializer': 'stem_kernel_initializer'}), "(512, activation='relu', kernel_initializer=\n stem_kernel_initializer)\n", (3417, 3490), False, 'from tensorflow import keras\n'), ((3497, 3588), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(512)'], {'activation': '"""relu"""', 'kernel_initializer': 'stem_kernel_initializer'}), "(512, activation='relu', kernel_initializer=\n stem_kernel_initializer)\n", (3515, 3588), False, 'from tensorflow import keras\n'), ((3606, 3719), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {'kernel_initializer': 'output_kernel_initializer', 'bias_initializer': 'output_bias_initializer'}), '(1, kernel_initializer=output_kernel_initializer,\n bias_initializer=output_bias_initializer)\n', (3624, 3719), False, 'from tensorflow import keras\n'), ((3816, 3937), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['n_outputs'], {'kernel_initializer': 'output_kernel_initializer', 'bias_initializer': 'output_bias_initializer'}), '(n_outputs, kernel_initializer=output_kernel_initializer,\n bias_initializer=output_bias_initializer)\n', (3834, 3937), False, 'from tensorflow import keras\n'), ((4051, 4103), 'tensorflow.reduce_max', 'tf.reduce_max', (['raw_advantages'], {'axis': '(1)', 'keepdims': 
'(True)'}), '(raw_advantages, axis=1, keepdims=True)\n', (4064, 4103), True, 'import tensorflow as tf\n'), ((6594, 6652), 'collections.OrderedDict', 'OrderedDict', (["{'feature_maps': one, 'scalar_features': two}"], {}), "({'feature_maps': one, 'scalar_features': two})\n", (6605, 6652), False, 'from collections import OrderedDict\n'), ((6994, 7015), 'kaggle_environments.envs.halite.helpers.Board', 'hh.Board', (['obs', 'config'], {}), '(obs, config)\n', (7002, 7015), True, 'import kaggle_environments.envs.halite.helpers as hh\n'), ((7077, 7103), 'gym_halite.envs.halite_env.get_scalar_features', 'get_scalar_features', (['board'], {}), '(board)\n', (7096, 7103), False, 'from gym_halite.envs.halite_env import get_scalar_features, get_feature_maps\n'), ((7189, 7244), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['scalar_features'], {'dtype': 'tf.float32'}), '(scalar_features, dtype=tf.float32)\n', (7209, 7244), True, 'import tensorflow as tf\n'), ((7268, 7291), 'gym_halite.envs.halite_env.get_feature_maps', 'get_feature_maps', (['board'], {}), '(board)\n', (7284, 7291), False, 'from gym_halite.envs.halite_env import get_scalar_features, get_feature_maps\n'), ((7368, 7420), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['feature_maps'], {'dtype': 'tf.float32'}), '(feature_maps, dtype=tf.float32)\n', (7388, 7420), True, 'import tensorflow as tf\n'), ((7435, 7514), 'collections.OrderedDict', 'OrderedDict', (["{'feature_maps': feature_maps, 'scalar_features': scalar_features}"], {}), "({'feature_maps': feature_maps, 'scalar_features': scalar_features})\n", (7446, 7514), False, 'from collections import OrderedDict\n'), ((4793, 4809), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (4807, 4809), True, 'import tensorflow.keras.layers as layers\n'), ((4840, 4867), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4858, 4867), True, 'import tensorflow.keras.layers as layers\n'), 
((5222, 5263), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (5242, 5263), True, 'import tensorflow as tf\n'), ((5354, 5395), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (5374, 5395), True, 'import tensorflow as tf\n'), ((5508, 5549), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (5528, 5549), True, 'import tensorflow as tf\n'), ((5638, 5679), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (5658, 5679), True, 'import tensorflow as tf\n')] |
import numpy as np
from africanus.util.numba import jit
@jit(nogil=True, nopython=True, cache=True)
def fac(x):
if x < 0:
raise ValueError("Factorial input is negative.")
if x == 0:
return 1
factorial = 1
for i in range(1, x + 1):
factorial *= i
return factorial
@jit(nogil=True, nopython=True, cache=True)
def pre_fac(k, n, m):
numerator = (-1.0) ** k * fac(n - k)
denominator = fac(k) * fac((n + m) / 2.0 - k) * fac((n - m) / 2.0 - k)
return numerator / denominator
@jit(nogil=True, nopython=True, cache=True)
def zernike_rad(m, n, rho):
if n < 0 or m < 0 or abs(m) > n:
raise ValueError("m and n values are incorrect.")
radial_component = 0
for k in range((n - m) / 2 + 1):
radial_component += pre_fac(k, n, m) * rho ** (n - 2.0 * k)
return radial_component
@jit(nogil=True, nopython=True, cache=True)
def zernike(j, rho, phi):
if rho > 1:
return 0.0
j += 1
n = 0
j1 = j - 1
while j1 > n:
n += 1
j1 -= n
m = (-1) ** j * ((n % 2) + 2 * int((j1 + ((n + 1) % 2)) / 2.0))
if m > 0:
return zernike_rad(m, n, rho) * np.cos(m * phi)
if m < 0:
return zernike_rad(-m, n, rho) * np.sin(-m * phi)
return zernike_rad(0, n, rho)
@jit(nogil=True, nopython=True, cache=True)
def _convert_coords(l_coords, m_coords):
rho, phi = ((l_coords ** 2 + m_coords ** 2) ** 0.5), np.arctan2(
l_coords, m_coords
)
return rho, phi
@jit(nogil=True, nopython=True, cache=True)
def nb_zernike_dde(
coords,
coeffs,
noll_index,
out,
parallactic_angles,
frequency_scaling,
antenna_scaling,
pointing_errors,
):
sources, times, ants, chans, corrs = out.shape
npoly = coeffs.shape[-1]
for s in range(sources):
for t in range(times):
for a in range(ants):
sin_pa = np.sin(parallactic_angles[t, a])
cos_pa = np.cos(parallactic_angles[t, a])
for c in range(chans):
l, m, freq = coords[:, s, t, a, c]
l_coords = l * frequency_scaling[c]
m_coords = m * frequency_scaling[c]
l_coords += pointing_errors[t, a, c, 0]
m_coords += pointing_errors[t, a, c, 1]
vl = l_coords * cos_pa - l_coords * sin_pa
vm = m_coords * sin_pa + m * cos_pa
vl *= antenna_scaling[a, c, 0]
vm *= antenna_scaling[a, c, 1]
rho, phi = _convert_coords(vl, vm)
for co in range(corrs):
zernike_sum = 0
for p in range(npoly):
zc = coeffs[a, c, co, p]
zn = noll_index[a, c, co, p]
zernike_sum += zc * zernike(zn, rho, phi)
out[s, t, a, c, co] = zernike_sum
return out
def zernike_dde(
coords,
coeffs,
noll_index,
parallactic_angles,
frequency_scaling,
antenna_scaling,
pointing_errors,
):
""" Wrapper for :func:`nb_zernike_dde` """
_, sources, times, ants, chans = coords.shape
# ant, chan, corr_1, ..., corr_n, poly
corr_shape = coeffs.shape[2:-1]
npoly = coeffs.shape[-1]
# Flatten correlation dimensions for numba function
fcorrs = np.product(corr_shape)
ddes = np.empty((sources, times, ants, chans, fcorrs), coeffs.dtype)
coeffs = coeffs.reshape((ants, chans, fcorrs, npoly))
noll_index = noll_index.reshape((ants, chans, fcorrs, npoly))
result = nb_zernike_dde(
coords,
coeffs,
noll_index,
ddes,
parallactic_angles,
frequency_scaling,
antenna_scaling,
pointing_errors,
)
# Reshape to full correlation size
return result.reshape((sources, times, ants, chans) + corr_shape)
_ZERNICKE_DOCSTRING = """
Computes Direction Dependent Effects by evaluating
`Zernicke Polynomials <zernike_wiki_>`_
defined by coefficients ``coeffs``
and noll indexes ``noll_index``
at the specified coordinates ``coords``.
Decomposition of a voxel beam cube into Zernicke
polynomial coefficients can be achieved through the
use of the eidos_ package.
.. _zernike_wiki: https://en.wikipedia.org/wiki/Zernike_polynomials
.. _eidos: https://github.com/kmbasad/eidos/
Parameters
----------
coords : :class:`numpy.ndarray`
Float coordinates at which to evaluate the zernike polynomials.
Has shape :code:`(3, source, time, ant, chan)`. The three components in
the first dimension represent
l, m and frequency coordinates, respectively.
coeffs : :class:`numpy.ndarray`
complex Zernicke polynomial coefficients.
Has shape :code:`(ant, chan, corr_1, ..., corr_n, poly)`
where ``poly`` is the number of polynomial coefficients
and ``corr_1, ..., corr_n`` are a variable number of
correlation dimensions.
noll_index : :class:`numpy.ndarray`
Noll index associated with each polynomial coefficient.
Has shape :code:`(ant, chan, corr_1, ..., corr_n, poly)`.
correlation dimensions.
parallactic_angles : :class:`numpy.ndarray`
Parallactic angle rotation.
Has shape :code:`(time, ant)`.
frequency_scaling : :class:`numpy.ndarray`
The scaling of frequency of the beam.
Has shape :code:`(chan,)`.
antenna_scaling : :class:`numpy.ndarray`
The antenna scaling.
Has shape :code:`(ant, chan, 2)`.
pointing_errors : :class:`numpy.ndarray`
The pointing error.
Has shape :code:`(time, ant, chan, 2)`.
Returns
-------
dde : :class:`numpy.ndarray`
complex values with shape
:code:`(source, time, ant, chan, corr_1, ..., corr_n)`
"""
zernike_dde.__doc__ = _ZERNICKE_DOCSTRING
| [
"numpy.product",
"numpy.empty",
"numpy.cos",
"numpy.arctan2",
"africanus.util.numba.jit",
"numpy.sin"
] | [((61, 103), 'africanus.util.numba.jit', 'jit', ([], {'nogil': '(True)', 'nopython': '(True)', 'cache': '(True)'}), '(nogil=True, nopython=True, cache=True)\n', (64, 103), False, 'from africanus.util.numba import jit\n'), ((314, 356), 'africanus.util.numba.jit', 'jit', ([], {'nogil': '(True)', 'nopython': '(True)', 'cache': '(True)'}), '(nogil=True, nopython=True, cache=True)\n', (317, 356), False, 'from africanus.util.numba import jit\n'), ((533, 575), 'africanus.util.numba.jit', 'jit', ([], {'nogil': '(True)', 'nopython': '(True)', 'cache': '(True)'}), '(nogil=True, nopython=True, cache=True)\n', (536, 575), False, 'from africanus.util.numba import jit\n'), ((860, 902), 'africanus.util.numba.jit', 'jit', ([], {'nogil': '(True)', 'nopython': '(True)', 'cache': '(True)'}), '(nogil=True, nopython=True, cache=True)\n', (863, 902), False, 'from africanus.util.numba import jit\n'), ((1296, 1338), 'africanus.util.numba.jit', 'jit', ([], {'nogil': '(True)', 'nopython': '(True)', 'cache': '(True)'}), '(nogil=True, nopython=True, cache=True)\n', (1299, 1338), False, 'from africanus.util.numba import jit\n'), ((1505, 1547), 'africanus.util.numba.jit', 'jit', ([], {'nogil': '(True)', 'nopython': '(True)', 'cache': '(True)'}), '(nogil=True, nopython=True, cache=True)\n', (1508, 1547), False, 'from africanus.util.numba import jit\n'), ((3422, 3444), 'numpy.product', 'np.product', (['corr_shape'], {}), '(corr_shape)\n', (3432, 3444), True, 'import numpy as np\n'), ((3456, 3517), 'numpy.empty', 'np.empty', (['(sources, times, ants, chans, fcorrs)', 'coeffs.dtype'], {}), '((sources, times, ants, chans, fcorrs), coeffs.dtype)\n', (3464, 3517), True, 'import numpy as np\n'), ((1437, 1467), 'numpy.arctan2', 'np.arctan2', (['l_coords', 'm_coords'], {}), '(l_coords, m_coords)\n', (1447, 1467), True, 'import numpy as np\n'), ((1171, 1186), 'numpy.cos', 'np.cos', (['(m * phi)'], {}), '(m * phi)\n', (1177, 1186), True, 'import numpy as np\n'), ((1242, 1258), 'numpy.sin', 'np.sin', 
(['(-m * phi)'], {}), '(-m * phi)\n', (1248, 1258), True, 'import numpy as np\n'), ((1909, 1941), 'numpy.sin', 'np.sin', (['parallactic_angles[t, a]'], {}), '(parallactic_angles[t, a])\n', (1915, 1941), True, 'import numpy as np\n'), ((1967, 1999), 'numpy.cos', 'np.cos', (['parallactic_angles[t, a]'], {}), '(parallactic_angles[t, a])\n', (1973, 1999), True, 'import numpy as np\n')] |
"""OTE MVTec Dataset facilitate OTE Anomaly Training.
License:
MVTec AD dataset is released under the Creative Commons
Attribution-NonCommercial-ShareAlike 4.0 International License
(CC BY-NC-SA 4.0)(https://creativecommons.org/licenses/by-nc-sa/4.0/).
Reference:
- <NAME>, <NAME>, <NAME>, <NAME>, <NAME>:
The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for
Unsupervised Anomaly Detection; in: International Journal of Computer Vision
129(4):1038-1059, 2021, DOI: 10.1007/s11263-020-01400-4.
- <NAME>, <NAME>, <NAME>, <NAME>: MVTec AD —
A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection;
in: IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR),
9584-9592, 2019, DOI: 10.1109/CVPR.2019.00982.
"""
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
from pathlib import Path
from typing import List, Union
import cv2
import numpy as np
from anomalib.data.mvtec import make_mvtec_dataset
from ote_anomalib.data import LabelNames
from ote_sdk.entities.annotation import (
Annotation,
AnnotationSceneEntity,
AnnotationSceneKind,
)
from ote_sdk.entities.color import Color
from ote_sdk.entities.dataset_item import DatasetItemEntity
from ote_sdk.entities.datasets import DatasetEntity
from ote_sdk.entities.id import ID
from ote_sdk.entities.image import Image
from ote_sdk.entities.label import Domain, LabelEntity
from ote_sdk.entities.model_template import TaskType
from ote_sdk.entities.scored_label import ScoredLabel
from ote_sdk.entities.shapes.rectangle import Rectangle
from ote_sdk.entities.subset import Subset
from ote_sdk.utils.segmentation_utils import create_annotation_from_segmentation_map
from pandas.core.frame import DataFrame
class OteMvtecDataset:
    """Generate OTE MVTec Dataset from the anomaly detection datasets that follows the MVTec format.

    Args:
        path (Union[str, Path], optional): Path to the MVTec dataset category.
            Defaults to "./datasets/MVTec/bottle".
        split_ratio (float, optional): Ratio to split normal training images and add to the
            test set in case test set doesn't contain any normal images.
            Defaults to 0.5.
        seed (int, optional): Random seed to ensure reproducibility when splitting. Defaults to 0.
        create_validation_set (bool, optional): Create validation set from the test set by splitting
            it to half. Default to True.
        task_type (TaskType, optional): Anomaly task type.
            Defaults to TaskType.ANOMALY_CLASSIFICATION.

    Raises:
        ValueError: If ``task_type`` is not anomaly classification or anomaly segmentation.

    Examples:
        >>> dataset_generator = OteMvtecDataset()
        >>> dataset = dataset_generator.generate()
        >>> dataset[0].media.numpy.shape
        (900, 900, 3)
    """

    # pylint: disable=too-many-instance-attributes
    def __init__(
        self,
        path: Union[str, Path],
        split_ratio: float = 0.5,
        seed: int = 0,
        create_validation_set: bool = True,
        task_type: TaskType = TaskType.ANOMALY_CLASSIFICATION,
    ):
        self.path = path if isinstance(path, Path) else Path(path)
        self.split_ratio = split_ratio
        self.seed = seed
        self.create_validation_set = create_validation_set
        self.task_type = task_type

        # BUGFIX: an unsupported task_type previously left `label_domain` unset,
        # producing a confusing AttributeError later; fail fast instead with the
        # same message `generate()` uses for unknown task types.
        if self.task_type in (TaskType.ANOMALY_CLASSIFICATION, TaskType.ANOMALY_SEGMENTATION):
            # Both anomaly task types use the segmentation label domain.
            self.label_domain = Domain.ANOMALY_SEGMENTATION
        else:
            raise ValueError(f"Unknown task type: {self.task_type}")

        self.normal_label = LabelEntity(
            name=LabelNames.normal, domain=self.label_domain, id=ID(LabelNames.normal), color=Color(0, 255, 0)
        )
        self.abnormal_label = LabelEntity(
            name=LabelNames.anomalous,
            domain=self.label_domain,
            id=ID(LabelNames.anomalous),
            is_anomalous=True,
            color=Color(255, 0, 0),
        )
        # Map segmentation-mask values (0 = background, 1 = anomaly) to OTE labels.
        self.label_map = {0: self.normal_label, 1: self.abnormal_label}

    def get_samples(self) -> DataFrame:
        """Get MVTec samples.

        Get MVTec samples in a pandas DataFrame. Update the certain columns
        to match the OTE naming terminology. For example, column `split` is
        renamed to `subset`. Labels are also renamed by creating their
        corresponding OTE LabelEntities

        Returns:
            DataFrame: Final list of samples comprising all the required
                information to create the OTE Dataset.
        """
        samples = make_mvtec_dataset(
            path=self.path,
            split_ratio=self.split_ratio,
            seed=self.seed,
            create_validation_set=self.create_validation_set,
        )
        # Set the OTE SDK Splits
        samples = samples.rename(columns={"split": "subset"})
        samples.loc[samples.subset == "train", "subset"] = Subset.TRAINING
        samples.loc[samples.subset == "val", "subset"] = Subset.VALIDATION
        samples.loc[samples.subset == "test", "subset"] = Subset.TESTING
        # Create and Set the OTE Labels. Order matters: reassign "not good" first
        # so the remaining "good" rows can be matched by equality afterwards.
        samples.loc[samples.label != "good", "label"] = self.abnormal_label
        samples.loc[samples.label == "good", "label"] = self.normal_label
        samples = samples.reset_index(drop=True)
        return samples

    def generate(self) -> DatasetEntity:
        """Generate OTE Anomaly Dataset.

        Returns:
            DatasetEntity: Output OTE Anomaly Dataset from an MVTec
        """
        samples = self.get_samples()
        dataset_items: List[DatasetItemEntity] = []
        for _, sample in samples.iterrows():
            # Create image
            image = Image(file_path=sample.image_path)
            # Create annotation
            if self.task_type == TaskType.ANOMALY_CLASSIFICATION or sample.label == self.normal_label:
                # Classification (or a normal sample): a full-image rectangle
                # carrying the image-level label.
                shape = Rectangle(x1=0, y1=0, x2=1, y2=1)
                labels = [ScoredLabel(sample.label)]
                annotations = [Annotation(shape=shape, labels=labels)]
                annotation_scene = AnnotationSceneEntity(annotations=annotations, kind=AnnotationSceneKind.ANNOTATION)
            elif self.task_type == TaskType.ANOMALY_SEGMENTATION and sample.label == self.abnormal_label:
                # Segmentation of an abnormal sample: derive polygon annotations
                # from the binary ground-truth mask (mask pixels are 0/255).
                mask = (cv2.imread(sample.mask_path, cv2.IMREAD_GRAYSCALE) / 255).astype(np.uint8)
                annotations = create_annotation_from_segmentation_map(mask, np.ones_like(mask), self.label_map)
                annotation_scene = AnnotationSceneEntity(annotations=annotations, kind=AnnotationSceneKind.ANNOTATION)
            else:
                raise ValueError(f"Unknown task type: {self.task_type}")
            # Create dataset item
            dataset_item = DatasetItemEntity(media=image, annotation_scene=annotation_scene, subset=sample.subset)
            # Add to dataset items
            dataset_items.append(dataset_item)
        dataset = DatasetEntity(items=dataset_items)
        return dataset
| [
"numpy.ones_like",
"ote_sdk.entities.datasets.DatasetEntity",
"ote_sdk.entities.image.Image",
"pathlib.Path",
"ote_sdk.entities.annotation.AnnotationSceneEntity",
"ote_sdk.entities.id.ID",
"ote_sdk.entities.color.Color",
"ote_sdk.entities.annotation.Annotation",
"ote_sdk.entities.shapes.rectangle.Re... | [((4932, 5067), 'anomalib.data.mvtec.make_mvtec_dataset', 'make_mvtec_dataset', ([], {'path': 'self.path', 'split_ratio': 'self.split_ratio', 'seed': 'self.seed', 'create_validation_set': 'self.create_validation_set'}), '(path=self.path, split_ratio=self.split_ratio, seed=self.\n seed, create_validation_set=self.create_validation_set)\n', (4950, 5067), False, 'from anomalib.data.mvtec import make_mvtec_dataset\n'), ((7318, 7352), 'ote_sdk.entities.datasets.DatasetEntity', 'DatasetEntity', ([], {'items': 'dataset_items'}), '(items=dataset_items)\n', (7331, 7352), False, 'from ote_sdk.entities.datasets import DatasetEntity\n'), ((3536, 3546), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (3540, 3546), False, 'from pathlib import Path\n'), ((6067, 6101), 'ote_sdk.entities.image.Image', 'Image', ([], {'file_path': 'sample.image_path'}), '(file_path=sample.image_path)\n', (6072, 6101), False, 'from ote_sdk.entities.image import Image\n'), ((7128, 7220), 'ote_sdk.entities.dataset_item.DatasetItemEntity', 'DatasetItemEntity', ([], {'media': 'image', 'annotation_scene': 'annotation_scene', 'subset': 'sample.subset'}), '(media=image, annotation_scene=annotation_scene, subset=\n sample.subset)\n', (7145, 7220), False, 'from ote_sdk.entities.dataset_item import DatasetItemEntity\n'), ((4057, 4078), 'ote_sdk.entities.id.ID', 'ID', (['LabelNames.normal'], {}), '(LabelNames.normal)\n', (4059, 4078), False, 'from ote_sdk.entities.id import ID\n'), ((4086, 4102), 'ote_sdk.entities.color.Color', 'Color', (['(0)', '(255)', '(0)'], {}), '(0, 255, 0)\n', (4091, 4102), False, 'from ote_sdk.entities.color import Color\n'), ((4248, 4272), 'ote_sdk.entities.id.ID', 'ID', (['LabelNames.anomalous'], {}), '(LabelNames.anomalous)\n', (4250, 4272), False, 'from ote_sdk.entities.id import ID\n'), ((4323, 4339), 'ote_sdk.entities.color.Color', 'Color', (['(255)', '(0)', '(0)'], {}), '(255, 0, 0)\n', (4328, 4339), False, 'from 
ote_sdk.entities.color import Color\n'), ((6262, 6295), 'ote_sdk.entities.shapes.rectangle.Rectangle', 'Rectangle', ([], {'x1': '(0)', 'y1': '(0)', 'x2': '(1)', 'y2': '(1)'}), '(x1=0, y1=0, x2=1, y2=1)\n', (6271, 6295), False, 'from ote_sdk.entities.shapes.rectangle import Rectangle\n'), ((6455, 6543), 'ote_sdk.entities.annotation.AnnotationSceneEntity', 'AnnotationSceneEntity', ([], {'annotations': 'annotations', 'kind': 'AnnotationSceneKind.ANNOTATION'}), '(annotations=annotations, kind=AnnotationSceneKind.\n ANNOTATION)\n', (6476, 6543), False, 'from ote_sdk.entities.annotation import Annotation, AnnotationSceneEntity, AnnotationSceneKind\n'), ((6322, 6347), 'ote_sdk.entities.scored_label.ScoredLabel', 'ScoredLabel', (['sample.label'], {}), '(sample.label)\n', (6333, 6347), False, 'from ote_sdk.entities.scored_label import ScoredLabel\n'), ((6380, 6418), 'ote_sdk.entities.annotation.Annotation', 'Annotation', ([], {'shape': 'shape', 'labels': 'labels'}), '(shape=shape, labels=labels)\n', (6390, 6418), False, 'from ote_sdk.entities.annotation import Annotation, AnnotationSceneEntity, AnnotationSceneKind\n'), ((6891, 6979), 'ote_sdk.entities.annotation.AnnotationSceneEntity', 'AnnotationSceneEntity', ([], {'annotations': 'annotations', 'kind': 'AnnotationSceneKind.ANNOTATION'}), '(annotations=annotations, kind=AnnotationSceneKind.\n ANNOTATION)\n', (6912, 6979), False, 'from ote_sdk.entities.annotation import Annotation, AnnotationSceneEntity, AnnotationSceneKind\n'), ((6820, 6838), 'numpy.ones_like', 'np.ones_like', (['mask'], {}), '(mask)\n', (6832, 6838), True, 'import numpy as np\n'), ((6669, 6719), 'cv2.imread', 'cv2.imread', (['sample.mask_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(sample.mask_path, cv2.IMREAD_GRAYSCALE)\n', (6679, 6719), False, 'import cv2\n')] |
'''
This file was cloned from https://github.com/nogueirs/JMLR2018/blob/master/python/stability/__init__.py
The method was propsoed in Nogueira et al. 2017.
Docstring and signatures format were revised to google style.
Reference:
----------
<NAME>., <NAME>., & <NAME>. (2017). On the stability of feature selection algorithms.
J. Mach. Learn. Res., 18(1), 6345-6398.
'''
import numpy as np
from scipy.stats import norm
from typing import Optional, Any, Iterable
import math
__all__ = ['getStability', 'getVarianceofStability', 'confidenceIntervals', 'hypothesisTestT', 'hypothesisTestV',
'feat_list_to_binary_mat', 'jaccard_mean']
def getStability(Z: np.ndarray) -> float:
    r'''Stability estimate of a feature-selection procedure (Definition 4 in [1]).

    Given M>1 feature sets over d features, encoded as a binary matrix with one
    row per run (a 1 in column f means feature f was selected in that run),
    return the stability statistic.

    Args:
        Z (np.ndarray):
            A BINARY matrix Z (given as a list or as a numpy.ndarray of size M*d).

    Returns:
        (float) - The stability of the feature selection procedure
    '''  # noqa
    Z = checkInputType(Z)
    n_sets, n_features = Z.shape
    sel_freq = np.mean(Z, axis=0)          # per-feature selection frequency
    avg_selected = np.sum(sel_freq)        # average subset size over the runs
    normalizer = (avg_selected / n_features) * (1 - avg_selected / n_features)
    sample_var = np.mean(sel_freq * (1 - sel_freq))
    return 1 - (n_sets / (n_sets - 1)) * sample_var / normalizer
def getVarianceofStability(Z: np.ndarray) -> dict:
    '''
    Let us assume we have M>1 feature sets and d>0 features in total.
    This function computes the stability estimate and its variance as given in [1].

    Args:
        Z:
            A BINARY matrix Z (given as a list or as a numpy.ndarray of size M*d, raises a ValueError exception otherwise).
            Each row of the binary matrix represents a feature set, where a 1 at the f^th position
            means the f^th feature has been selected and a 0 means it has not been selected.

    Returns:
        (dict) - A dictionnary where the key 'stability' provides the corresponding stability value #
            and where the key 'variance' provides the variance of the stability estimate
    ''' # noqa
    Z=checkInputType(Z) # check the input Z is of the right type
    M,d=Z.shape # M is the number of feature sets and d the total number of features
    hatPF=np.mean(Z,axis=0) # hatPF is a numpy.array with the frequency of selection of each feature
    kbar=np.sum(hatPF) # kbar is the average number of selected features over the M feature sets
    k=np.sum(Z,axis=1) # k is a numpy.array with the number of features selected on each one of the M feature sets
    denom=(kbar/d)*(1-kbar/d)
    stab=1-(M/(M-1))*np.mean(np.multiply(hatPF,1-hatPF))/denom # the stability estimate
    # phi[i] is the influence of the i-th feature set on the estimate; the
    # variance below is the sample variance of these influence terms.
    phi=np.zeros(M)
    for i in range(M):
        phi[i]=(1/denom)*(np.mean(np.multiply(Z[i,],hatPF))-(k[i]*kbar)/d**2+(stab/2)*((2*k[i]*kbar)/d**2-k[i]/d-kbar/d+1))
    phiAv=np.mean(phi)
    variance=(4/M**2)*np.sum(np.power(phi-phiAv,2)) # the variance of the stability estimate as given in [1]
    return {'stability':stab,'variance':variance}
def confidenceIntervals(Z: np.ndarray,
                        alpha: Optional[float] = 0.05,
                        res: Optional[dict] = None) -> dict:
    r'''
    Let us assume we have M>1 feature sets and d>0 features in total.
    This function provides the stability estimate and the lower and upper bounds of the (1-alpha)- approximate confidence
    interval as given by Corollary 9 in [1]

    Args:
        Z (np.ndarray):
            A BINARY matrix Z (given as a list or as a numpy.ndarray of size M*d, raises a ValueError exception otherwise).
            Each row of the binary matrix represents a feature set, where a 1 at the f^th position
            means the f^th feature has been selected and a 0 means it has not been selected.
        alpha (float, Optional):
            `alpha` is an optional argument corresponding to the level of significance for the confidence interval
            (default is 0.05), e.g. alpha=0.05 give the lower and upper bound of for a (1-alpha)=95% confidence interval.
        res (dict, Optional):
            In case you already computed the stability estimate of Z using the function getVarianceofStability(Z),
            you can provide the result (a dictionnary) as an optional argument to this function for faster computation.

    Returns:
        (dict) - A dictionnary where the key 'stability' provides the corresponding stability value, where the key
            'variance' provides the variance of the stability estimate the keys 'lower' and 'upper' respectively
            give the lower and upper bounds of the (1-alpha)-confidence interval.
    ''' # noqa
    # BUGFIX: the default for `res` was a mutable `{}` (shared across calls);
    # use the None sentinel instead. An explicitly passed empty dict still
    # triggers recomputation, so callers relying on the old behavior are safe.
    Z = checkInputType(Z)  # check the input Z is of the right type
    # alpha must lie strictly inside (0, 1)
    if alpha >= 1 or alpha <= 0:
        raise ValueError('The level of significance alpha should be a value >0 and <1')
    if res is None or len(res) == 0:
        res = getVarianceofStability(Z)  # stability estimate and its variance
    # symmetric normal-approximation half-width of the interval
    half_width = norm.ppf(1 - alpha / 2) * math.sqrt(res['variance'])
    lower = res['stability'] - half_width  # lower bound at level alpha
    upper = res['stability'] + half_width  # upper bound at level alpha
    return {'stability': res['stability'], 'lower': lower, 'upper': upper}
## this tests whether the true stability is equal to a given value stab0
def hypothesisTestV(Z: np.ndarray,
                    stab0: float,
                    alpha: Optional[float] = 0.05) -> dict:
    r'''One-sided null-hypothesis test from [1]: is the population stability
    greater than a given value stab0?

    Args:
        Z (np.ndarray):
            A BINARY matrix Z (given as a list or as a numpy.ndarray of size M*d, raises a ValueError exception
            otherwise). Each row of the binary matrix represents a feature set, where a 1 at the f^th position
            means the f^th feature has been selected and a 0 means it has not been selected.
        stab0 (float):
            `stab0` is the value we want to compare the stability of the feature selection to.
        alpha (float):
            Significance level of the test (default is 0.05).

    Returns:
        (dict)
            A dictionnary with:
            * a boolean value for key 'reject' equal to True if the null hypothesis is rejected and to False otherwise
            * a float for the key 'V' giving the value of the test statistic
            * a float giving for the key 'p-value' giving the p-value of the hypothesis test
    '''  # noqa
    Z = checkInputType(Z)
    stats = getVarianceofStability(Z)
    # standardized test statistic
    test_stat = (stats['stability'] - stab0) / math.sqrt(stats['variance'])
    critical = norm.ppf(1 - alpha)  # one-sided critical value
    return {
        'reject': bool(test_stat >= critical),
        'V': test_stat,
        'p-value': 1 - norm.cdf(test_stat),
    }
# this tests the equality of the stability of two algorithms
def hypothesisTestT(Z1: np.ndarray,
                    Z2: np.ndarray,
                    alpha: Optional[float] = 0.05) -> dict:
    '''Two-sided null-hypothesis test of Theorem 10 in [1]: do two feature
    selection procedures have identical population stabilities?

    Args:
        Z1 & Z2 (np.ndarray)
            Two BINARY matrices Z1 and Z2 (given as lists or as numpy.ndarray objects of size M*d).
            Each row of the binary matrix represents a feature set, where a 1 at the f^th position
            means the f^th feature has been selected and a 0 means it has not been selected.
        alpha (float, Optional)
            Significance level of the test (default is 0.05).

    Returns:
        (dict)
            A dictionnary with:
            * a boolean value for key 'reject' equal to True if the null hypothesis is rejected and to False otherwise
            * a float for the key 'T' giving the value of the test statistic
            * a float giving for the key 'p-value' giving the p-value of the hypothesis test
    '''  # noqa
    Z1 = checkInputType(Z1)
    Z2 = checkInputType(Z2)
    first = getVarianceofStability(Z1)
    second = getVarianceofStability(Z2)
    # two-sample z-style statistic on the difference of the two estimates
    test_stat = (second['stability'] - first['stability']) / math.sqrt(
        first['variance'] + second['variance'])
    critical = norm.ppf(1 - alpha / 2)  # inverse normal CDF at 1 - alpha/2
    return {
        'reject': bool(abs(test_stat) >= critical),
        'T': test_stat,
        'p-value': 2 * (1 - norm.cdf(abs(test_stat))),
    }
def checkInputType(Z):
    '''Validate the feature-selection matrix and return it as a numpy array.

    Accepts a list (converted via numpy.asarray) or a numpy.ndarray. The input
    must be two-dimensional; any other type or dimensionality raises ValueError.

    OUTPUT: The input Z as a numpy.ndarray
    '''
    if isinstance(Z, list):
        Z = np.asarray(Z)
    if not isinstance(Z, np.ndarray):
        raise ValueError('The input matrix Z should be of type list or numpy.ndarray')
    if Z.ndim != 2:
        raise ValueError('The input matrix Z should be of dimension 2')
    return Z
#=== My Code ===
import pandas as pd
import itertools
import multiprocessing as mpi
from sklearn.metrics import jaccard_score
def feat_list_to_binary_mat(selected_feat_list: pd.DataFrame,
                            full_feat_list: Iterable[Any]) -> np.ndarray:
    r"""Encode per-trial feature selections as a binary matrix.

    Args:
        selected_feat_list (pd.DataFrame):
            Collection of series, each series is the features selected in one trial. Series can have different length,
            empty string or NA will be ignored. The element should all be a strings.
        full_feat_list (Iterable[Any]):
            The full list of all features.

    Returns:
        (np.ndarray) - Boolean matrix of shape (n_trials, n_features); entry
        [i, f] is True iff trial i selected the f-th feature of `full_feat_list`.

    Raises:
        IndexError: If a selected feature is not found in `full_feat_list`.
    """  # noqa
    full_feat_list = list(full_feat_list)
    selected_feat_list = selected_feat_list.fillna('')

    # Union of everything that was selected in any trial, minus placeholders.
    chosen = set()
    for col in selected_feat_list:
        chosen.update(selected_feat_list[col])
    chosen -= {'', 'nan'}

    missing = chosen - set(full_feat_list)
    if missing:
        msg = f"The following features were not in the full_feat_list: {','.join(missing)}"
        raise IndexError(msg)

    n_trials = len(selected_feat_list.columns)
    n_feats = len(full_feat_list)
    Z = np.zeros([n_trials, n_feats], dtype=bool)
    for row, col in enumerate(selected_feat_list):
        # Non-empty entries of this trial, mapped to column indices.
        picked = selected_feat_list[col][selected_feat_list[col] != '']
        for feat in picked:
            Z[row, full_feat_list.index(feat)] = True
    return Z
def jaccard_mean(Z: np.ndarray):
    r"""Pairwise Jaccard similarity between the rows of `Z`.

    Args:
        Z (np.ndarray):
            Each row of the binary matrix represents a feature set, where a 1 at the f^th position
            means the f^th feature has been selected and a 0 means it has not been selected.

    Returns:
        (tuple) - (jac, jac_bar, jac_bar_sd): the list of pairwise Jaccard
        scores, their mean, and their standard deviation.
    """  # noqa
    # NOTE(review): both ordered pairs (i, j) and (j, i) are enqueued, so every
    # unordered pair is scored twice. The Jaccard index is symmetric, so the
    # mean and (population) std are unaffected, but half the work is redundant.
    jac_job = [(Z[i], Z[j]) for i, j in itertools.product(range(len(Z)), range(len(Z))) if i != j]
    pool = mpi.Pool()  # defaults to os.cpu_count() worker processes
    res = pool.starmap_async(jaccard_score, jac_job)
    pool.close()
    pool.join()
    jac = res.get()
    jac_bar = np.mean(jac)
    jac_bar_sd = np.std(jac)
    return jac, jac_bar, jac_bar_sd
"numpy.mean",
"numpy.multiply",
"numpy.power",
"scipy.stats.norm.ppf",
"math.sqrt",
"numpy.asarray",
"numpy.sum",
"numpy.zeros",
"multiprocessing.Pool",
"numpy.std",
"scipy.stats.norm.cdf"
] | [((1332, 1350), 'numpy.mean', 'np.mean', (['Z'], {'axis': '(0)'}), '(Z, axis=0)\n', (1339, 1350), True, 'import numpy as np\n'), ((1359, 1372), 'numpy.sum', 'np.sum', (['hatPF'], {}), '(hatPF)\n', (1365, 1372), True, 'import numpy as np\n'), ((2425, 2443), 'numpy.mean', 'np.mean', (['Z'], {'axis': '(0)'}), '(Z, axis=0)\n', (2432, 2443), True, 'import numpy as np\n'), ((2525, 2538), 'numpy.sum', 'np.sum', (['hatPF'], {}), '(hatPF)\n', (2531, 2538), True, 'import numpy as np\n'), ((2619, 2636), 'numpy.sum', 'np.sum', (['Z'], {'axis': '(1)'}), '(Z, axis=1)\n', (2625, 2636), True, 'import numpy as np\n'), ((2855, 2866), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2863, 2866), True, 'import numpy as np\n'), ((3024, 3036), 'numpy.mean', 'np.mean', (['phi'], {}), '(phi)\n', (3031, 3036), True, 'import numpy as np\n'), ((7131, 7150), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - alpha)'], {}), '(1 - alpha)\n', (7139, 7150), False, 'from scipy.stats import norm\n'), ((8941, 8964), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - alpha / 2)'], {}), '(1 - alpha / 2)\n', (8949, 8964), False, 'from scipy.stats import norm\n'), ((11141, 11169), 'numpy.zeros', 'np.zeros', (['[m, d]'], {'dtype': 'bool'}), '([m, d], dtype=bool)\n', (11149, 11169), True, 'import numpy as np\n'), ((11920, 11930), 'multiprocessing.Pool', 'mpi.Pool', ([], {}), '()\n', (11928, 11930), True, 'import multiprocessing as mpi\n'), ((12051, 12063), 'numpy.mean', 'np.mean', (['jac'], {}), '(jac)\n', (12058, 12063), True, 'import numpy as np\n'), ((12081, 12092), 'numpy.std', 'np.std', (['jac'], {}), '(jac)\n', (12087, 12092), True, 'import numpy as np\n'), ((7094, 7120), 'math.sqrt', 'math.sqrt', (["res['variance']"], {}), "(res['variance'])\n", (7103, 7120), False, 'import math\n'), ((7214, 7225), 'scipy.stats.norm.cdf', 'norm.cdf', (['V'], {}), '(V)\n', (7222, 7225), False, 'from scipy.stats import norm\n'), ((8910, 8932), 'math.sqrt', 'math.sqrt', (['(var1 + var2)'], {}), '(var1 + var2)\n', (8919, 8932), 
False, 'import math\n'), ((9579, 9592), 'numpy.asarray', 'np.asarray', (['Z'], {}), '(Z)\n', (9589, 9592), True, 'import numpy as np\n'), ((3066, 3090), 'numpy.power', 'np.power', (['(phi - phiAv)', '(2)'], {}), '(phi - phiAv, 2)\n', (3074, 3090), True, 'import numpy as np\n'), ((5227, 5250), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - alpha / 2)'], {}), '(1 - alpha / 2)\n', (5235, 5250), False, 'from scipy.stats import norm\n'), ((5247, 5273), 'math.sqrt', 'math.sqrt', (["res['variance']"], {}), "(res['variance'])\n", (5256, 5273), False, 'import math\n'), ((5359, 5382), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - alpha / 2)'], {}), '(1 - alpha / 2)\n', (5367, 5382), False, 'from scipy.stats import norm\n'), ((5379, 5405), 'math.sqrt', 'math.sqrt', (["res['variance']"], {}), "(res['variance'])\n", (5388, 5405), False, 'import math\n'), ((1434, 1463), 'numpy.multiply', 'np.multiply', (['hatPF', '(1 - hatPF)'], {}), '(hatPF, 1 - hatPF)\n', (1445, 1463), True, 'import numpy as np\n'), ((2788, 2817), 'numpy.multiply', 'np.multiply', (['hatPF', '(1 - hatPF)'], {}), '(hatPF, 1 - hatPF)\n', (2799, 2817), True, 'import numpy as np\n'), ((2924, 2949), 'numpy.multiply', 'np.multiply', (['Z[i,]', 'hatPF'], {}), '(Z[i,], hatPF)\n', (2935, 2949), True, 'import numpy as np\n')] |
def bedrockchannel(tend, uplift, kappa1, kappa2, deltaz):
    """Evolve a 1-D bedrock channel profile with a stream-power erosion law.

    The channel starts at the analytical steady-state profile for erodibility
    `kappa1`, then evolves explicitly in time with a spatially variable
    erodibility (kappa1 upstream half, kappa2 downstream half).

    Args:
        tend: total simulation time (loop is skipped when tend <= 0).
        uplift: uniform rock-uplift rate.
        kappa1: erodibility of the upstream half of the channel.
        kappa2: erodibility of the downstream half of the channel.
        deltaz: one-off base-level drop applied once t > 0.001 (0 disables it).

    Returns:
        Tuple (xgrid, area, topo, slope) of 1-D numpy arrays of length 200.
    """
    import numpy as np
    nx = 200  # number of grid nodes
    dx = 10   # node spacing
    xgrid = np.arange(0, nx * dx, dx)  # grid
    # Hack's law relating drainage area and stream length
    area = 500 + 0.5 * xgrid ** 2
    # Steady-state slope from the stream power erosion law E = kappa * S * A^(1/2)
    S = (uplift / kappa1) * area ** (-1 / 2)
    # Integrate the steady-state slope upstream to get the initial topography
    topo = np.zeros(nx)
    for i in range(nx - 2, -1, -1):
        topo[i] = topo[i + 1] + 0.5 * (S[i] + S[i + 1]) * dx
    topo = topo - min(topo)
    slope = np.zeros(nx)
    # Spatially variable erodibility: kappa1 upstream, kappa2 downstream
    kappa = np.ones(nx)
    halfway = round(0.5 * nx)
    kappa[0:halfway] = kappa1 * kappa[0:halfway]
    kappa[halfway:nx] = kappa2 * kappa[halfway:nx]
    t = 0
    # Stable explicit time step based on the fastest kinematic wave speed
    dt = 0.05 * dx / (max(kappa) * area[nx - 1] ** (1 / 2))
    while t < tend:
        # Explicit copy (the original used topo[:], a view that only worked
        # because topo is rebound to a fresh array below).
        topoold = topo.copy()
        slope[0:nx - 1] = 1 / dx * abs(topo[1:nx] - topo[0:nx - 1])
        slope[nx - 1] = slope[nx - 2]
        erode = kappa * slope * area ** (1 / 2)
        topo = topo + dt * uplift - dt * erode
        topo[nx - 1] = topoold[nx - 1]  # fixed base level at the outlet
        if (t > 0.001) and (deltaz > 0):
            # apply the base-level drop exactly once
            topo[nx - 1] = topo[nx - 1] - deltaz
            deltaz = 0
        t = t + dt
    return (xgrid, area, topo, slope)
| [
"numpy.zeros",
"numpy.ones",
"numpy.arange"
] | [((135, 160), 'numpy.arange', 'np.arange', (['(0)', '(nx * dx)', 'dx'], {}), '(0, nx * dx, dx)\n', (144, 160), True, 'import numpy as np\n'), ((184, 196), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (192, 196), True, 'import numpy as np\n'), ((440, 452), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (448, 452), True, 'import numpy as np\n'), ((551, 563), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (559, 563), True, 'import numpy as np\n'), ((689, 701), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (697, 701), True, 'import numpy as np\n'), ((712, 723), 'numpy.ones', 'np.ones', (['nx'], {}), '(nx)\n', (719, 723), True, 'import numpy as np\n'), ((854, 866), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (862, 866), True, 'import numpy as np\n')] |
import tensorflow as tf
import os
import numpy as np
from matplotlib import pyplot as plt
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras import Model
from utils import BATCH_SIZE, EPOCH, TEST_SET, TRAIN_SET
# Print full arrays without truncation (for the optional weight dump below).
np.set_printoptions(threshold=np.inf)
fashion = tf.keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion.load_data()
# Keep only the first TRAIN_SET / TEST_SET samples to shorten experiments.
x_train = x_train[:TRAIN_SET]
y_train = y_train[:TRAIN_SET]
x_test = x_test[:TEST_SET]
y_test = y_test[:TEST_SET]
# Scale pixel intensities from [0, 255] to [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0
print("x_train.shape", x_train.shape)
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)  # add a channel dimension so the data matches the network input
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
print("x_train.shape", x_train.shape)
class LeNet5(Model):
    """LeNet-5 style CNN as a Keras subclassed model.

    NOTE(review): the layer attribute names (c1, p1, c2, p2, flatten, f1, f2,
    f3) are part of the checkpoint weight paths used by `load_weights`, so
    renaming them would invalidate previously saved checkpoints.
    """

    def __init__(self):
        super(LeNet5, self).__init__()
        # C1: 6 filters of 5x5, sigmoid activation
        self.c1 = Conv2D(filters=6, kernel_size=(5, 5),
                         activation='sigmoid')
        # S2: 2x2 max pooling with stride 2
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2)
        # C3: 16 filters of 5x5, sigmoid activation
        self.c2 = Conv2D(filters=16, kernel_size=(5, 5),
                         activation='sigmoid')
        # S4: 2x2 max pooling with stride 2
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2)
        self.flatten = Flatten()
        # fully connected classifier head
        self.f1 = Dense(120, activation='sigmoid')
        self.f2 = Dense(84, activation='sigmoid')
        self.f3 = Dense(10, activation='softmax')  # 10-way class probabilities

    def call(self, x):
        """Forward pass: conv/pool feature extractor, then the dense head."""
        x = self.c1(x)
        x = self.p1(x)
        x = self.c2(x)
        x = self.p2(x)
        x = self.flatten(x)
        x = self.f1(x)
        x = self.f2(x)
        y = self.f3(x)
        return y
model = LeNet5()
# Labels are integer class ids and the model ends with softmax, hence
# sparse categorical cross-entropy with from_logits=False.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])
checkpoint_save_path = "./checkpoint/LeNet5.ckpt"
# Resume from an existing checkpoint (TF writes a companion .index file).
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)
# Save weights only, and only when validation performance improves.
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                 save_weights_only=True,
                                                 save_best_only=True)
history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCH, validation_data=(x_test, y_test),
                    validation_freq=1,
                    callbacks=[cp_callback])
model.summary()
# Optional dump of all trainable weights to a text file:
# print(model.trainable_variables)
# file = open('./weights.txt', 'w')
# for v in model.trainable_variables:
#     file.write(str(v.name) + '\n')
#     file.write(str(v.shape) + '\n')
#     file.write(str(v.numpy()) + '\n')
# file.close()
# Sanity check of the expected parameter counts per layer.
print("""model total params should be :
Conv2D 1: %d,
Conv2D 2: %d,
BatchNormaliztion: %d,
Dense 1: %d,
Dense 2: %d,
Dense 3: %d.
""" % (
    6 * 5 * 5 + 6,
    6*16 * 5 * 5 + 16,  # 16 kernels of 5*5 with input depth 6 (previous layer outputs 6 channels)
    0,
    16 * 4 * 4 * 120 + 120,  # flattened feature map is 16*4*4
    84 * 120 + 84,
    84 * 10 + 10
))
############################################### show ###############################################
# Plot the acc and loss curves for the training and validation sets.
| [
"os.path.exists",
"tensorflow.keras.layers.Conv2D",
"numpy.set_printoptions",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.MaxPool2D"
] | [((294, 331), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (313, 331), True, 'import numpy as np\n'), ((1876, 1923), 'os.path.exists', 'os.path.exists', (["(checkpoint_save_path + '.index')"], {}), "(checkpoint_save_path + '.index')\n", (1890, 1923), False, 'import os\n'), ((2043, 2157), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_save_path', 'save_weights_only': '(True)', 'save_best_only': '(True)'}), '(filepath=checkpoint_save_path,\n save_weights_only=True, save_best_only=True)\n', (2077, 2157), True, 'import tensorflow as tf\n'), ((909, 968), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(6)', 'kernel_size': '(5, 5)', 'activation': '"""sigmoid"""'}), "(filters=6, kernel_size=(5, 5), activation='sigmoid')\n", (915, 968), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense\n'), ((1012, 1050), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)'}), '(pool_size=(2, 2), strides=2)\n', (1021, 1050), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense\n'), ((1070, 1130), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(5, 5)', 'activation': '"""sigmoid"""'}), "(filters=16, kernel_size=(5, 5), activation='sigmoid')\n", (1076, 1130), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense\n'), ((1174, 1212), 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)'}), '(pool_size=(2, 2), strides=2)\n', (1183, 1212), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense\n'), ((1237, 1246), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', 
(1244, 1246), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense\n'), ((1265, 1297), 'tensorflow.keras.layers.Dense', 'Dense', (['(120)'], {'activation': '"""sigmoid"""'}), "(120, activation='sigmoid')\n", (1270, 1297), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense\n'), ((1316, 1347), 'tensorflow.keras.layers.Dense', 'Dense', (['(84)'], {'activation': '"""sigmoid"""'}), "(84, activation='sigmoid')\n", (1321, 1347), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense\n'), ((1366, 1397), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (1371, 1397), False, 'from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense\n'), ((1701, 1765), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(False)'}), '(from_logits=False)\n', (1746, 1765), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 26 19:17:11 2019
@author: plunder
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 25 09:44:55 2019
@author: plunder
"""
import matplotlib.pyplot as plt
import numpy as np
import sympy as sp
from discrete_pms import DiscretePMS
from scipy.stats import norm
from init_plot_settings import init_plot_settings
# Demo of the discrete particle-mass-spring (PMS) meso-scale simulation,
# producing the figures for the paper.
init_plot_settings(plt)

dpms = DiscretePMS()

n = 100  # number of light particles

# masses
m_r = 2.        # heavy degree of freedom
m_q = 10. / n   # per-particle mass of the light species

# stiffness
kappa_r = 1.
kappa_q = 0.0 / n


# Energy functions (named defs instead of lambda assignments; PEP 8 E731).
def U_r(r, dr):
    """Harmonic potential energy of the heavy degree of freedom."""
    return 0.5 * kappa_r * r**2


def T_r(r, dr):
    """Kinetic energy of the heavy degree of freedom."""
    return 0.5 * m_r * dr**2


def U_q(q, dq):
    """Harmonic potential energy of a light particle (stiffness may be 0)."""
    return 0.5 * kappa_q * q**2


def T_q(q, dq):
    """Kinetic energy of a light particle."""
    return 0.5 * m_q * dq**2


# constraint coupling the heavy coordinate r and a particle coordinate q
factor = 1.  # only used by some of the alternative constraints below


def g(r, q):
    """Constraint function linking r and q (zero level set)."""
    return q - r


# Alternative constraints kept from earlier experiments:
# g = lambda r, q : factor*q/(1+r**2) - q**3
# g = lambda r, q : -factor * sp.sin(0.1*q)/(1+r**2) + 0.1*q
# g = lambda r, q : q**2 - 1/(1+(r-0.5)**2)  # start within the circle!
# g = lambda r, q : q**2 + r**2  # looks nice and smooth!

# initial state of the heavy coordinate
r0 = 1.
dr0 = 0

# initial particle density: Gaussian profile sampled on a uniform grid
loc = 2
scale = 0.5
qgrid_size = 100
qgrid = np.linspace(-4, 4, qgrid_size)
rho0 = n * norm.pdf(qgrid, loc=loc, scale=scale)

t_end = 60

dpms.init_equations(T_r, U_r, T_q, U_q, g)
dpms.init_meso(r0, dr0, rho0, qgrid, t_end)
dpms.simulate_meso()

# output locations for the paper figures
path = "../../../documents/paper/images/"
fname = path + "demo_meso"

dpms.plot_g(levels=100)
plt.savefig(fname + "_contour.pdf")
plt.show()

# Optional extra plots from earlier experiments:
# dpms.plot_particle_paths()
# plt.savefig(fname + "_particles_time.pdf")
# plt.show()
# dpms.plot_particle_paths(use_r_axis=True, plot_singular_pts=True)
# plt.savefig(fname + "_particles_statespace.pdf")
# plt.show()

dpms.plot_heavy_system()
plt.savefig(fname + "_heavy.pdf")
plt.show()

dpms.plot_particle_paths_meso_time()
plt.savefig(fname + "_particles_time.pdf")
plt.show()

dpms.calc_energies_meso(show_plot=True)
plt.savefig(fname + "_energies.pdf")
plt.show()

# dpms.calc_mod_mass_force(show_plot=True)
# plt.show()
| [
"init_plot_settings.init_plot_settings",
"matplotlib.pyplot.savefig",
"discrete_pms.DiscretePMS",
"numpy.linspace",
"scipy.stats.norm.pdf",
"matplotlib.pyplot.show"
] | [((411, 434), 'init_plot_settings.init_plot_settings', 'init_plot_settings', (['plt'], {}), '(plt)\n', (429, 434), False, 'from init_plot_settings import init_plot_settings\n'), ((444, 457), 'discrete_pms.DiscretePMS', 'DiscretePMS', ([], {}), '()\n', (455, 457), False, 'from discrete_pms import DiscretePMS\n'), ((1183, 1213), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', 'qgrid_size'], {}), '(-4, 4, qgrid_size)\n', (1194, 1213), True, 'import numpy as np\n'), ((1483, 1518), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + '_contour.pdf')"], {}), "(fname + '_contour.pdf')\n", (1494, 1518), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1529), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1527, 1529), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1804), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + '_heavy.pdf')"], {}), "(fname + '_heavy.pdf')\n", (1782, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1805, 1815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1813, 1815), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1896), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + '_particles_time.pdf')"], {}), "(fname + '_particles_time.pdf')\n", (1865, 1896), True, 'import matplotlib.pyplot as plt\n'), ((1897, 1907), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1905, 1907), True, 'import matplotlib.pyplot as plt\n'), ((1950, 1986), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + '_energies.pdf')"], {}), "(fname + '_energies.pdf')\n", (1961, 1986), True, 'import matplotlib.pyplot as plt\n'), ((1987, 1997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1995, 1997), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1261), 'scipy.stats.norm.pdf', 'norm.pdf', (['qgrid'], {'loc': 'loc', 'scale': 'scale'}), '(qgrid, loc=loc, scale=scale)\n', (1232, 1261), False, 'from scipy.stats import norm\n')] |
from abc import abstractmethod
from collections import OrderedDict
from functools import partial
from typing import List
import numpy as np
import pymc3 as pm
import theano as th
import theano.tensor as tt
from pymc3.variational.updates import get_or_compute_grads
from .. import types
from ..io import io_commons
from ..models.fancy_model import GeneralizedContinuousModel
class FancyStochasticOptimizer:
    """The base class of stochastic optimizers equipped with the functionality of saving and loading the
    optimizer state to and from disk (e.g. for stateful optimizers such as ADAM and ADAMAX), and the
    possibility of utilizing the extra attributes of `GeneralizedContinuousModel` to perform structured
    parameter updates, e.g. updating only sample-specific variables while keeping global variables intact
    (see `FancyAdamax` for a concrete implementation).
    """
    @abstractmethod
    def get_optimizer(self,
                      model: GeneralizedContinuousModel=None,
                      approx: pm.MeanField=None):
        """
        Args:
            model: a generalized continuous PyMC3 model
            approx: an instance of PyMC3 mean-field approximation
        Returns:
            A callable function that upon providing `loss_or_grads` and `params`, returns an
            `OrderedDict` of shared theano tensor updates (for example, see `FancyAdamax.get_optimizer`).
        """
        raise NotImplementedError
    @staticmethod
    def get_call_kwargs(_locals_):
        # Drop the two positional-style entries so the remaining local variables can be
        # re-used as keyword arguments when re-binding the optimizer via `functools.partial`
        # (see `FancyAdamax.structured_adamax` for the usage pattern).
        _locals_ = _locals_.copy()
        _locals_.pop('loss_or_grads')
        _locals_.pop('params')
        return _locals_
    @abstractmethod
    def save(self, output_path: str):
        """Saves the optimizer state to `output_path`; concrete subclasses must implement this."""
        raise NotImplementedError
    @abstractmethod
    def load(self, input_path: str):
        """Loads the optimizer state from `input_path`; concrete subclasses must implement this."""
        raise NotImplementedError
class FancyAdamax(FancyStochasticOptimizer):
    """Adamax optimizer with saving/loading functionality and sample-specific-only update mode."""
    def __init__(self,
                 learning_rate: float = 0.002,
                 beta1: float = 0.9,
                 beta2: float = 0.999,
                 epsilon: float = 1e-8,
                 sample_specific_only: bool = False,
                 disable_bias_correction: bool = False):
        """Initializer.
        Args:
            learning_rate: learning rate
            beta1: first moment forgetting factor
            beta2: second moment forgetting factor
            epsilon: a small float for avoiding division-by-zero
            sample_specific_only: only update sample-specific variables (as specified in the generalized model)
            disable_bias_correction: disable moment estimation bias correction
        """
        self.learning_rate = learning_rate
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.sample_specific_only = sample_specific_only
        self.disable_bias_correction = disable_bias_correction
        # placeholder for first (m) and second (u) moments
        # in mean-field type approximation, ``mu`` and ``rho`` each have their own tensors
        # the list elements correspond to ``mu`` and ``rho`` moments, respectively
        self.m_tensors: List[types.TensorSharedVariable] = []
        self.u_tensors: List[types.TensorSharedVariable] = []
        # placeholder for the state of moment estimation bias corrector
        self.res_tensor: types.TensorSharedVariable = None
    def _assert_shared_tensors_available(self):
        # The shared tensors only exist after `structured_adamax` has populated them
        # (via `base_class`); guard the getters below against premature access.
        m_u_available = len(self.m_tensors) == 2 and len(self.u_tensors) == 2
        res_available = self.disable_bias_correction or self.res_tensor is not None
        assert m_u_available and res_available, "Adamax tensors are not available yet"
    def get_mu_m(self):
        """Returns the first-moment (m) shared tensor for the ``mu`` parameters."""
        self._assert_shared_tensors_available()
        return self.m_tensors[0]
    def get_rho_m(self):
        """Returns the first-moment (m) shared tensor for the ``rho`` parameters."""
        self._assert_shared_tensors_available()
        return self.m_tensors[1]
    def get_mu_u(self):
        """Returns the second-moment (u) shared tensor for the ``mu`` parameters."""
        self._assert_shared_tensors_available()
        return self.u_tensors[0]
    def get_rho_u(self):
        """Returns the second-moment (u) shared tensor for the ``rho`` parameters."""
        self._assert_shared_tensors_available()
        return self.u_tensors[1]
    def get_res_tensor(self):
        """Returns the shared tensor holding the bias-correction state (None if disabled)."""
        return self.res_tensor
    @staticmethod
    def structured_adamax(loss_or_grads=None,
                          params=None,
                          model: GeneralizedContinuousModel=None,
                          approx: pm.MeanField=None,
                          learning_rate=0.002, beta1=0.9,
                          beta2=0.999, epsilon=1e-8,
                          sample_specific_only=False,
                          disable_bias_correction=False,
                          base_class: 'FancyAdamax' = None):
        """Adamax stochastic optimizer with partial sample-specific-only update functionality.
        Args:
            loss_or_grads: symbolic loss function or gradients
            params: variational parameter bundle
            model: an instance of generalized model
            approx: an instance of variational approximation for the model
            learning_rate: global learning rate
            beta1: first moment estimation forgetting factor
            beta2: second moment estimation forgetting factor
            epsilon: a small float to avoid division-by-zero
            sample_specific_only: only update parameters registered in the generalized model as sample-specific
            disable_bias_correction: disable moment estimation bias correction
            base_class: a reference to the base class to store a reference to the shared tensors (for I/O)
        Returns:
            returns the function itself if `loss_or_grads` and `params` are not given;
            otherwise, returns an ordered dict of shared tensor updates (to be used in pymc3 for compiling
            the step function)
        """
        # Curried-call convention: with no loss/params, bind the remaining keyword
        # arguments and return the callable for pymc3 to invoke later.
        if loss_or_grads is None and params is None:
            return partial(FancyAdamax.structured_adamax,
                           **FancyStochasticOptimizer.get_call_kwargs(locals()))
        elif loss_or_grads is None or params is None:
            raise ValueError('Please provide both `loss_or_grads` and `params` to get updates')
        assert model is not None, 'Please provide `model` to get updates'
        assert approx is not None, 'Please provide `approx` to get updates'
        all_grads = get_or_compute_grads(loss_or_grads, params)
        updates = OrderedDict()
        # indices of sample-specific vars
        # NOTE(review): the indices appear to address the flattened mean-field
        # parameter vector (via each vmap's slice) — confirm against io_commons.
        if sample_specific_only:
            vmap_list = io_commons.get_var_map_list_from_meanfield_approx(approx)
            sample_specific_indices = []
            for vmap in vmap_list:
                if vmap.var in model.sample_specific_var_registry:
                    sample_specific_indices += [idx for idx in range(vmap.slc.start, vmap.slc.stop)]
            update_indices = th.shared(np.asarray(sample_specific_indices, dtype=np.int))
            num_dof = len(sample_specific_indices)
        # Using theano constant to prevent upcasting of float32
        one = tt.constant(1)
        if disable_bias_correction:
            a_t = learning_rate
        else:
            # `res` tracks beta1**t across steps; the effective rate
            # a_t = lr / (1 - beta1**t) corrects the first-moment bias.
            res_prev = th.shared(pm.theanof.floatX(beta1))
            res = beta1 * res_prev
            a_t = learning_rate / (one - res)
            updates[res_prev] = res
            if base_class is not None:
                base_class.res_tensor = res_prev
        for param, g_t in zip(params, all_grads):
            if sample_specific_only:
                # restrict both the gradient view and the moment tensors to the
                # sample-specific degrees of freedom
                g_t_view = g_t[update_indices]
                m_prev = th.shared(np.zeros((num_dof,), dtype=types.floatX),
                                   broadcastable=(False,))
                u_prev = th.shared(np.zeros((num_dof,), dtype=types.floatX),
                                   broadcastable=(False,))
            else:
                g_t_view = g_t
                value = param.get_value(borrow=True)
                m_prev = th.shared(np.zeros(value.shape, dtype=types.floatX),
                                   broadcastable=(False,))
                u_prev = th.shared(np.zeros(value.shape, dtype=types.floatX),
                                   broadcastable=(False,))
            # save a reference to m and u in the base class
            if base_class is not None:
                base_class.m_tensors.append(m_prev)
                base_class.u_tensors.append(u_prev)
            # AdaMax update rule: m_t is the exponential moving average of the
            # gradient; u_t is the exponentially weighted infinity norm.
            m_t = beta1 * m_prev + (one - beta1) * g_t_view
            u_t = tt.maximum(beta2 * u_prev, abs(g_t_view))
            # epsilon keeps the division well-defined when u_t vanishes
            step = a_t * m_t / (u_t + epsilon)
            if sample_specific_only:
                new_param = tt.inc_subtensor(param[update_indices], -step)
            else:
                new_param = param - step
            updates[m_prev] = m_t
            updates[u_prev] = u_t
            updates[param] = new_param
        return updates
    def get_optimizer(self,
                      model: GeneralizedContinuousModel=None,
                      approx: pm.MeanField=None):
        """Returns the curried `structured_adamax` bound to this instance's hyperparameters."""
        return FancyAdamax.structured_adamax(
            model=model,
            approx=approx,
            beta1=self.beta1,
            beta2=self.beta2,
            learning_rate=self.learning_rate,
            epsilon=self.epsilon,
            sample_specific_only=self.sample_specific_only,
            disable_bias_correction=self.disable_bias_correction,
            base_class=self)
    def save(self, output_path: str) -> None:
        """Saves the state of the optimizer to disk.
        Args:
            output_path: output path (must be writable directory)
        """
        from ..io import io_adamax  # lazy import to break import cycle
        io_adamax.AdamaxStateExporter(self, output_path)()
    def load(self, input_path: str) -> None:
        """Loads the state of the optimizer from disk.
        Args:
            input_path: input path (must be a readable directory)
        """
        from ..io import io_adamax  # lazy import to break import cycle
        io_adamax.AdamaxStateImporter(self, input_path)()
| [
"collections.OrderedDict",
"theano.tensor.constant",
"pymc3.variational.updates.get_or_compute_grads",
"numpy.asarray",
"numpy.zeros",
"pymc3.theanof.floatX",
"theano.tensor.inc_subtensor"
] | [((6396, 6439), 'pymc3.variational.updates.get_or_compute_grads', 'get_or_compute_grads', (['loss_or_grads', 'params'], {}), '(loss_or_grads, params)\n', (6416, 6439), False, 'from pymc3.variational.updates import get_or_compute_grads\n'), ((6458, 6471), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6469, 6471), False, 'from collections import OrderedDict\n'), ((7094, 7108), 'theano.tensor.constant', 'tt.constant', (['(1)'], {}), '(1)\n', (7105, 7108), True, 'import theano.tensor as tt\n'), ((6913, 6962), 'numpy.asarray', 'np.asarray', (['sample_specific_indices'], {'dtype': 'np.int'}), '(sample_specific_indices, dtype=np.int)\n', (6923, 6962), True, 'import numpy as np\n'), ((7225, 7249), 'pymc3.theanof.floatX', 'pm.theanof.floatX', (['beta1'], {}), '(beta1)\n', (7242, 7249), True, 'import pymc3 as pm\n'), ((8677, 8723), 'theano.tensor.inc_subtensor', 'tt.inc_subtensor', (['param[update_indices]', '(-step)'], {}), '(param[update_indices], -step)\n', (8693, 8723), True, 'import theano.tensor as tt\n'), ((7626, 7666), 'numpy.zeros', 'np.zeros', (['(num_dof,)'], {'dtype': 'types.floatX'}), '((num_dof,), dtype=types.floatX)\n', (7634, 7666), True, 'import numpy as np\n'), ((7762, 7802), 'numpy.zeros', 'np.zeros', (['(num_dof,)'], {'dtype': 'types.floatX'}), '((num_dof,), dtype=types.floatX)\n', (7770, 7802), True, 'import numpy as np\n'), ((8000, 8041), 'numpy.zeros', 'np.zeros', (['value.shape'], {'dtype': 'types.floatX'}), '(value.shape, dtype=types.floatX)\n', (8008, 8041), True, 'import numpy as np\n'), ((8137, 8178), 'numpy.zeros', 'np.zeros', (['value.shape'], {'dtype': 'types.floatX'}), '(value.shape, dtype=types.floatX)\n', (8145, 8178), True, 'import numpy as np\n')] |
from verifai.simulators.webots.webots_task import webots_task
from verifai.simulators.webots.client_webots import ClientWebots
from math import sin
from math import cos
import numpy as np
from math import atan2
from collections import namedtuple
import os
from dotmap import DotMap
import pickle
from shapely.geometry import Point, Polygon
# The `controller` package ships with Webots; abort with an actionable message
# when this script is run outside of a Webots installation.
try:
    from controller import Supervisor
except ModuleNotFoundError:
    import sys
    sys.exit('This functionality requires webots to be installed')
# get distance from x, y to point i
def getDist(data, xy):
    """Return the Euclidean distance from each point in `data` to the point `xy`."""
    offsets = np.array(data) - np.array(xy)
    return np.linalg.norm(offsets, axis=1)
# get two nearest points
Line = namedtuple('Line', ['x1', 'y1', 'x2', 'y2'])
def getLine(x, y, data):
    """Return the waypoint segment through the two points of `data` nearest to (x, y)."""
    nearest = np.argpartition(getDist(data=data, xy=[x, y]), 2)
    lo, hi = sorted((nearest[0], nearest[1]))
    return Line(x1=data[lo][0], y1=data[lo][1], x2=data[hi][0], y2=data[hi][1])
# Directory used to exchange pickled state with the vehicle controllers
# (see the `data_turning.pickle` / `data_ego.pickle` writes below).
curr_dir = os.getcwd()
par_dir = curr_dir
# Defining the task as a webots task
class scenic_intersection(webots_task):
    """Webots supervisor task that replays a sampled intersection scenario.

    On every simulation step it publishes the turning car's pose plus its
    nearest waypoint segment to ``data_turning.pickle``, and braking
    information for the ego car to ``data_ego.pickle``, for the vehicle
    controllers to consume.
    """
    def __init__(self, N_SIM_STEPS, supervisor):
        super().__init__(N_SIM_STEPS, supervisor)
    def use_sample(self, sample):
        """Place the sampled objects into the Webots world.

        Returns:
            (ego_car, turning_car): the supervisor nodes for the two tracked cars.
        """
        print('Sample recieved')
        print(sample)
        self.data = sample.params.turnWaypoints
        car_id = 0
        for obj in sample.objects:
            # `offset` shifts the car along the lane axis depending on its role.
            # (renamed local from `object`, which shadowed the builtin)
            if obj.webotsType == 'TurningCar':
                node = self.supervisor.getFromDef('TurningCar')
                turning_car = node
                offset = -20
            if obj.webotsType == 'Ego':
                node = self.supervisor.getFromDef('EgoCar')
                ego_car = node
                offset = 30
            if obj.webotsType == 'ToyotaPrius':
                node = self.supervisor.getFromDef('waiting_car'+str(car_id+3))
                car_id +=1
                offset = 0
            node_pos = node.getField('translation').getSFVec3f()
            pos = obj.position
            node.getField('translation').setSFVec3f([pos[0], node_pos[1], pos[1]+offset])
            rot = [0, 1, 0, -obj.heading]
            node.getField('rotation').setSFRotation(rot)
        return ego_car, turning_car
    def run_task(self, sample):
        """Run the simulation for N_SIM_STEPS, publishing controller inputs each step."""
        ego_car, turning_car = self.use_sample(sample)
        car_length = 2.995
        intersection = [(-28, 25), (-28, 17), (-7, 17), (-7, 25)]
        destination = max([y for (x, y) in intersection])
        intersection_safe = 1
        for _ in range(self.N_SIM_STEPS):
            self.supervisor.step(1)
            # get car position
            turning_x = turning_car.getPosition()[0]
            turning_y = turning_car.getPosition()[2]
            turning_th = atan2(turning_car.getOrientation()[2], turning_car.getOrientation()[0])
            # project the reference point to the front of the car
            turning_y = turning_y + car_length * cos(turning_th)
            turning_x = turning_x + car_length * sin(turning_th)
            # find nearest waypoint segment
            line = getLine(x=turning_x, y=turning_y, data=self.data)
            # publish pose + segment to the turning-car controller
            write_data = DotMap()
            write_data.turning.theta = turning_th
            write_data.turning.x = turning_x
            write_data.turning.y = turning_y
            write_data.line.x1 = line.x1
            write_data.line.y1 = line.y1
            write_data.line.x2 = line.x2
            write_data.line.y2 = line.y2
            # fix: close the file deterministically (was `pickle.dump(..., open(...))`,
            # which leaked a file handle every step)
            with open(par_dir + '/data_turning.pickle', 'wb') as f:
                pickle.dump(write_data, f)
            ego_x = ego_car.getPosition()[0]
            ego_y = ego_car.getPosition()[2]
            ego_th = atan2(ego_car.getOrientation()[2], ego_car.getOrientation()[0])
            ego_y = ego_y + car_length * cos(ego_th)
            ego_x = ego_x + car_length * sin(ego_th)
            intersection_buffer = 2.5 # This decides how far you are from the intersection to provide the warning
            # If 0 then you provide warning when you just enter the intersection
            intersection_polygon = Polygon(intersection)
            intersection_polygon = intersection_polygon.buffer(intersection_buffer + 0.5)
            intersection_safe = not Point(turning_x, turning_y).within(intersection_polygon)
            # publish braking info to the ego-car controller
            write_data = DotMap()
            write_data.turning.theta = turning_th
            write_data.turning.x = turning_x
            write_data.turning.y = turning_y
            write_data.braking_info.safe = intersection_safe
            write_data.braking_info.dist = ego_y - destination
            with open(par_dir + '/data_ego.pickle', 'wb') as f:
                pickle.dump(write_data, f)
        return
# ---- script entry: wire the supervisor task into the VerifAI Webots client ----
PORT = 8888
BUFSIZE = 4096
N_SIM_STEPS = 300
supervisor = Supervisor()
simulation_data = DotMap()
simulation_data.port = PORT
simulation_data.bufsize = BUFSIZE
simulation_data.task = scenic_intersection(N_SIM_STEPS=N_SIM_STEPS, supervisor=supervisor)
client_task = ClientWebots(simulation_data)
if not client_task.run_client():
    # fix: these two statements were flush-left, leaving the `if` without a
    # body (IndentationError); they belong inside the failure branch.
    print("End of accident scenario generation")
    supervisor.simulationQuit(True)
| [
"collections.namedtuple",
"numpy.argpartition",
"controller.Supervisor",
"dotmap.DotMap",
"verifai.simulators.webots.client_webots.ClientWebots",
"os.getcwd",
"math.cos",
"numpy.array",
"shapely.geometry.Polygon",
"shapely.geometry.Point",
"sys.exit",
"math.sin"
] | [((654, 698), 'collections.namedtuple', 'namedtuple', (['"""Line"""', "['x1', 'y1', 'x2', 'y2']"], {}), "('Line', ['x1', 'y1', 'x2', 'y2'])\n", (664, 698), False, 'from collections import namedtuple\n'), ((1005, 1016), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1014, 1016), False, 'import os\n'), ((4786, 4798), 'controller.Supervisor', 'Supervisor', ([], {}), '()\n', (4796, 4798), False, 'from controller import Supervisor\n'), ((4817, 4825), 'dotmap.DotMap', 'DotMap', ([], {}), '()\n', (4823, 4825), False, 'from dotmap import DotMap\n'), ((4993, 5022), 'verifai.simulators.webots.client_webots.ClientWebots', 'ClientWebots', (['simulation_data'], {}), '(simulation_data)\n', (5005, 5022), False, 'from verifai.simulators.webots.client_webots import ClientWebots\n'), ((783, 808), 'numpy.argpartition', 'np.argpartition', (['dists', '(2)'], {}), '(dists, 2)\n', (798, 808), True, 'import numpy as np\n'), ((431, 493), 'sys.exit', 'sys.exit', (['"""This functionality requires webots to be installed"""'], {}), "('This functionality requires webots to be installed')\n", (439, 493), False, 'import sys\n'), ((581, 595), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (589, 595), True, 'import numpy as np\n'), ((598, 610), 'numpy.array', 'np.array', (['xy'], {}), '(xy)\n', (606, 610), True, 'import numpy as np\n'), ((3190, 3198), 'dotmap.DotMap', 'DotMap', ([], {}), '()\n', (3196, 3198), False, 'from dotmap import DotMap\n'), ((4125, 4146), 'shapely.geometry.Polygon', 'Polygon', (['intersection'], {}), '(intersection)\n', (4132, 4146), False, 'from shapely.geometry import Point, Polygon\n'), ((4356, 4364), 'dotmap.DotMap', 'DotMap', ([], {}), '()\n', (4362, 4364), False, 'from dotmap import DotMap\n'), ((2936, 2951), 'math.cos', 'cos', (['turning_th'], {}), '(turning_th)\n', (2939, 2951), False, 'from math import cos\n'), ((3001, 3016), 'math.sin', 'sin', (['turning_th'], {}), '(turning_th)\n', (3004, 3016), False, 'from math import sin\n'), ((3803, 3814), 
'math.cos', 'cos', (['ego_th'], {}), '(ego_th)\n', (3806, 3814), False, 'from math import cos\n'), ((3856, 3867), 'math.sin', 'sin', (['ego_th'], {}), '(ego_th)\n', (3859, 3867), False, 'from math import sin\n'), ((4273, 4300), 'shapely.geometry.Point', 'Point', (['turning_x', 'turning_y'], {}), '(turning_x, turning_y)\n', (4278, 4300), False, 'from shapely.geometry import Point, Polygon\n')] |
import matplotlib.pyplot as plt
import numpy as np
from keras import regularizers
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adam
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from keras.callbacks.callbacks import ModelCheckpoint
# ---- data: pre-extracted VGG19 feature vectors, class label in the last column ----
train_data = np.loadtxt('vgg19_train.csv', delimiter=",")
perm = np.arange(train_data.shape[0])
np.random.shuffle(perm)
train_data = train_data[perm]
train_X = train_data[:, :-1]
train_y = train_data[:, -1]
validation_data = np.loadtxt('vgg19_validation.csv', delimiter=",")
validation_X = validation_data[:, :-1]
validation_y = validation_data[:, -1]
# ---- small L2-regularized classifier head on top of the 512-d features ----
model = Sequential()
model.add(Dense(128, activation='relu', kernel_initializer='he_uniform', input_shape=(512,),
                kernel_regularizer=regularizers.l2(0.1), bias_regularizer=regularizers.l2(0.1)))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer=Adam(lr=100e-6),
              metrics=['acc'])
history = model.fit(
    x=train_X,
    y=train_y,
    epochs=100,
    batch_size=256,
    validation_data=(validation_X, validation_y),
    verbose=2)
# ---- learning curves: accuracy and loss for both splits ----
def _plot_history(train_series, val_series, y_label):
    plt.plot(train_series)
    plt.plot(val_series)
    plt.ylabel(y_label)
    plt.xlabel('Epoka')
    plt.legend(['Zbiór trenujący', 'Zbiór walidujący'], loc='upper left')
    plt.show()
_plot_history(history.history['acc'], history.history['val_acc'], 'Dokładność modelu')
_plot_history(history.history['loss'], history.history['val_loss'], 'Funkcja kosztu')
| [
"keras.optimizers.Adam",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.models.Sequential",
"keras.regularizers.l2",
"keras.layers.Dense",
"numpy.loadtxt",
"keras.layers.Dropout",
"numpy.arange",
... | [((419, 463), 'numpy.loadtxt', 'np.loadtxt', (['"""vgg19_train.csv"""'], {'delimiter': '""","""'}), "('vgg19_train.csv', delimiter=',')\n", (429, 463), True, 'import numpy as np\n'), ((468, 498), 'numpy.arange', 'np.arange', (['train_data.shape[0]'], {}), '(train_data.shape[0])\n', (477, 498), True, 'import numpy as np\n'), ((499, 519), 'numpy.random.shuffle', 'np.random.shuffle', (['s'], {}), '(s)\n', (516, 519), True, 'import numpy as np\n'), ((597, 646), 'numpy.loadtxt', 'np.loadtxt', (['"""vgg19_validation.csv"""'], {'delimiter': '""","""'}), "('vgg19_validation.csv', delimiter=',')\n", (607, 646), True, 'import numpy as np\n'), ((1055, 1067), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1065, 1067), False, 'from keras.models import Sequential\n'), ((1890, 1922), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {}), "(history.history['acc'])\n", (1898, 1922), True, 'import matplotlib.pyplot as plt\n'), ((1923, 1959), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']"], {}), "(history.history['val_acc'])\n", (1931, 1959), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2024), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dokładność modelu"""'], {}), "('Dokładność modelu')\n", (2003, 2024), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2044), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoka"""'], {}), "('Epoka')\n", (2035, 2044), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2114), 'matplotlib.pyplot.legend', 'plt.legend', (["['Zbiór trenujący', 'Zbiór walidujący']"], {'loc': '"""upper left"""'}), "(['Zbiór trenujący', 'Zbiór walidujący'], loc='upper left')\n", (2055, 2114), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2125), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2123, 2125), True, 'import matplotlib.pyplot as plt\n'), ((2127, 2160), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (2135, 2160), True, 
'import matplotlib.pyplot as plt\n'), ((2161, 2198), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (2169, 2198), True, 'import matplotlib.pyplot as plt\n'), ((2225, 2253), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Funkcja kosztu"""'], {}), "('Funkcja kosztu')\n", (2235, 2253), True, 'import matplotlib.pyplot as plt\n'), ((2254, 2273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoka"""'], {}), "('Epoka')\n", (2264, 2273), True, 'import matplotlib.pyplot as plt\n'), ((2274, 2343), 'matplotlib.pyplot.legend', 'plt.legend', (["['Zbiór trenujący', 'Zbiór walidujący']"], {'loc': '"""upper left"""'}), "(['Zbiór trenujący', 'Zbiór walidujący'], loc='upper left')\n", (2284, 2343), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2354), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2352, 2354), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1265), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1260, 1265), False, 'from keras.layers import Dense, Dropout\n'), ((1277, 1338), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_uniform"""'}), "(64, activation='relu', kernel_initializer='he_uniform')\n", (1282, 1338), False, 'from keras.layers import Dense, Dropout\n'), ((1352, 1382), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1357, 1382), False, 'from keras.layers import Dense, Dropout\n'), ((1467, 1482), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (1471, 1482), False, 'from keras.optimizers import Adam\n'), ((1181, 1201), 'keras.regularizers.l2', 'regularizers.l2', (['(0.1)'], {}), '(0.1)\n', (1196, 1201), False, 'from keras import regularizers\n'), ((1220, 1240), 'keras.regularizers.l2', 'regularizers.l2', (['(0.1)'], {}), '(0.1)\n', (1235, 1240), False, 'from keras import regularizers\n')] |
from DataSocket import TCPSendSocket, TCPReceiveSocket, RAW
import time
import numpy as np
import threading
import struct
import sys
# Loopback endpoint shared by the sender and the receiver sockets below.
send_port = 4242
rec_port = 4242
ip = '127.0.0.1'
# define function to print the echo back from matlab
def print_data(data):
    """Print the raw echoed bytes alongside the two floats they encode."""
    unpacked = struct.unpack('ff', data)
    print(data, "unpacked:", unpacked)
# create a send and receive socket; the receiver funnels every raw payload
# through `print_data`
send_socket = TCPSendSocket(tcp_port=send_port, tcp_ip=ip, send_type=RAW)
receive_socket = TCPReceiveSocket(tcp_port=rec_port, tcp_ip=ip, receive_as_raw=True, handler_function=print_data)
# start the sockets
send_socket.start()
receive_socket.start()
# Event used to tell the background sender thread (send_sig) to shut down.
stop_flag = threading.Event()
def send_sig():
    """Send a random float pair every half second until `stop_flag` is set."""
    while not stop_flag.is_set():
        sample = np.random.random((1, 2)).tolist()[0]
        send_socket.send_data(struct.pack('ff', *sample))
        time.sleep(0.5)
# Run the sender loop in a background thread until the user presses enter.
thread = threading.Thread(target=send_sig)
thread.start()
input('Press enter to shutdown.')
# Signal the sender loop to stop, then wait for it to finish.
stop_flag.set()
thread.join()
# close the sockets
send_socket.stop()
receive_socket.stop()
sys.exit(0) | [
"numpy.random.random",
"struct.pack",
"time.sleep",
"threading.Event",
"struct.unpack",
"DataSocket.TCPSendSocket",
"sys.exit",
"threading.Thread",
"DataSocket.TCPReceiveSocket"
] | [((368, 427), 'DataSocket.TCPSendSocket', 'TCPSendSocket', ([], {'tcp_port': 'send_port', 'tcp_ip': 'ip', 'send_type': 'RAW'}), '(tcp_port=send_port, tcp_ip=ip, send_type=RAW)\n', (381, 427), False, 'from DataSocket import TCPSendSocket, TCPReceiveSocket, RAW\n'), ((445, 545), 'DataSocket.TCPReceiveSocket', 'TCPReceiveSocket', ([], {'tcp_port': 'rec_port', 'tcp_ip': 'ip', 'receive_as_raw': '(True)', 'handler_function': 'print_data'}), '(tcp_port=rec_port, tcp_ip=ip, receive_as_raw=True,\n handler_function=print_data)\n', (461, 545), False, 'from DataSocket import TCPSendSocket, TCPReceiveSocket, RAW\n'), ((618, 635), 'threading.Event', 'threading.Event', ([], {}), '()\n', (633, 635), False, 'import threading\n'), ((869, 902), 'threading.Thread', 'threading.Thread', ([], {'target': 'send_sig'}), '(target=send_sig)\n', (885, 902), False, 'import threading\n'), ((1045, 1056), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1053, 1056), False, 'import sys\n'), ((290, 315), 'struct.unpack', 'struct.unpack', (['"""ff"""', 'data'], {}), "('ff', data)\n", (303, 315), False, 'import struct\n'), ((764, 788), 'struct.pack', 'struct.pack', (['"""ff"""', '*data'], {}), "('ff', *data)\n", (775, 788), False, 'import struct\n'), ((842, 857), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (852, 857), False, 'import time\n'), ((703, 727), 'numpy.random.random', 'np.random.random', (['(1, 2)'], {}), '((1, 2))\n', (719, 727), True, 'import numpy as np\n')] |
from numpy import loadtxt, ndarray, min, max
from sklearn.metrics import adjusted_mutual_info_score, adjusted_rand_score, fowlkes_mallows_score
from SNNDPC import SNNDPC
if __name__ == '__main__':
    # Parameters (swap in the Flame set to reproduce that experiment):
    # pathData = "../data/Flame.tsv"; k = 5; nc = 2
    pathData = "../data/Aggregation.tsv"
    k = 15
    nc = 7
    # Load the dataset: features in all columns but the last, labels in the last.
    raw: ndarray = loadtxt(pathData)
    label = raw[:, -1]
    data: ndarray = raw[:, :-1]
    # Min-max normalize each feature to [0, 1]. Note `min`/`max` are the numpy
    # versions imported at file top, not the builtins.
    lo = min(data, axis=0)
    hi = max(data, axis=0)
    data = (data - lo) / (hi - lo)
    # Cluster and report external validation scores.
    centroid, assignment = SNNDPC(k, nc, data)
    print(f"Centroids = {centroid.tolist()}")
    print(f"AMI = {adjusted_mutual_info_score(label, assignment):.4f}")
    print(f"ARI = {adjusted_rand_score(label, assignment):.4f}")
    print(f"FMI = {fowlkes_mallows_score(label, assignment):.4f}")
| [
"SNNDPC.SNNDPC",
"sklearn.metrics.adjusted_mutual_info_score",
"sklearn.metrics.adjusted_rand_score",
"numpy.max",
"sklearn.metrics.fowlkes_mallows_score",
"numpy.min",
"numpy.loadtxt"
] | [((515, 532), 'numpy.loadtxt', 'loadtxt', (['pathData'], {}), '(pathData)\n', (522, 532), False, 'from numpy import loadtxt, ndarray, min, max\n'), ((685, 704), 'SNNDPC.SNNDPC', 'SNNDPC', (['k', 'nc', 'data'], {}), '(k, nc, data)\n', (691, 704), False, 'from SNNDPC import SNNDPC\n'), ((600, 617), 'numpy.min', 'min', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (603, 617), False, 'from numpy import loadtxt, ndarray, min, max\n'), ((622, 639), 'numpy.max', 'max', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (625, 639), False, 'from numpy import loadtxt, ndarray, min, max\n'), ((642, 659), 'numpy.min', 'min', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (645, 659), False, 'from numpy import loadtxt, ndarray, min, max\n'), ((764, 809), 'sklearn.metrics.adjusted_mutual_info_score', 'adjusted_mutual_info_score', (['label', 'assignment'], {}), '(label, assignment)\n', (790, 809), False, 'from sklearn.metrics import adjusted_mutual_info_score, adjusted_rand_score, fowlkes_mallows_score\n'), ((833, 871), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['label', 'assignment'], {}), '(label, assignment)\n', (852, 871), False, 'from sklearn.metrics import adjusted_mutual_info_score, adjusted_rand_score, fowlkes_mallows_score\n'), ((895, 935), 'sklearn.metrics.fowlkes_mallows_score', 'fowlkes_mallows_score', (['label', 'assignment'], {}), '(label, assignment)\n', (916, 935), False, 'from sklearn.metrics import adjusted_mutual_info_score, adjusted_rand_score, fowlkes_mallows_score\n')] |
import streamlit as st
import numpy as np
import pandas as pd
import joblib
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import plotly.express as px
from PIL import Image
import os
import cv2
#from google.colab.patches import cv2_imshow
import dlib
from skimage import io
import matplotlib.pyplot as plt
from pathlib import Path
from keras.models import load_model
from PIL import Image, ImageOps
import numpy as np
def welcome():
    """Landing page: app description plus instructions for supplying an image."""
    st.title('이 앱은 나의 관상으로 보았을때 어떤 직업이 어울리는지 보는 앱입니다.')
    for guide in ('이미지 또는 캠코더로 직접 입력 해 주세요.', '모바일에서는 상단의 ">"를 클릭해 이미지입력 방식을 '):
        st.subheader(guide)
    st.image('face_detection.jpeg', use_column_width=True)
def photo():
    """Streamlit page: classify an uploaded photo into a best-matching occupation.

    Pipeline: read image -> grayscale -> dlib face detection -> crop each face ->
    Teachable-Machine Keras model inference -> map the argmax label to a job name.
    """
    st.title('포토파일입력')
    uploaded_file = st.file_uploader("이미지파일선택",type = ["jpg","png","jpeg"])
    if uploaded_file is not None:
        image = Image.open(uploaded_file)
        st.image(image, caption='선택된 이미지.', use_column_width=True)
        st.write("")
        st.write("누구일까요")
        # PIL -> OpenCV (RGB -> BGR), then grayscale for the face detector
        numpy_image = np.array(image)
        opencv_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
        gray = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2GRAY)
        st.image(gray, caption='그레이변환.', use_column_width=True)
        try:
            # detect faces with dlib's frontal-face detector
            face_detector = dlib.get_frontal_face_detector()
            detected_faces = face_detector(gray, 1)
            face_frames = [(x.left(), x.top(), x.right(), x.bottom()) for x in detected_faces]
            # fix: load the model once instead of re-loading it for every face
            model = load_model('keras_model.h5')
            for n, face_rect in enumerate(face_frames):
                face = Image.fromarray(opencv_image).crop(face_rect)
                st.image(face, caption='페이스', use_column_width=True)
                # single-image batch in the model's expected 224x224x3 layout
                data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
                # NOTE(review): Image.ANTIALIAS is deprecated in Pillow >= 10
                face_input = ImageOps.fit(face, (224, 224), Image.ANTIALIAS)
                # scale pixel values into [-1, 1]
                image_array = np.asarray(face_input)
                data[0] = (image_array.astype(np.float32) / 127.0) - 1
                prediction = model.predict(data)
                st.write(prediction)
                # map the most probable class index to a job name
                label_ = np.argmax(prediction[0])
                result1 = {0: "정치인", 1: "연예인", 2: "교수", 3: "CEO", 4: "운동선수"}.get(label_, "누구일까요?")
                st.write("나의 최적의 직업은?: "+ result1)
        except Exception:
            # fix: was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt propagate
            st.error('인물사진을 다시 촬영하세요.얼굴이 있는 사진이고 핸드폰 세로 사진입니다.')
def video():
    """Streamlit page: classify a camera snapshot into a best-matching occupation.

    Same pipeline as `photo()` but sourced from `st.camera_input`:
    decode bytes -> grayscale -> dlib face detection -> crop -> Keras inference.
    """
    st.title('캠코더입력')
    img_file_buffer = st.camera_input("Take a picture")
    if img_file_buffer is not None:
        # decode the snapshot bytes into an OpenCV BGR image (numpy ndarray)
        bytes_data = img_file_buffer.getvalue()
        cv2_img = cv2.imdecode(np.frombuffer(bytes_data, np.uint8), cv2.IMREAD_COLOR)
        try:
            if cv2_img is not None:
                image = cv2_img
                st.image(image, caption='선택된 이미지.', use_column_width=True)
                st.write("")
                st.write("누구일까요")
                # convert to grayscale for the face detector
                numpy_image = np.array(image)
                opencv_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
                gray = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2GRAY)
                st.image(gray, caption='그레이변환.', use_column_width=True)
                try:
                    # detect faces with dlib's frontal-face detector
                    face_detector = dlib.get_frontal_face_detector()
                    detected_faces = face_detector(gray, 1)
                    face_frames = [(x.left(), x.top(), x.right(), x.bottom()) for x in detected_faces]
                    # fix: load the model once instead of re-loading it for every face
                    model = load_model('keras_model.h5')
                    for n, face_rect in enumerate(face_frames):
                        face = Image.fromarray(opencv_image).crop(face_rect)
                        st.image(face, caption='페이스', use_column_width=True)
                        # single-image batch in the model's expected 224x224x3 layout
                        data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
                        # NOTE(review): Image.ANTIALIAS is deprecated in Pillow >= 10
                        face_input = ImageOps.fit(face, (224, 224), Image.ANTIALIAS)
                        # scale pixel values into [-1, 1]
                        image_array = np.asarray(face_input)
                        data[0] = (image_array.astype(np.float32) / 127.0) - 1
                        prediction = model.predict(data)
                        st.write(prediction)
                        # map the most probable class index to a job name
                        label_ = np.argmax(prediction[0])
                        result1 = {0: "정치인", 1: "연예인", 2: "교수", 3: "CEO", 4: "운동선수"}.get(label_, "누구일까요?")
                        st.write("나의 최적의 직업은?: "+ result1)
                except Exception:
                    # fix: was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt propagate
                    st.error('인물사진을 다시 촬영하세요.얼굴이 있는 사진이고 핸드폰 세로 사진입니다.')
        except Exception:
            # fix: was a bare `except:` — report only genuine capture/decoding failures
            st.write('사진을촬영해 주세요.')
# Sidebar navigation: route the selected page name to its render function.
selected_box = st.sidebar.selectbox('다음중 선택해주세요', ('설명서', '사진파일입력', '캠코더입력'))
_pages = {
    '설명서': welcome,
    '사진파일입력': photo,
    '캠코더입력': video,
}
_page = _pages.get(selected_box)
if _page is not None:
    _page()
    st.sidebar.write("모바일에서는 상단의 X를 눌러 원래화면으로 가세요")
| [
"streamlit.image",
"PIL.ImageOps.fit",
"numpy.array",
"streamlit.title",
"streamlit.sidebar.write",
"numpy.asarray",
"dlib.get_frontal_face_detector",
"numpy.frombuffer",
"streamlit.file_uploader",
"streamlit.write",
"numpy.argmax",
"streamlit.subheader",
"cv2.cvtColor",
"streamlit.camera_... | [((7064, 7126), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""다음중 선택해주세요"""', "('설명서', '사진파일입력', '캠코더입력')"], {}), "('다음중 선택해주세요', ('설명서', '사진파일입력', '캠코더입력'))\n", (7084, 7126), True, 'import streamlit as st\n'), ((468, 519), 'streamlit.title', 'st.title', (['"""이 앱은 나의 관상으로 보았을때 어떤 직업이 어울리는지 보는 앱입니다."""'], {}), "('이 앱은 나의 관상으로 보았을때 어떤 직업이 어울리는지 보는 앱입니다.')\n", (476, 519), True, 'import streamlit as st\n'), ((524, 564), 'streamlit.subheader', 'st.subheader', (['"""이미지 또는 캠코더로 직접 입력 해 주세요."""'], {}), "('이미지 또는 캠코더로 직접 입력 해 주세요.')\n", (536, 564), True, 'import streamlit as st\n'), ((570, 616), 'streamlit.subheader', 'st.subheader', (['"""모바일에서는 상단의 ">"를 클릭해 이미지입력 방식을 """'], {}), '(\'모바일에서는 상단의 ">"를 클릭해 이미지입력 방식을 \')\n', (582, 616), True, 'import streamlit as st\n'), ((627, 681), 'streamlit.image', 'st.image', (['"""face_detection.jpeg"""'], {'use_column_width': '(True)'}), "('face_detection.jpeg', use_column_width=True)\n", (635, 681), True, 'import streamlit as st\n'), ((699, 717), 'streamlit.title', 'st.title', (['"""포토파일입력"""'], {}), "('포토파일입력')\n", (707, 717), True, 'import streamlit as st\n'), ((738, 794), 'streamlit.file_uploader', 'st.file_uploader', (['"""이미지파일선택"""'], {'type': "['jpg', 'png', 'jpeg']"}), "('이미지파일선택', type=['jpg', 'png', 'jpeg'])\n", (754, 794), True, 'import streamlit as st\n'), ((3356, 3373), 'streamlit.title', 'st.title', (['"""캠코더입력"""'], {}), "('캠코더입력')\n", (3364, 3373), True, 'import streamlit as st\n'), ((3396, 3429), 'streamlit.camera_input', 'st.camera_input', (['"""Take a picture"""'], {}), "('Take a picture')\n", (3411, 3429), True, 'import streamlit as st\n'), ((7174, 7221), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""모바일에서는 상단의 X를 눌러 원래화면으로 가세요"""'], {}), "('모바일에서는 상단의 X를 눌러 원래화면으로 가세요')\n", (7190, 7221), True, 'import streamlit as st\n'), ((7267, 7314), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""모바일에서는 상단의 X를 눌러 원래화면으로 가세요"""'], {}), "('모바일에서는 상단의 X를 눌러 원래화면으로 가세요')\n", 
(7283, 7314), True, 'import streamlit as st\n'), ((7359, 7406), 'streamlit.sidebar.write', 'st.sidebar.write', (['"""모바일에서는 상단의 X를 눌러 원래화면으로 가세요"""'], {}), "('모바일에서는 상단의 X를 눌러 원래화면으로 가세요')\n", (7375, 7406), True, 'import streamlit as st\n'), ((847, 872), 'PIL.Image.open', 'Image.open', (['uploaded_file'], {}), '(uploaded_file)\n', (857, 872), False, 'from PIL import Image, ImageOps\n'), ((879, 937), 'streamlit.image', 'st.image', (['image'], {'caption': '"""선택된 이미지."""', 'use_column_width': '(True)'}), "(image, caption='선택된 이미지.', use_column_width=True)\n", (887, 937), True, 'import streamlit as st\n'), ((944, 956), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (952, 956), True, 'import streamlit as st\n'), ((963, 980), 'streamlit.write', 'st.write', (['"""누구일까요"""'], {}), "('누구일까요')\n", (971, 980), True, 'import streamlit as st\n'), ((1024, 1039), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1032, 1039), True, 'import numpy as np\n'), ((1061, 1105), 'cv2.cvtColor', 'cv2.cvtColor', (['numpy_image', 'cv2.COLOR_RGB2BGR'], {}), '(numpy_image, cv2.COLOR_RGB2BGR)\n', (1073, 1105), False, 'import cv2\n'), ((1135, 1181), 'cv2.cvtColor', 'cv2.cvtColor', (['opencv_image', 'cv2.COLOR_BGR2GRAY'], {}), '(opencv_image, cv2.COLOR_BGR2GRAY)\n', (1147, 1181), False, 'import cv2\n'), ((1188, 1243), 'streamlit.image', 'st.image', (['gray'], {'caption': '"""그레이변환."""', 'use_column_width': '(True)'}), "(gray, caption='그레이변환.', use_column_width=True)\n", (1196, 1243), True, 'import streamlit as st\n'), ((1316, 1348), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1346, 1348), False, 'import dlib\n'), ((1623, 1675), 'streamlit.image', 'st.image', (['face'], {'caption': '"""페이스"""', 'use_column_width': '(True)'}), "(face, caption='페이스', use_column_width=True)\n", (1631, 1675), True, 'import streamlit as st\n'), ((1947, 1975), 'keras.models.load_model', 'load_model', (['"""keras_model.h5"""'], {}), "('keras_model.h5')\n", 
(1957, 1975), False, 'from keras.models import load_model\n'), ((1994, 2046), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(1, 224, 224, 3)', 'dtype': 'np.float32'}), '(shape=(1, 224, 224, 3), dtype=np.float32)\n', (2004, 2046), True, 'import numpy as np\n'), ((2284, 2326), 'PIL.ImageOps.fit', 'ImageOps.fit', (['image', 'size', 'Image.ANTIALIAS'], {}), '(image, size, Image.ANTIALIAS)\n', (2296, 2326), False, 'from PIL import Image, ImageOps\n'), ((2399, 2416), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2409, 2416), True, 'import numpy as np\n'), ((2705, 2725), 'streamlit.write', 'st.write', (['prediction'], {}), '(prediction)\n', (2713, 2725), True, 'import streamlit as st\n'), ((2882, 2906), 'numpy.argmax', 'np.argmax', (['prediction[0]'], {}), '(prediction[0])\n', (2891, 2906), True, 'import numpy as np\n'), ((3202, 3237), 'streamlit.write', 'st.write', (["('나의 최적의 직업은?: ' + result1)"], {}), "('나의 최적의 직업은?: ' + result1)\n", (3210, 3237), True, 'import streamlit as st\n'), ((3595, 3630), 'numpy.frombuffer', 'np.frombuffer', (['bytes_data', 'np.uint8'], {}), '(bytes_data, np.uint8)\n', (3608, 3630), True, 'import numpy as np\n'), ((4134, 4192), 'streamlit.image', 'st.image', (['image'], {'caption': '"""선택된 이미지."""', 'use_column_width': '(True)'}), "(image, caption='선택된 이미지.', use_column_width=True)\n", (4142, 4192), True, 'import streamlit as st\n'), ((4207, 4219), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (4215, 4219), True, 'import streamlit as st\n'), ((4234, 4251), 'streamlit.write', 'st.write', (['"""누구일까요"""'], {}), "('누구일까요')\n", (4242, 4251), True, 'import streamlit as st\n'), ((4312, 4327), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4320, 4327), True, 'import numpy as np\n'), ((4357, 4401), 'cv2.cvtColor', 'cv2.cvtColor', (['numpy_image', 'cv2.COLOR_RGB2BGR'], {}), '(numpy_image, cv2.COLOR_RGB2BGR)\n', (4369, 4401), False, 'import cv2\n'), ((4447, 4493), 'cv2.cvtColor', 'cv2.cvtColor', (['opencv_image', 
'cv2.COLOR_BGR2GRAY'], {}), '(opencv_image, cv2.COLOR_BGR2GRAY)\n', (4459, 4493), False, 'import cv2\n'), ((4508, 4563), 'streamlit.image', 'st.image', (['gray'], {'caption': '"""그레이변환."""', 'use_column_width': '(True)'}), "(gray, caption='그레이변환.', use_column_width=True)\n", (4516, 4563), True, 'import streamlit as st\n'), ((7022, 7045), 'streamlit.write', 'st.write', (['"""사진을촬영해 주세요."""'], {}), "('사진을촬영해 주세요.')\n", (7030, 7045), True, 'import streamlit as st\n'), ((3270, 3322), 'streamlit.error', 'st.error', (['"""인물사진을 다시 촬영하세요.얼굴이 있는 사진이고 핸드폰 세로 사진입니다."""'], {}), "('인물사진을 다시 촬영하세요.얼굴이 있는 사진이고 핸드폰 세로 사진입니다.')\n", (3278, 3322), True, 'import streamlit as st\n'), ((4652, 4684), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (4682, 4684), False, 'import dlib\n'), ((4999, 5051), 'streamlit.image', 'st.image', (['face'], {'caption': '"""페이스"""', 'use_column_width': '(True)'}), "(face, caption='페이스', use_column_width=True)\n", (5007, 5051), True, 'import streamlit as st\n'), ((5371, 5399), 'keras.models.load_model', 'load_model', (['"""keras_model.h5"""'], {}), "('keras_model.h5')\n", (5381, 5399), False, 'from keras.models import load_model\n'), ((5426, 5478), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(1, 224, 224, 3)', 'dtype': 'np.float32'}), '(shape=(1, 224, 224, 3), dtype=np.float32)\n', (5436, 5478), True, 'import numpy as np\n'), ((5756, 5798), 'PIL.ImageOps.fit', 'ImageOps.fit', (['image', 'size', 'Image.ANTIALIAS'], {}), '(image, size, Image.ANTIALIAS)\n', (5768, 5798), False, 'from PIL import Image, ImageOps\n'), ((5887, 5904), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (5897, 5904), True, 'import numpy as np\n'), ((6249, 6269), 'streamlit.write', 'st.write', (['prediction'], {}), '(prediction)\n', (6257, 6269), True, 'import streamlit as st\n'), ((6466, 6490), 'numpy.argmax', 'np.argmax', (['prediction[0]'], {}), '(prediction[0])\n', (6475, 6490), True, 'import numpy as np\n'), ((6874, 6909), 
'streamlit.write', 'st.write', (["('나의 최적의 직업은?: ' + result1)"], {}), "('나의 최적의 직업은?: ' + result1)\n", (6882, 6909), True, 'import streamlit as st\n'), ((6949, 7001), 'streamlit.error', 'st.error', (['"""인물사진을 다시 촬영하세요.얼굴이 있는 사진이고 핸드폰 세로 사진입니다."""'], {}), "('인물사진을 다시 촬영하세요.얼굴이 있는 사진이고 핸드폰 세로 사진입니다.')\n", (6957, 7001), True, 'import streamlit as st\n'), ((1566, 1595), 'PIL.Image.fromarray', 'Image.fromarray', (['opencv_image'], {}), '(opencv_image)\n', (1581, 1595), False, 'from PIL import Image, ImageOps\n'), ((4934, 4963), 'PIL.Image.fromarray', 'Image.fromarray', (['opencv_image'], {}), '(opencv_image)\n', (4949, 4963), False, 'from PIL import Image, ImageOps\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
# Lots of different places that widgets could come from...
try:
from ipywidgets import interact, FloatSlider, IntSlider
except ImportError:
import warnings
# ignore ShimWarning raised by IPython, see GH #892
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
from IPython.html.widgets import interact, FloatSlider, IntSlider
except ImportError:
try:
from IPython.html.widgets import (interact,
FloatSliderWidget,
IntSliderWidget)
FloatSlider = FloatSliderWidget
IntSlider = IntSliderWidget
except ImportError:
pass
from .miscplot import palplot
from .palettes import (color_palette, dark_palette, light_palette,
diverging_palette, cubehelix_palette)
__all__ = ["choose_colorbrewer_palette", "choose_cubehelix_palette",
"choose_dark_palette", "choose_light_palette",
"choose_diverging_palette"]
def _init_mutable_colormap():
    """Create a matplotlib colormap whose lookup table the widgets rewrite in place."""
    base_colors = color_palette("Greys", 256)
    cmap = LinearSegmentedColormap.from_list("interactive", base_colors)
    # Force the LUT to be built now so _update_lut can overwrite it later.
    cmap._init()
    cmap._set_extremes()
    return cmap
def _update_lut(cmap, colors):
    """Change the LUT values in a matplotlib colormap in-place.

    ``colors`` is assigned into ``cmap._lut[:256]`` directly, so it is
    assumed to broadcast into that slice (the LUT rows are RGBA) —
    callers that pass RGB triples append an alpha column first.
    """
    # Only the 256 color entries are replaced; the trailing LUT rows hold
    # the under/over/bad colors, which _set_extremes() refreshes below.
    cmap._lut[:256] = colors
    cmap._set_extremes()
def _show_cmap(cmap):
    """Render a continuous matplotlib colormap as a thin horizontal strip."""
    from .rcmod import axes_style  # Avoid circular import
    with axes_style("white"):
        fig, ax = plt.subplots(figsize=(8.25, .75))
        ax.set(xticks=[], yticks=[])
        # One row of 256 evenly spaced values spanning the colormap range.
        gradient = np.linspace(0, 1, 256).reshape(1, -1)
        ax.pcolormesh(gradient, cmap=cmap)
def choose_colorbrewer_palette(data_type, as_cmap=False):
    """Select a palette from the ColorBrewer set.

    These palettes are built into matplotlib and can be used by name in
    many seaborn functions, or by passing the object returned by this function.

    Parameters
    ----------
    data_type : {'sequential', 'diverging', 'qualitative'}
        This describes the kind of data you want to visualize. See the seaborn
        color palette docs for more information about how to choose this value.
        Note that you can pass substrings (e.g. 'q' for 'qualitative').
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    dark_palette : Create a sequential palette with dark low values.
    light_palette : Create a sequential palette with bright low values.
    diverging_palette : Create a diverging palette from selected colors.
    cubehelix_palette : Create a sequential palette or colormap using the
                        cubehelix system.
    """
    if data_type.startswith("q") and as_cmap:
        raise ValueError("Qualitative palettes cannot be colormaps.")
    # ``pal`` and ``cmap`` are closed over and mutated in place by the widget
    # callbacks below, so the returned object reflects the latest widget state.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()
    if data_type.startswith("s"):
        opts = ["Greys", "Reds", "Greens", "Blues", "Oranges", "Purples",
                "BuGn", "BuPu", "GnBu", "OrRd", "PuBu", "PuRd", "RdPu", "YlGn",
                "PuBuGn", "YlGnBu", "YlOrBr", "YlOrRd"]
        variants = ["regular", "reverse", "dark"]

        @interact
        def choose_sequential(name=opts, n=(2, 18),
                              desat=FloatSlider(min=0, max=1, value=1),
                              variant=variants):
            # matplotlib encodes reversed/dark variants as "_r"/"_d" suffixes.
            if variant == "reverse":
                name += "_r"
            elif variant == "dark":
                name += "_d"
            if as_cmap:
                colors = color_palette(name, 256, desat)
                # The colormap LUT is RGBA: append an opaque alpha column.
                _update_lut(cmap, np.c_[colors, np.ones(256)])
                _show_cmap(cmap)
            else:
                pal[:] = color_palette(name, n, desat)
                palplot(pal)
    elif data_type.startswith("d"):
        opts = ["RdBu", "RdGy", "PRGn", "PiYG", "BrBG",
                "RdYlBu", "RdYlGn", "Spectral"]
        variants = ["regular", "reverse"]

        @interact
        def choose_diverging(name=opts, n=(2, 16),
                             desat=FloatSlider(min=0, max=1, value=1),
                             variant=variants):
            if variant == "reverse":
                name += "_r"
            if as_cmap:
                colors = color_palette(name, 256, desat)
                # RGBA LUT: append an opaque alpha column.
                _update_lut(cmap, np.c_[colors, np.ones(256)])
                _show_cmap(cmap)
            else:
                pal[:] = color_palette(name, n, desat)
                palplot(pal)
    elif data_type.startswith("q"):
        opts = ["Set1", "Set2", "Set3", "Paired", "Accent",
                "Pastel1", "Pastel2", "Dark2"]

        @interact
        def choose_qualitative(name=opts, n=(2, 16),
                               desat=FloatSlider(min=0, max=1, value=1)):
            pal[:] = color_palette(name, n, desat)
            palplot(pal)
    if as_cmap:
        return cmap
    return pal
def choose_dark_palette(input="husl", as_cmap=False):
    """Launch an interactive widget to create a dark sequential palette.

    This corresponds with the :func:`dark_palette` function. This kind
    of palette is good for data that range between relatively uninteresting
    low values and interesting high values.

    Requires IPython 2+ and must be used in the notebook.

    Parameters
    ----------
    input : {'husl', 'hls', 'rgb'}
        Color space for defining the seed value. Note that the default is
        different than the default input for :func:`dark_palette`.
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    dark_palette : Create a sequential palette with dark low values.
    light_palette : Create a sequential palette with bright low values.
    cubehelix_palette : Create a sequential palette or colormap using the
                        cubehelix system.
    """
    # ``pal``/``cmap`` are closed over and mutated by the widget callbacks,
    # so the returned object reflects the latest slider state.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()
    if input == "rgb":
        @interact
        def choose_dark_palette_rgb(r=(0., 1.),
                                    g=(0., 1.),
                                    b=(0., 1.),
                                    n=(3, 17)):
            color = r, g, b
            if as_cmap:
                colors = dark_palette(color, 256, input="rgb")
                # NOTE(review): unlike choose_colorbrewer_palette, no alpha
                # column is appended before _update_lut here — confirm the
                # rows from dark_palette match the RGBA LUT shape.
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = dark_palette(color, n, input="rgb")
                palplot(pal)
    elif input == "hls":
        @interact
        def choose_dark_palette_hls(h=(0., 1.),
                                    l=(0., 1.),  # noqa: E741
                                    s=(0., 1.),
                                    n=(3, 17)):
            color = h, l, s
            if as_cmap:
                colors = dark_palette(color, 256, input="hls")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = dark_palette(color, n, input="hls")
                palplot(pal)
    elif input == "husl":
        @interact
        def choose_dark_palette_husl(h=(0, 359),
                                     s=(0, 99),
                                     l=(0, 99),  # noqa: E741
                                     n=(3, 17)):
            color = h, s, l
            if as_cmap:
                colors = dark_palette(color, 256, input="husl")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = dark_palette(color, n, input="husl")
                palplot(pal)
    if as_cmap:
        return cmap
    return pal
def choose_light_palette(input="husl", as_cmap=False):
    """Launch an interactive widget to create a light sequential palette.

    This corresponds with the :func:`light_palette` function. This kind
    of palette is good for data that range between relatively uninteresting
    low values and interesting high values.

    Requires IPython 2+ and must be used in the notebook.

    Parameters
    ----------
    input : {'husl', 'hls', 'rgb'}
        Color space for defining the seed value. Note that the default is
        different than the default input for :func:`light_palette`.
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    light_palette : Create a sequential palette with bright low values.
    dark_palette : Create a sequential palette with dark low values.
    cubehelix_palette : Create a sequential palette or colormap using the
                        cubehelix system.
    """
    # ``pal``/``cmap`` are closed over and mutated by the widget callbacks,
    # so the returned object reflects the latest slider state.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()
    if input == "rgb":
        @interact
        def choose_light_palette_rgb(r=(0., 1.),
                                     g=(0., 1.),
                                     b=(0., 1.),
                                     n=(3, 17)):
            color = r, g, b
            if as_cmap:
                colors = light_palette(color, 256, input="rgb")
                # NOTE(review): no alpha column is appended before
                # _update_lut here, unlike choose_colorbrewer_palette —
                # confirm the rows match the RGBA LUT shape.
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = light_palette(color, n, input="rgb")
                palplot(pal)
    elif input == "hls":
        @interact
        def choose_light_palette_hls(h=(0., 1.),
                                     l=(0., 1.),  # noqa: E741
                                     s=(0., 1.),
                                     n=(3, 17)):
            color = h, l, s
            if as_cmap:
                colors = light_palette(color, 256, input="hls")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = light_palette(color, n, input="hls")
                palplot(pal)
    elif input == "husl":
        @interact
        def choose_light_palette_husl(h=(0, 359),
                                      s=(0, 99),
                                      l=(0, 99),  # noqa: E741
                                      n=(3, 17)):
            color = h, s, l
            if as_cmap:
                colors = light_palette(color, 256, input="husl")
                _update_lut(cmap, colors)
                _show_cmap(cmap)
            else:
                pal[:] = light_palette(color, n, input="husl")
                palplot(pal)
    if as_cmap:
        return cmap
    return pal
def choose_diverging_palette(as_cmap=False):
    """Launch an interactive widget to choose a diverging color palette.

    This corresponds with the :func:`diverging_palette` function. This kind
    of palette is good for data that range between interesting low values
    and interesting high values with a meaningful midpoint. (For example,
    change scores relative to some baseline value).

    Requires IPython 2+ and must be used in the notebook.

    Parameters
    ----------
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    diverging_palette : Create a diverging color palette or colormap.
    choose_colorbrewer_palette : Interactively choose palettes from the
                                 colorbrewer set, including diverging palettes.
    """
    # ``pal``/``cmap`` are closed over and mutated by the widget callback,
    # so the returned object reflects the latest slider state.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()

    @interact
    def choose_diverging_palette(
        h_neg=IntSlider(min=0,
                        max=359,
                        value=220),
        h_pos=IntSlider(min=0,
                        max=359,
                        value=10),
        s=IntSlider(min=0, max=99, value=74),
        l=IntSlider(min=0, max=99, value=50),  # noqa: E741
        sep=IntSlider(min=1, max=50, value=10),
        n=(2, 16),
        center=["light", "dark"]
    ):
        if as_cmap:
            colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)
            # NOTE(review): no alpha column is appended before _update_lut
            # here, unlike choose_colorbrewer_palette — confirm the rows
            # match the RGBA LUT shape.
            _update_lut(cmap, colors)
            _show_cmap(cmap)
        else:
            pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)
            palplot(pal)
    if as_cmap:
        return cmap
    return pal
def choose_cubehelix_palette(as_cmap=False):
    """Launch an interactive widget to create a sequential cubehelix palette.

    This corresponds with the :func:`cubehelix_palette` function. This kind
    of palette is good for data that range between relatively uninteresting
    low values and interesting high values. The cubehelix system allows the
    palette to have more hue variance across the range, which can be helpful
    for distinguishing a wider range of values.

    Requires IPython 2+ and must be used in the notebook.

    Parameters
    ----------
    as_cmap : bool
        If True, the return value is a matplotlib colormap rather than a
        list of discrete colors.

    Returns
    -------
    pal or cmap : list of colors or matplotlib colormap
        Object that can be passed to plotting functions.

    See Also
    --------
    cubehelix_palette : Create a sequential palette or colormap using the
                        cubehelix system.
    """
    # ``pal``/``cmap`` are closed over and mutated by the widget callback,
    # so the returned object reflects the latest slider state.
    pal = []
    if as_cmap:
        cmap = _init_mutable_colormap()

    @interact
    def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),
                         start=FloatSlider(min=0, max=3, value=0),
                         rot=FloatSlider(min=-1, max=1, value=.4),
                         gamma=FloatSlider(min=0, max=5, value=1),
                         hue=FloatSlider(min=0, max=1, value=.8),
                         light=FloatSlider(min=0, max=1, value=.85),
                         dark=FloatSlider(min=0, max=1, value=.15),
                         reverse=False):
        if as_cmap:
            colors = cubehelix_palette(256, start, rot, gamma,
                                       hue, light, dark, reverse)
            # The colormap LUT is RGBA: append an opaque alpha column.
            _update_lut(cmap, np.c_[colors, np.ones(256)])
            _show_cmap(cmap)
        else:
            pal[:] = cubehelix_palette(n_colors, start, rot, gamma,
                                       hue, light, dark, reverse)
            palplot(pal)
    if as_cmap:
        return cmap
    return pal
| [
"numpy.ones",
"warnings.catch_warnings",
"IPython.html.widgets.IntSlider",
"IPython.html.widgets.FloatSlider",
"numpy.linspace",
"warnings.simplefilter",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.subplots"
] | [((1366, 1421), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""interactive"""', 'greys'], {}), "('interactive', greys)\n", (1399, 1421), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((1812, 1846), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8.25, 0.75)'}), '(figsize=(8.25, 0.75))\n', (1824, 1846), True, 'import matplotlib.pyplot as plt\n'), ((1887, 1909), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(256)'], {}), '(0, 1, 256)\n', (1898, 1909), True, 'import numpy as np\n'), ((12345, 12381), 'IPython.html.widgets.IntSlider', 'IntSlider', ([], {'min': '(0)', 'max': '(359)', 'value': '(220)'}), '(min=0, max=359, value=220)\n', (12354, 12381), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((12445, 12480), 'IPython.html.widgets.IntSlider', 'IntSlider', ([], {'min': '(0)', 'max': '(359)', 'value': '(10)'}), '(min=0, max=359, value=10)\n', (12454, 12480), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((12540, 12574), 'IPython.html.widgets.IntSlider', 'IntSlider', ([], {'min': '(0)', 'max': '(99)', 'value': '(74)'}), '(min=0, max=99, value=74)\n', (12549, 12574), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((12586, 12620), 'IPython.html.widgets.IntSlider', 'IntSlider', ([], {'min': '(0)', 'max': '(99)', 'value': '(50)'}), '(min=0, max=99, value=50)\n', (12595, 12620), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((12648, 12682), 'IPython.html.widgets.IntSlider', 'IntSlider', ([], {'min': '(1)', 'max': '(50)', 'value': '(10)'}), '(min=1, max=50, value=10)\n', (12657, 12682), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((14175, 14208), 'IPython.html.widgets.IntSlider', 'IntSlider', ([], {'min': '(2)', 'max': '(16)', 'value': '(9)'}), '(min=2, max=16, value=9)\n', (14184, 14208), False, 'from 
IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((14241, 14275), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(0)', 'max': '(3)', 'value': '(0)'}), '(min=0, max=3, value=0)\n', (14252, 14275), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((14306, 14343), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(-1)', 'max': '(1)', 'value': '(0.4)'}), '(min=-1, max=1, value=0.4)\n', (14317, 14343), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((14375, 14409), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(0)', 'max': '(5)', 'value': '(1)'}), '(min=0, max=5, value=1)\n', (14386, 14409), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((14440, 14476), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'value': '(0.8)'}), '(min=0, max=1, value=0.8)\n', (14451, 14476), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((14508, 14545), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'value': '(0.85)'}), '(min=0, max=1, value=0.85)\n', (14519, 14545), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((14576, 14613), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'value': '(0.15)'}), '(min=0, max=1, value=0.15)\n', (14587, 14613), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((335, 360), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (358, 360), False, 'import warnings\n'), ((370, 401), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (391, 401), False, 'import warnings\n'), ((3758, 3792), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'value': '(1)'}), '(min=0, max=1, value=1)\n', (3769, 3792), False, 'from 
IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((4542, 4576), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'value': '(1)'}), '(min=0, max=1, value=1)\n', (4553, 4576), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((5224, 5258), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(0)', 'max': '(1)', 'value': '(1)'}), '(min=0, max=1, value=1)\n', (5235, 5258), False, 'from IPython.html.widgets import interact, FloatSlider, IntSlider\n'), ((14849, 14861), 'numpy.ones', 'np.ones', (['(256)'], {}), '(256)\n', (14856, 14861), True, 'import numpy as np\n'), ((4104, 4116), 'numpy.ones', 'np.ones', (['(256)'], {}), '(256)\n', (4111, 4116), True, 'import numpy as np\n'), ((4821, 4833), 'numpy.ones', 'np.ones', (['(256)'], {}), '(256)\n', (4828, 4833), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding=UTF-8
'''
@Author: <NAME>
@LastEditors: <NAME>
@Description:
@Date: 2019-04-19
@LastEditTime: 2019-04-22 10:38
'''
import numpy as np
import torch
import torch.utils.data as Data
from torch.autograd import Variable
from EvalBox.Evaluation.evaluation import Evaluation
from EvalBox.Evaluation.evaluation import MIN_COMPENSATION
class ALDp(Evaluation):
    """Average L_p Distortion (ALDp) metric.

    Averages the perturbation size of *successful* adversarial examples
    under three norms: L0 (fraction of changed pixels), a normalized L2
    distance, and the L_inf distance.
    """

    def __init__(self, outputs_origin, outputs_adv, device, **kwargs):
        """
        :param outputs_origin: model outputs on the original examples
        :param outputs_adv: model outputs on the adversarial examples
        :param device: device handled by the parent Evaluation class
        :param kwargs: extra parameters forwarded to _parsing_parameters
        """
        super(ALDp, self).__init__(outputs_origin, outputs_adv, device)
        self._parsing_parameters(**kwargs)

    def _parsing_parameters(self, **kwargs):
        """Hook for metric-specific keyword parameters (none used by ALDp)."""

    def evaluate(self, adv_xs=None, cln_xs=None, cln_ys=None, adv_ys=None,
                 target_preds=None, target_flag=False):
        """Compute the average L0 / normalized L2 / L_inf distortion.

        :param adv_xs: adversarial examples (tensor, assumed in [0, 1])
        :param cln_xs: clean examples (tensor, assumed in [0, 1])
        :param cln_ys: ground-truth labels of the clean examples
        :param adv_ys: predicted labels for the adversarial examples (unused)
        :param target_preds: attack target labels (targeted attack) or the
            original labels (untargeted attack)
        :param target_flag: True for a targeted attack
        :return: (avg L0 distance, avg normalized L2 distance, avg L_inf distance)
        """
        assert len(adv_xs) == len(cln_ys), 'examples and labels do not match.'
        # Work in [0, 255] pixel space for the L0 statistic.
        ori_r = cln_xs.numpy() * 255
        adv_r = adv_xs.numpy() * 255
        NUM_PIXEL = int(np.prod(cln_xs.shape[1:]))
        pert = adv_r - ori_r
        preds = torch.argmax(torch.from_numpy(self.outputs_adv), 1).data.numpy()
        labels = target_preds.numpy()
        dist_l0 = 0
        norm_dist_l2 = 0
        dist_li = 0
        number = 0
        for i in range(len(preds)):
            # A sample is a successful attack when the adversarial
            # prediction hits the target (targeted) or differs from the
            # original label (untargeted). Only successes are averaged.
            if target_flag:
                success = preds[i] == labels[i]
            else:
                success = preds[i] != labels[i]
            if not success:
                continue
            number += 1
            dist_l0 += np.linalg.norm(np.reshape(pert[i], -1), ord=0) / NUM_PIXEL
            # Flattened perturbation in [0, 1] space, computed once and
            # reused for both the L2 and L_inf terms.
            diff = np.reshape(cln_xs.numpy()[i] - adv_xs.numpy()[i], -1)
            norm_dist_l2 += (np.linalg.norm(diff, ord=2) /
                             np.linalg.norm(np.reshape(cln_xs.numpy()[i], -1), ord=2))
            dist_li += np.linalg.norm(diff, ord=np.inf)
        # Guard against division by zero when no attack succeeded.
        denom = number if number != 0 else number + MIN_COMPENSATION
        return dist_l0 / denom, norm_dist_l2 / denom, dist_li / denom
| [
"numpy.prod",
"numpy.reshape",
"torch.from_numpy",
"torch.argmax"
] | [((1686, 1720), 'torch.from_numpy', 'torch.from_numpy', (['self.outputs_adv'], {}), '(self.outputs_adv)\n', (1702, 1720), False, 'import torch\n'), ((1737, 1761), 'torch.argmax', 'torch.argmax', (['outputs', '(1)'], {}), '(outputs, 1)\n', (1749, 1761), False, 'import torch\n'), ((1502, 1527), 'numpy.prod', 'np.prod', (['cln_xs.shape[1:]'], {}), '(cln_xs.shape[1:])\n', (1509, 1527), True, 'import numpy as np\n'), ((2061, 2084), 'numpy.reshape', 'np.reshape', (['pert[i]', '(-1)'], {}), '(pert[i], -1)\n', (2071, 2084), True, 'import numpy as np\n'), ((2608, 2631), 'numpy.reshape', 'np.reshape', (['pert[i]', '(-1)'], {}), '(pert[i], -1)\n', (2618, 2631), True, 'import numpy as np\n')] |
import random
from tensorflow.keras import layers, models, losses, Model
import tensorflow as tf
import numpy as np
from boardlogic import BoardLogic
from coder import Coder
import matplotlib.pyplot as plt
import math
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax
from keras.applications.vgg19 import VGG19
from helpers import Helpers
class Predictor:
    def __init__(self, logger,
                 color,
                 location,
                 checkpoint,
                 max_loaded_matches,
                 split_validation,
                 model_type,
                 load_model,
                 batch_size,
                 epochs,
                 debug=False):
        """Build the move-prediction network and either restore or train it.

        :param logger: logger used by the training pipeline
        :param color: player color forwarded to train_model
        :param location: match-data location forwarded to train_model
        :param checkpoint: weights file path (loaded from or saved to)
        :param max_loaded_matches: cap on matches loaded for training
        :param split_validation: validation split forwarded to train_model
        :param model_type: architecture key for create_model
            ('cnn2', 'cnn3', 'resnet', '4conv', 'vgg19')
        :param load_model: if True, restore weights from ``checkpoint``
            instead of training
        :param batch_size: training batch size
        :param epochs: number of training epochs
        :param debug: if True, model builders print a summary
        """
        self.logger = logger
        self.debug = debug
        self.epochs = epochs
        self.batch_size = batch_size
        # The architecture must exist before weights can be loaded or trained.
        self.create_model(model_type)
        if load_model:
            self.model.load_weights(filepath=checkpoint)
        else:
            self.train_model(color, location, checkpoint, max_loaded_matches=max_loaded_matches, split_validation=split_validation)
def create_model(self, model_type):
scaled_board = 32
board_size = 8
if model_type == 'cnn2':
self.create_CNN_model2(scaled_board_size=scaled_board, board_size=board_size)
elif model_type == 'cnn3':
self.create_CNN_model3(scaled_board_size=scaled_board, board_size=board_size)
elif model_type == 'resnet':
self.create_ResNet_model(scaled_board_size=scaled_board, board_size=board_size)
elif model_type == '4conv':
self.create_4conv_cnn(scaled_board_size=scaled_board, board_size=board_size)
elif model_type == 'vgg19':
self.create_VGG19_model(scaled_board_size=scaled_board, board_size=board_size)
def create_CNN_model2(self, scaled_board_size = 32, board_size = 8):
self.model = models.Sequential()
self.model.add(layers.Conv2D(filters=32, padding='same', kernel_size=(3,3), activation = "relu", input_shape=(scaled_board_size, scaled_board_size, 3)))
self.model.add(layers.MaxPooling2D(pool_size=(2,2), padding='same'))
self.model.add(layers.Conv2D(filters=64, padding='same', kernel_size=(3,3), activation = "relu"))
self.model.add(layers.MaxPooling2D(pool_size=(2,2), padding='same'))
self.model.add(layers.Conv2D(filters=64, padding='same', kernel_size=(3,3), activation = "relu"))
self.model.add(layers.MaxPooling2D(pool_size=(2,2), padding='same'))
self.model.add(layers.Flatten())
self.model.add(layers.Dense(128, activation = "relu"))
self.model.add(layers.Dropout(0.4))
self.model.add(layers.Dense(64, activation = "relu"))
self.model.add(layers.Dense(board_size * board_size))
if self.debug:
self.model.summary()
self.model.compile(optimizer = "adam", loss = losses.SparseCategoricalCrossentropy(from_logits=True), metrics = ['acc'])
self.probability_model = keras.Sequential([self.model, tf.keras.layers.Softmax()])
def create_CNN_model3(self, scaled_board_size = 32, board_size = 8):
self.model = models.Sequential()
self.model.add(layers.Conv2D(filters=32, padding='same', kernel_size=(3,3), activation = "relu", input_shape=(scaled_board_size, scaled_board_size, 3)))
self.model.add(layers.MaxPooling2D(pool_size=(3,3), padding='same'))
self.model.add(layers.Conv2D(filters=64, padding='same', kernel_size=(3,3), activation = "relu"))
self.model.add(layers.MaxPooling2D(pool_size=(3,3), padding='same'))
self.model.add(layers.Conv2D(filters=64, padding='same', kernel_size=(3,3), activation = "relu"))
self.model.add(layers.MaxPooling2D(pool_size=(3,3), padding='same'))
self.model.add(layers.Flatten())
self.model.add(layers.Dense(128, activation = "relu"))
self.model.add(layers.Dropout(0.4))
self.model.add(layers.Dense(64, activation = "relu"))
self.model.add(layers.Dense(board_size * board_size))
if self.debug:
self.model.summary()
self.model.compile(optimizer = "adam", loss = losses.SparseCategoricalCrossentropy(from_logits=True), metrics = ['acc'])
self.probability_model = keras.Sequential([self.model, tf.keras.layers.Softmax()])
def create_4conv_cnn(self, scaled_board_size = 32, board_size = 8):
self.model= Sequential()
self.model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape=(scaled_board_size, scaled_board_size, 3)))
self.model.add(BatchNormalization())
self.model.add(Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu'))
self.model.add(BatchNormalization())
self.model.add(MaxPooling2D(pool_size=(2, 2)))
self.model.add(Dropout(0.25))
self.model.add(Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
self.model.add(BatchNormalization())
self.model.add(Dropout(0.25))
self.model.add(Conv2D(128, kernel_size=(3, 3), padding='same', activation='relu'))
self.model.add(BatchNormalization())
self.model.add(MaxPooling2D(pool_size=(2, 2)))
self.model.add(Dropout(0.25))
self.model.add(Flatten())
self.model.add(Dense(512, activation='relu'))
self.model.add(BatchNormalization())
self.model.add(Dropout(0.5))
self.model.add(Dense(128, activation='relu'))
self.model.add(BatchNormalization())
self.model.add(Dropout(0.5))
self.model.add(Dense(board_size * board_size))
if self.debug:
self.model.summary()
self.model.compile(loss=losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
self.probability_model = keras.Sequential([self.model, Softmax()])
def create_ResNet_model(self, scaled_board_size = 32, board_size = 8):
base_model = tf.keras.applications.ResNet152(weights = 'imagenet', include_top = False, input_shape = (scaled_board_size, scaled_board_size, 3))
# for layer in base_model.layers:
# layer.trainable = False
x = layers.Flatten()(base_model.output)
x = layers.Dense(128, activation='relu')(x)
predictions = layers.Dense(board_size * board_size, activation = 'softmax')(x)
self.model = Model(inputs = base_model.input, outputs = predictions)
if self.debug:
self.model.summary()
self.model.compile(optimizer='adam', loss=losses.sparse_categorical_crossentropy, metrics=['accuracy'])
self.probability_model = keras.Sequential([self.model, Softmax()])
def create_VGG19_model(self, scaled_board_size = 32, board_size = 8):
base_model = VGG19(weights = 'imagenet', include_top = False, input_shape = (scaled_board_size, scaled_board_size, 3))
x = layers.Flatten()(base_model.output)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dropout(0.5)(x)
predictions = layers.Dense(board_size * board_size, activation = 'softmax')(x)
self.model = Model(inputs = base_model.input, outputs = predictions)
if self.debug:
self.model.summary()
self.model.compile(optimizer='adam', loss=losses.sparse_categorical_crossentropy, metrics=['accuracy'])
self.probability_model = keras.Sequential([self.model, Softmax()])
def train_model(self, color, location, checkpoint, max_loaded_matches, split_validation):
data = Helpers.get_games_from_dataset(location)
matches = []
for item in data:
# item[1] is winner, item[2] is game moves
if (color == 'black' and item[1] != '1') or (color == 'white' and item[1] != '-1'):
continue
matches.append(item[2])
train_data = []
train_labels = []
test_data = []
test_labels = []
register_after_move = 0
max_loaded_matches = len(matches) if len(matches) < max_loaded_matches else max_loaded_matches
self.logger.log_info(f'total matches: {max_loaded_matches}')
split_validation_count = int(split_validation * max_loaded_matches)
for i in range(0, split_validation_count):
match_moves = Coder.get_sequence(matches[i])
board_logic = BoardLogic(color)
color_turn = 0
for move in match_moves:
total_valid_moves_black = len(board_logic.get_valid_moves('black'))
total_valid_moves_white = len(board_logic.get_valid_moves('white'))
if color_turn % 2 == 0:
if (color == 'black' and total_valid_moves_black > 0) or (color == 'white' and total_valid_moves_white > 0 and total_valid_moves_black == 0):
if color_turn >= register_after_move:
arr = Coder.get_numpy_array_from_board(board_logic.board)
train_data.append(arr)
i, j = Coder.decode_move(move)
move_index = Coder.get_move_as_numpy(i, j)
train_labels.append(move_index)
if (total_valid_moves_black > 0):
color_turn += 1
else:
if (color == 'white' and total_valid_moves_white > 0) or (color == 'black' and total_valid_moves_black > 0 and total_valid_moves_white == 0):
if color_turn >= register_after_move:
arr = Coder.get_numpy_array_from_board(board_logic.board)
train_data.append(arr)
i, j = Coder.decode_move(move)
move_index = Coder.get_move_as_numpy(i, j)
train_labels.append(move_index)
if (total_valid_moves_white > 0):
color_turn += 1
board_logic.sequence += move
board_logic.move_sequence_to_board()
for i in range(split_validation_count, max_loaded_matches):
match_moves = Coder.get_sequence(matches[i])
board_logic = BoardLogic(color)
color_turn = 0
for move in match_moves:
total_valid_moves_black = len(board_logic.get_valid_moves('black'))
total_valid_moves_white = len(board_logic.get_valid_moves('white'))
if color_turn % 2 == 0:
if (color == 'black' and total_valid_moves_black > 0) or (color == 'white' and total_valid_moves_white > 0 and total_valid_moves_black == 0):
if color_turn >= register_after_move:
arr = Coder.get_numpy_array_from_board(board_logic.board)
test_data.append(arr)
i, j = Coder.decode_move(move)
move_index = Coder.get_move_as_numpy(i, j)
test_labels.append(move_index)
if (total_valid_moves_black > 0):
color_turn += 1
else:
if (color == 'white' and total_valid_moves_white > 0) or (color == 'black' and total_valid_moves_black > 0 and total_valid_moves_white == 0):
if color_turn >= register_after_move:
arr = Coder.get_numpy_array_from_board(board_logic.board)
test_data.append(arr)
i, j = Coder.decode_move(move)
move_index = Coder.get_move_as_numpy(i, j)
test_labels.append(move_index)
if (total_valid_moves_white > 0):
color_turn += 1
board_logic.sequence += move
board_logic.move_sequence_to_board()
train_data = np.asarray(train_data)
train_labels = np.asarray(train_labels)
test_data = np.asarray(test_data)
test_labels = np.asfarray(test_labels)
self.logger.log_info(f'Loaded: {len(train_data)}, {len(train_labels)}, {len(test_data)}, {len(test_labels)},')
self.model.fit(train_data, train_labels, batch_size=self.batch_size ,epochs=self.epochs, validation_data=(test_data, test_labels))
self.model.save_weights(filepath=checkpoint)
    def predict_move(self, board, valid_moves, scaled_board_size = 32):
        """Pick the valid move with the best combined network/heuristic score.

        board: current board state (converted to an image via Coder).
        valid_moves: iterable of (i, j) board coordinates to choose from.
        Returns the chosen (i, j); returns (-1, -1) if valid_moves is empty.
        """
        board_image = Coder.get_numpy_array_from_board(board)
        board_image = board_image.reshape((1, scaled_board_size, scaled_board_size, 3))
        # Static 8x8 positional weight tables (row-major flattened):
        # corners score highest, squares adjacent to corners lowest.
        board_heur = [100, -25, 10, 5, 5, 10, -25, 100,-25,-25,2,2,2,2,-25,-25,10,2,5,1,1,5,2,10,5,2,1,2,2,1,2,5,5,2,1,2,2,1,2,5,10,2,5,1,1,5,2,10,-25,-25,2,2,2,2,-25,-25,100,-25,10,5,5,10,-25,100]
        board_bench = [80, -26,24,-1,-5,28,-18,76,-23,-39,-18,-9,-6,-8,-39,-1,46,-16,4,1,-3,6,-20,52,-13,-5,2,-1,4,3,-12,-2,-5,-6,1,-2,-3,0,-9,-5,48,-13,12,5,0,5,-24,41,-27,-53,-11,-1,-11,-16,-58,-15,87,-25,27,-1,5,36,-3,100]
        pred = self.probability_model.predict(board_image)
        best_move = [(-1, -1), float('-inf')] # index, probability of move
        for i, j in valid_moves:
            move_index = Coder.get_move_as_numpy(i, j)[0]
            # Normalize the table scores into (0, 1] using each table's range.
            normalized_heur_move = Helpers.normalize(board_heur[move_index], -25, 100)
            normalized_bench_move = Helpers.normalize(board_bench[move_index], -58, 100)
            pred_value = pred[0][move_index]
            # NOTE(review): log(x * w) = log(x) + log(w), so the 0.65/0.175
            # weights only add constants and do not change which move wins
            # the argmax -- w*log(x) was probably intended.  Also raises a
            # math domain error if any factor is 0 or negative; confirm the
            # network/heuristic values are strictly positive.
            actual_value = math.log((pred_value * 0.65)) + math.log((normalized_heur_move * 0.175)) + math.log((normalized_bench_move * 0.175))
            if actual_value > best_move[1]:
                best_move[0] = (i, j)
                best_move[1] = actual_value
        return best_move[0]
    def predict_randomly(self, valid_moves):
        """Return a uniformly random element of valid_moves (baseline policy)."""
        return random.choice(list(valid_moves)) | [
"keras.layers.Conv2D",
"math.log",
"numpy.asfarray",
"keras.layers.Softmax",
"helpers.Helpers.get_games_from_dataset",
"tensorflow.keras.layers.Dense",
"keras.layers.Dense",
"coder.Coder.get_move_as_numpy",
"tensorflow.keras.layers.Conv2D",
"coder.Coder.decode_move",
"numpy.asarray",
"keras.ap... | [((2095, 2114), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (2112, 2114), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((3362, 3381), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (3379, 3381), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((4627, 4639), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4637, 4639), False, 'from keras.models import Sequential\n'), ((6242, 6371), 'tensorflow.keras.applications.ResNet152', 'tf.keras.applications.ResNet152', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(scaled_board_size, scaled_board_size, 3)'}), "(weights='imagenet', include_top=False,\n input_shape=(scaled_board_size, scaled_board_size, 3))\n", (6273, 6371), True, 'import tensorflow as tf\n'), ((6663, 6714), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'base_model.input', 'outputs': 'predictions'}), '(inputs=base_model.input, outputs=predictions)\n', (6668, 6714), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((7058, 7161), 'keras.applications.vgg19.VGG19', 'VGG19', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(scaled_board_size, scaled_board_size, 3)'}), "(weights='imagenet', include_top=False, input_shape=(scaled_board_size,\n scaled_board_size, 3))\n", (7063, 7161), False, 'from keras.applications.vgg19 import VGG19\n'), ((7408, 7459), 'tensorflow.keras.Model', 'Model', ([], {'inputs': 'base_model.input', 'outputs': 'predictions'}), '(inputs=base_model.input, outputs=predictions)\n', (7413, 7459), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((7817, 7857), 'helpers.Helpers.get_games_from_dataset', 'Helpers.get_games_from_dataset', (['location'], {}), '(location)\n', (7847, 7857), False, 'from helpers import Helpers\n'), ((12381, 12403), 'numpy.asarray', 'np.asarray', (['train_data'], {}), 
'(train_data)\n', (12391, 12403), True, 'import numpy as np\n'), ((12427, 12451), 'numpy.asarray', 'np.asarray', (['train_labels'], {}), '(train_labels)\n', (12437, 12451), True, 'import numpy as np\n'), ((12472, 12493), 'numpy.asarray', 'np.asarray', (['test_data'], {}), '(test_data)\n', (12482, 12493), True, 'import numpy as np\n'), ((12516, 12540), 'numpy.asfarray', 'np.asfarray', (['test_labels'], {}), '(test_labels)\n', (12527, 12540), True, 'import numpy as np\n'), ((12953, 12992), 'coder.Coder.get_numpy_array_from_board', 'Coder.get_numpy_array_from_board', (['board'], {}), '(board)\n', (12985, 12992), False, 'from coder import Coder\n'), ((2138, 2278), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(32)', 'padding': '"""same"""', 'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': '(scaled_board_size, scaled_board_size, 3)'}), "(filters=32, padding='same', kernel_size=(3, 3), activation=\n 'relu', input_shape=(scaled_board_size, scaled_board_size, 3))\n", (2151, 2278), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2299, 2352), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (2318, 2352), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2376, 2461), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'padding': '"""same"""', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, padding='same', kernel_size=(3, 3), activation='relu'\n )\n", (2389, 2461), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2482, 2535), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (2501, 2535), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2559, 2644), 
'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'padding': '"""same"""', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, padding='same', kernel_size=(3, 3), activation='relu'\n )\n", (2572, 2644), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2665, 2718), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""same"""'}), "(pool_size=(2, 2), padding='same')\n", (2684, 2718), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2742, 2758), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (2756, 2758), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2783, 2819), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2795, 2819), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2846, 2865), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (2860, 2865), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2890, 2925), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (2902, 2925), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((2952, 2989), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(board_size * board_size)'], {}), '(board_size * board_size)\n', (2964, 2989), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((3405, 3545), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(32)', 'padding': '"""same"""', 'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': '(scaled_board_size, scaled_board_size, 3)'}), "(filters=32, padding='same', kernel_size=(3, 3), activation=\n 'relu', input_shape=(scaled_board_size, scaled_board_size, 3))\n", (3418, 3545), False, 'from tensorflow.keras 
import layers, models, losses, Model\n'), ((3566, 3619), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'padding': '"""same"""'}), "(pool_size=(3, 3), padding='same')\n", (3585, 3619), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((3643, 3728), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'padding': '"""same"""', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, padding='same', kernel_size=(3, 3), activation='relu'\n )\n", (3656, 3728), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((3749, 3802), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'padding': '"""same"""'}), "(pool_size=(3, 3), padding='same')\n", (3768, 3802), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((3826, 3911), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'padding': '"""same"""', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, padding='same', kernel_size=(3, 3), activation='relu'\n )\n", (3839, 3911), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((3932, 3985), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'padding': '"""same"""'}), "(pool_size=(3, 3), padding='same')\n", (3951, 3985), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((4009, 4025), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (4023, 4025), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((4050, 4086), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (4062, 4086), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((4113, 4132), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.4)'], {}), '(0.4)\n', (4127, 4132), False, 
'from tensorflow.keras import layers, models, losses, Model\n'), ((4157, 4192), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (4169, 4192), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((4219, 4256), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(board_size * board_size)'], {}), '(board_size * board_size)\n', (4231, 4256), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((4663, 4787), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(scaled_board_size, scaled_board_size, 3)'}), "(32, kernel_size=(3, 3), padding='same', activation='relu',\n input_shape=(scaled_board_size, scaled_board_size, 3))\n", (4669, 4787), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((4808, 4828), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4826, 4828), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((4854, 4919), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(32, kernel_size=(3, 3), padding='same', activation='relu')\n", (4860, 4919), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((4944, 4964), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4962, 4964), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((4989, 5019), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5001, 5019), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((5044, 5057), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5051, 5057), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5083, 
5148), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, kernel_size=(3, 3), padding='same', activation='relu')\n", (5089, 5148), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((5173, 5193), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5191, 5193), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((5218, 5231), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5225, 5231), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5257, 5323), 'keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(128, kernel_size=(3, 3), padding='same', activation='relu')\n", (5263, 5323), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((5348, 5368), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5366, 5368), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((5393, 5423), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5405, 5423), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((5448, 5461), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (5455, 5461), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5487, 5496), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5494, 5496), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5522, 5551), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (5527, 5551), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5576, 5596), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5594, 5596), False, 'from keras.layers import 
Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((5621, 5633), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5628, 5633), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5659, 5688), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (5664, 5688), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5713, 5733), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5731, 5733), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((5758, 5770), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (5765, 5770), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((5796, 5826), 'keras.layers.Dense', 'Dense', (['(board_size * board_size)'], {}), '(board_size * board_size)\n', (5801, 5826), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((6467, 6483), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (6481, 6483), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((6515, 6551), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (6527, 6551), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((6577, 6636), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(board_size * board_size)'], {'activation': '"""softmax"""'}), "(board_size * board_size, activation='softmax')\n", (6589, 6636), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((7177, 7193), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (7191, 7193), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((7225, 7261), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (7237, 7261), False, 'from tensorflow.keras import layers, models, losses, Model\n'), 
((7277, 7296), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (7291, 7296), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((7322, 7381), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(board_size * board_size)'], {'activation': '"""softmax"""'}), "(board_size * board_size, activation='softmax')\n", (7334, 7381), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((8584, 8614), 'coder.Coder.get_sequence', 'Coder.get_sequence', (['matches[i]'], {}), '(matches[i])\n', (8602, 8614), False, 'from coder import Coder\n'), ((8641, 8658), 'boardlogic.BoardLogic', 'BoardLogic', (['color'], {}), '(color)\n', (8651, 8658), False, 'from boardlogic import BoardLogic\n'), ((10510, 10540), 'coder.Coder.get_sequence', 'Coder.get_sequence', (['matches[i]'], {}), '(matches[i])\n', (10528, 10540), False, 'from coder import Coder\n'), ((10567, 10584), 'boardlogic.BoardLogic', 'BoardLogic', (['color'], {}), '(color)\n', (10577, 10584), False, 'from boardlogic import BoardLogic\n'), ((13775, 13826), 'helpers.Helpers.normalize', 'Helpers.normalize', (['board_heur[move_index]', '(-25)', '(100)'], {}), '(board_heur[move_index], -25, 100)\n', (13792, 13826), False, 'from helpers import Helpers\n'), ((13863, 13915), 'helpers.Helpers.normalize', 'Helpers.normalize', (['board_bench[move_index]', '(-58)', '(100)'], {}), '(board_bench[move_index], -58, 100)\n', (13880, 13915), False, 'from helpers import Helpers\n'), ((3101, 3155), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (3137, 3155), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((3239, 3264), 'tensorflow.keras.layers.Softmax', 'tf.keras.layers.Softmax', ([], {}), '()\n', (3262, 3264), True, 'import tensorflow as tf\n'), ((4368, 4422), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 
'losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (4404, 4422), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((4506, 4531), 'tensorflow.keras.layers.Softmax', 'tf.keras.layers.Softmax', ([], {}), '()\n', (4529, 4531), True, 'import tensorflow as tf\n'), ((5917, 5971), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (5953, 5971), False, 'from tensorflow.keras import layers, models, losses, Model\n'), ((6003, 6026), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), '()\n', (6024, 6026), False, 'from tensorflow import keras\n'), ((6133, 6142), 'keras.layers.Softmax', 'Softmax', ([], {}), '()\n', (6140, 6142), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((6950, 6959), 'keras.layers.Softmax', 'Softmax', ([], {}), '()\n', (6957, 6959), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((7695, 7704), 'keras.layers.Softmax', 'Softmax', ([], {}), '()\n', (7702, 7704), False, 'from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Softmax\n'), ((13707, 13736), 'coder.Coder.get_move_as_numpy', 'Coder.get_move_as_numpy', (['i', 'j'], {}), '(i, j)\n', (13730, 13736), False, 'from coder import Coder\n'), ((14076, 14115), 'math.log', 'math.log', (['(normalized_bench_move * 0.175)'], {}), '(normalized_bench_move * 0.175)\n', (14084, 14115), False, 'import math\n'), ((14001, 14028), 'math.log', 'math.log', (['(pred_value * 0.65)'], {}), '(pred_value * 0.65)\n', (14009, 14028), False, 'import math\n'), ((14033, 14071), 'math.log', 'math.log', (['(normalized_heur_move * 0.175)'], {}), '(normalized_heur_move * 0.175)\n', (14041, 14071), False, 'import math\n'), ((9191, 9242), 'coder.Coder.get_numpy_array_from_board', 'Coder.get_numpy_array_from_board', (['board_logic.board'], {}), 
'(board_logic.board)\n', (9223, 9242), False, 'from coder import Coder\n'), ((9329, 9352), 'coder.Coder.decode_move', 'Coder.decode_move', (['move'], {}), '(move)\n', (9346, 9352), False, 'from coder import Coder\n'), ((9394, 9423), 'coder.Coder.get_move_as_numpy', 'Coder.get_move_as_numpy', (['i', 'j'], {}), '(i, j)\n', (9417, 9423), False, 'from coder import Coder\n'), ((9880, 9931), 'coder.Coder.get_numpy_array_from_board', 'Coder.get_numpy_array_from_board', (['board_logic.board'], {}), '(board_logic.board)\n', (9912, 9931), False, 'from coder import Coder\n'), ((10018, 10041), 'coder.Coder.decode_move', 'Coder.decode_move', (['move'], {}), '(move)\n', (10035, 10041), False, 'from coder import Coder\n'), ((10083, 10112), 'coder.Coder.get_move_as_numpy', 'Coder.get_move_as_numpy', (['i', 'j'], {}), '(i, j)\n', (10106, 10112), False, 'from coder import Coder\n'), ((11117, 11168), 'coder.Coder.get_numpy_array_from_board', 'Coder.get_numpy_array_from_board', (['board_logic.board'], {}), '(board_logic.board)\n', (11149, 11168), False, 'from coder import Coder\n'), ((11254, 11277), 'coder.Coder.decode_move', 'Coder.decode_move', (['move'], {}), '(move)\n', (11271, 11277), False, 'from coder import Coder\n'), ((11319, 11348), 'coder.Coder.get_move_as_numpy', 'Coder.get_move_as_numpy', (['i', 'j'], {}), '(i, j)\n', (11342, 11348), False, 'from coder import Coder\n'), ((11804, 11855), 'coder.Coder.get_numpy_array_from_board', 'Coder.get_numpy_array_from_board', (['board_logic.board'], {}), '(board_logic.board)\n', (11836, 11855), False, 'from coder import Coder\n'), ((11941, 11964), 'coder.Coder.decode_move', 'Coder.decode_move', (['move'], {}), '(move)\n', (11958, 11964), False, 'from coder import Coder\n'), ((12006, 12035), 'coder.Coder.get_move_as_numpy', 'Coder.get_move_as_numpy', (['i', 'j'], {}), '(i, j)\n', (12029, 12035), False, 'from coder import Coder\n')] |
#!/usr/bin/env python
"""demo_simulate_nyu_finger_double
Simple demo showing how the simulation setup works.
License: BSD 3-Clause License
Copyright (C) 2018-2021, New York University , Max Planck Gesellschaft
Copyright note valid unless otherwise stated in individual files.
All rights reserved.
"""
import time
from robot_properties_nyu_finger.wrapper import NYUFingerDoubleRobot
from bullet_utils.env import BulletEnv
import numpy as np
if __name__ == "__main__":
    # The Bullet environment has to exist before any robot is spawned.
    env = BulletEnv()
    # Instantiating the wrapper also adds the robot to the simulator.
    robot = NYUFingerDoubleRobot()
    # Register the robot so its internal state is refreshed on every step.
    env.add_robot(robot)
    # Zero torque on every joint -- this demo applies no active control.
    joint_torques = np.zeros(robot.nv)
    # Reset to a fixed joint configuration at rest.
    start_q = 0.5 * np.ones(robot.nq)
    start_dq = np.zeros(robot.nv)
    robot.reset_state(start_q, start_dq)
    # Advance the simulation for 2000 steps.
    for _ in range(2000):
        # TODO: Implement a controller here.
        robot.send_joint_command(joint_torques)
        # sleep=True slows the stepping down for a watchable replay.
        env.step(sleep=True)
    # Inspect the final state and the contact forces.
    q, dq = robot.get_state()
    active_eff, forces = robot.get_force()
    print("q", q)
    print("dq", dq)
    print("active eff", active_eff)
    print("forces", forces)
| [
"numpy.ones",
"bullet_utils.env.BulletEnv",
"numpy.zeros",
"robot_properties_nyu_finger.wrapper.NYUFingerDoubleRobot"
] | [((552, 563), 'bullet_utils.env.BulletEnv', 'BulletEnv', ([], {}), '()\n', (561, 563), False, 'from bullet_utils.env import BulletEnv\n'), ((654, 676), 'robot_properties_nyu_finger.wrapper.NYUFingerDoubleRobot', 'NYUFingerDoubleRobot', ([], {}), '()\n', (674, 676), False, 'from robot_properties_nyu_finger.wrapper import NYUFingerDoubleRobot\n'), ((844, 862), 'numpy.zeros', 'np.zeros', (['robot.nv'], {}), '(robot.nv)\n', (852, 862), True, 'import numpy as np\n'), ((952, 970), 'numpy.zeros', 'np.zeros', (['robot.nv'], {}), '(robot.nv)\n', (960, 970), True, 'import numpy as np\n'), ((924, 941), 'numpy.ones', 'np.ones', (['robot.nq'], {}), '(robot.nq)\n', (931, 941), True, 'import numpy as np\n')] |
# usage: mpython lig2protDist.py t1_v178a
"""Plot ligand-to-protein residue contact distances for helices S1-S4.

Loads <key>.dcd / <key>.psf, computes mdtraj contact distances between the
GBI1 ligand residue and the residues of four helix segments, and writes two
4-panel figures: absolute distances (<key>_byChain.eps) and distances
relative to a reference structure (<key>_byChainRel.eps).
"""
import mdtraj as md
import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sys


def _ligand_contacts(traj, ligand_group, helix_group):
    """Return contact distances between the ligand and each helix residue.

    Shape of the result is (n_frames, len(helix_group))."""
    pairs = list(itertools.product(ligand_group, helix_group))
    dists, _ = md.compute_contacts(traj, pairs)
    return dists


def _draw_panel(position, data, first_resid, n_resid, label, cmap, vmin, vmax):
    """Draw one helix panel: frames on x, protein residue number on y."""
    plt.subplot(4, 1, position)
    plt.imshow(data.T, cmap=cmap, aspect='auto', interpolation='none',
               vmin=vmin, vmax=vmax)
    plt.ylabel(label)
    ax = plt.gca()
    ax.set_yticks(np.arange(0, n_resid, 1))
    ax.set_yticklabels(np.arange(first_resid, first_resid + n_resid, 1))


def _plot_figure(panels, cmap, vmin, vmax, outfile):
    """Render a 4-panel (S1-S4) figure with a shared colorbar and save it.

    panels -- sequence of (data, first_resid, n_resid, ylabel) tuples.
    """
    plt.clf()
    fig = plt.figure(figsize=(12, 24))
    for position, (data, first_resid, n_resid, label) in enumerate(panels, start=1):
        _draw_panel(position, data, first_resid, n_resid, label, cmap, vmin, vmax)
    plt.xlabel('every 100th frame')
    plt.xticks(np.arange(0, 50, 5))
    plt.colorbar(cax=fig.add_axes([0.95, 0.12, 0.03, 0.76]))  # left bot wid height
    plt.savefig(outfile, bbox_inches='tight')


key = sys.argv[1]
print('loading in {} trajectory...'.format(key))
t = md.load(key + '.dcd', top=key + '.psf', stride=100)

print('assigning residue groups...')
top = t.topology
gbiAtoms = top.select("resname GBI1")
group0 = [top.atom(gbiAtoms[0]).residue.index]
### mdtraj starts reading residue 0 as the first one (Phe 88)
# so to get mdtraj values, take (protein resid)-88
group1 = list(range(11, 38))    # 99-126
group2 = list(range(46, 73))    # 134-161
group3 = list(range(80, 104))   # 168-192
group4 = list(range(110, 133))  # 198-221

print('calculating contacts from s1 helix...')
dists1 = _ligand_contacts(t, group0, group1)
print('calculating contacts from s2 helix...')
dists2 = _ligand_contacts(t, group0, group2)
print('calculating contacts from s3 helix...')
dists3 = _ligand_contacts(t, group0, group3)
print('calculating contacts from s4 helix...')
dists4 = _ligand_contacts(t, group0, group4)

### take relative to reference coordinates
print('doing the same with reference coordinates...')
u = md.load('t1_begin.pdb')
group0a = [u.topology.atom(u.topology.select("resname GBI1")[0]).residue.index]
# relative difference: trajectory distance minus reference distance
rel1 = dists1 - _ligand_contacts(u, group0a, group1)
rel2 = dists2 - _ligand_contacts(u, group0a, group2)
rel3 = dists3 - _ligand_contacts(u, group0a, group3)
rel4 = dists4 - _ligand_contacts(u, group0a, group4)

print('plotting original distances...')
_plot_figure(
    ((dists1, 99, 27, 'residue in S1'),
     (dists2, 134, 27, 'residue in S2'),
     (dists3, 168, 24, 'residue in S3'),
     (dists4, 198, 23, 'residue in S4')),
    cmap='jet_r', vmin=0.0, vmax=2.30, outfile=key + '_byChain.eps')

print('plotting relative distances...')
_plot_figure(
    ((rel1, 99, 27, 'residue in S1'),
     (rel2, 134, 27, 'residue in S2'),
     (rel3, 168, 24, 'residue in S3'),
     (rel4, 198, 23, 'residue in S4')),
    cmap='seismic_r', vmin=-0.5, vmax=0.5, outfile=key + '_byChainRel.eps')
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"itertools.product",
"mdtraj.compute_contacts",
"matplotlib.pyplot.figure",
"mdtraj.load",
"matplotlib.pyplot.subplot",
"numpy.aran... | [((237, 288), 'mdtraj.load', 'md.load', (["(key + '.dcd')"], {'top': "(key + '.psf')", 'stride': '(100)'}), "(key + '.dcd', top=key + '.psf', stride=100)\n", (244, 288), True, 'import mdtraj as md\n'), ((805, 835), 'mdtraj.compute_contacts', 'md.compute_contacts', (['t', 'pairs1'], {}), '(t, pairs1)\n', (824, 835), True, 'import mdtraj as md\n'), ((978, 1008), 'mdtraj.compute_contacts', 'md.compute_contacts', (['t', 'pairs2'], {}), '(t, pairs2)\n', (997, 1008), True, 'import mdtraj as md\n'), ((1120, 1150), 'mdtraj.compute_contacts', 'md.compute_contacts', (['t', 'pairs3'], {}), '(t, pairs3)\n', (1139, 1150), True, 'import mdtraj as md\n'), ((1262, 1292), 'mdtraj.compute_contacts', 'md.compute_contacts', (['t', 'pairs4'], {}), '(t, pairs4)\n', (1281, 1292), True, 'import mdtraj as md\n'), ((1392, 1415), 'mdtraj.load', 'md.load', (['"""t1_begin.pdb"""'], {}), "('t1_begin.pdb')\n", (1399, 1415), True, 'import mdtraj as md\n'), ((1753, 1784), 'mdtraj.compute_contacts', 'md.compute_contacts', (['u', 'pairs1a'], {}), '(u, pairs1a)\n', (1772, 1784), True, 'import mdtraj as md\n'), ((1802, 1833), 'mdtraj.compute_contacts', 'md.compute_contacts', (['u', 'pairs2a'], {}), '(u, pairs2a)\n', (1821, 1833), True, 'import mdtraj as md\n'), ((1851, 1882), 'mdtraj.compute_contacts', 'md.compute_contacts', (['u', 'pairs3a'], {}), '(u, pairs3a)\n', (1870, 1882), True, 'import mdtraj as md\n'), ((1900, 1931), 'mdtraj.compute_contacts', 'md.compute_contacts', (['u', 'pairs4a'], {}), '(u, pairs4a)\n', (1919, 1931), True, 'import mdtraj as md\n'), ((2088, 2097), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2095, 2097), True, 'import matplotlib.pyplot as plt\n'), ((2104, 2132), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 24)'}), '(figsize=(12, 24))\n', (2114, 2132), True, 'import matplotlib.pyplot as plt\n'), ((2133, 2153), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (2144, 2153), True, 'import 
matplotlib.pyplot as plt\n'), ((2152, 2247), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dists1.T'], {'cmap': '"""jet_r"""', 'aspect': '"""auto"""', 'interpolation': '"""none"""', 'vmin': '(0.0)', 'vmax': '(2.3)'}), "(dists1.T, cmap='jet_r', aspect='auto', interpolation='none',\n vmin=0.0, vmax=2.3)\n", (2162, 2247), True, 'import matplotlib.pyplot as plt\n'), ((2242, 2269), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""residue in S1"""'], {}), "('residue in S1')\n", (2252, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2284), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2282, 2284), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2386), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(2)'], {}), '(4, 1, 2)\n', (2377, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2480), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dists2.T'], {'cmap': '"""jet_r"""', 'aspect': '"""auto"""', 'interpolation': '"""none"""', 'vmin': '(0.0)', 'vmax': '(2.3)'}), "(dists2.T, cmap='jet_r', aspect='auto', interpolation='none',\n vmin=0.0, vmax=2.3)\n", (2395, 2480), True, 'import matplotlib.pyplot as plt\n'), ((2475, 2502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""residue in S2"""'], {}), "('residue in S2')\n", (2485, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2508, 2517), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2515, 2517), True, 'import matplotlib.pyplot as plt\n'), ((2600, 2620), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(3)'], {}), '(4, 1, 3)\n', (2611, 2620), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2714), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dists3.T'], {'cmap': '"""jet_r"""', 'aspect': '"""auto"""', 'interpolation': '"""none"""', 'vmin': '(0.0)', 'vmax': '(2.3)'}), "(dists3.T, cmap='jet_r', aspect='auto', interpolation='none',\n vmin=0.0, vmax=2.3)\n", (2629, 2714), True, 'import matplotlib.pyplot as plt\n'), ((2709, 2736), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""residue in S3"""'], {}), "('residue in S3')\n", (2719, 2736), True, 'import matplotlib.pyplot as plt\n'), ((2742, 2751), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2749, 2751), True, 'import matplotlib.pyplot as plt\n'), ((2834, 2854), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(4)'], {}), '(4, 1, 4)\n', (2845, 2854), True, 'import matplotlib.pyplot as plt\n'), ((2853, 2948), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dists4.T'], {'cmap': '"""jet_r"""', 'aspect': '"""auto"""', 'interpolation': '"""none"""', 'vmin': '(0.0)', 'vmax': '(2.3)'}), "(dists4.T, cmap='jet_r', aspect='auto', interpolation='none',\n vmin=0.0, vmax=2.3)\n", (2863, 2948), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2970), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""residue in S4"""'], {}), "('residue in S4')\n", (2953, 2970), True, 'import matplotlib.pyplot as plt\n'), ((2976, 2985), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2983, 2985), True, 'import matplotlib.pyplot as plt\n'), ((3068, 3099), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""every 100th frame"""'], {}), "('every 100th frame')\n", (3078, 3099), True, 'import matplotlib.pyplot as plt\n'), ((3224, 3278), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(key + '_byChain.eps')"], {'bbox_inches': '"""tight"""'}), "(key + '_byChain.eps', bbox_inches='tight')\n", (3235, 3278), True, 'import matplotlib.pyplot as plt\n'), ((3319, 3328), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3326, 3328), True, 'import matplotlib.pyplot as plt\n'), ((3335, 3363), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 24)'}), '(figsize=(12, 24))\n', (3345, 3363), True, 'import matplotlib.pyplot as plt\n'), ((3364, 3384), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(1)'], {}), '(4, 1, 1)\n', (3375, 3384), True, 'import matplotlib.pyplot as plt\n'), ((3383, 3481), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rel1.T'], {'cmap': 
'"""seismic_r"""', 'aspect': '"""auto"""', 'interpolation': '"""none"""', 'vmin': '(-0.5)', 'vmax': '(0.5)'}), "(rel1.T, cmap='seismic_r', aspect='auto', interpolation='none',\n vmin=-0.5, vmax=0.5)\n", (3393, 3481), True, 'import matplotlib.pyplot as plt\n'), ((3475, 3502), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""residue in S1"""'], {}), "('residue in S1')\n", (3485, 3502), True, 'import matplotlib.pyplot as plt\n'), ((3508, 3517), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3515, 3517), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3619), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(2)'], {}), '(4, 1, 2)\n', (3610, 3619), True, 'import matplotlib.pyplot as plt\n'), ((3618, 3716), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rel2.T'], {'cmap': '"""seismic_r"""', 'aspect': '"""auto"""', 'interpolation': '"""none"""', 'vmin': '(-0.5)', 'vmax': '(0.5)'}), "(rel2.T, cmap='seismic_r', aspect='auto', interpolation='none',\n vmin=-0.5, vmax=0.5)\n", (3628, 3716), True, 'import matplotlib.pyplot as plt\n'), ((3710, 3737), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""residue in S2"""'], {}), "('residue in S2')\n", (3720, 3737), True, 'import matplotlib.pyplot as plt\n'), ((3743, 3752), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3750, 3752), True, 'import matplotlib.pyplot as plt\n'), ((3835, 3855), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(3)'], {}), '(4, 1, 3)\n', (3846, 3855), True, 'import matplotlib.pyplot as plt\n'), ((3854, 3952), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rel3.T'], {'cmap': '"""seismic_r"""', 'aspect': '"""auto"""', 'interpolation': '"""none"""', 'vmin': '(-0.5)', 'vmax': '(0.5)'}), "(rel3.T, cmap='seismic_r', aspect='auto', interpolation='none',\n vmin=-0.5, vmax=0.5)\n", (3864, 3952), True, 'import matplotlib.pyplot as plt\n'), ((3946, 3973), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""residue in S3"""'], {}), "('residue in S3')\n", (3956, 3973), True, 'import 
matplotlib.pyplot as plt\n'), ((3979, 3988), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3986, 3988), True, 'import matplotlib.pyplot as plt\n'), ((4071, 4091), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(1)', '(4)'], {}), '(4, 1, 4)\n', (4082, 4091), True, 'import matplotlib.pyplot as plt\n'), ((4090, 4188), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rel4.T'], {'cmap': '"""seismic_r"""', 'aspect': '"""auto"""', 'interpolation': '"""none"""', 'vmin': '(-0.5)', 'vmax': '(0.5)'}), "(rel4.T, cmap='seismic_r', aspect='auto', interpolation='none',\n vmin=-0.5, vmax=0.5)\n", (4100, 4188), True, 'import matplotlib.pyplot as plt\n'), ((4183, 4210), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""residue in S4"""'], {}), "('residue in S4')\n", (4193, 4210), True, 'import matplotlib.pyplot as plt\n'), ((4216, 4225), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4223, 4225), True, 'import matplotlib.pyplot as plt\n'), ((4308, 4339), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""every 100th frame"""'], {}), "('every 100th frame')\n", (4318, 4339), True, 'import matplotlib.pyplot as plt\n'), ((4464, 4521), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(key + '_byChainRel.eps')"], {'bbox_inches': '"""tight"""'}), "(key + '_byChainRel.eps', bbox_inches='tight')\n", (4475, 4521), True, 'import matplotlib.pyplot as plt\n'), ((754, 787), 'itertools.product', 'itertools.product', (['group0', 'group1'], {}), '(group0, group1)\n', (771, 787), False, 'import itertools\n'), ((927, 960), 'itertools.product', 'itertools.product', (['group0', 'group2'], {}), '(group0, group2)\n', (944, 960), False, 'import itertools\n'), ((1069, 1102), 'itertools.product', 'itertools.product', (['group0', 'group3'], {}), '(group0, group3)\n', (1086, 1102), False, 'import itertools\n'), ((1211, 1244), 'itertools.product', 'itertools.product', (['group0', 'group4'], {}), '(group0, group4)\n', (1228, 1244), False, 'import itertools\n'), ((1526, 1560), 'itertools.product', 
'itertools.product', (['group0a', 'group1'], {}), '(group0a, group1)\n', (1543, 1560), False, 'import itertools\n'), ((1577, 1611), 'itertools.product', 'itertools.product', (['group0a', 'group2'], {}), '(group0a, group2)\n', (1594, 1611), False, 'import itertools\n'), ((1628, 1662), 'itertools.product', 'itertools.product', (['group0a', 'group3'], {}), '(group0a, group3)\n', (1645, 1662), False, 'import itertools\n'), ((1679, 1713), 'itertools.product', 'itertools.product', (['group0a', 'group4'], {}), '(group0a, group4)\n', (1696, 1713), False, 'import itertools\n'), ((2300, 2319), 'numpy.arange', 'np.arange', (['(0)', '(27)', '(1)'], {}), '(0, 27, 1)\n', (2309, 2319), True, 'import numpy as np\n'), ((2341, 2362), 'numpy.arange', 'np.arange', (['(99)', '(126)', '(1)'], {}), '(99, 126, 1)\n', (2350, 2362), True, 'import numpy as np\n'), ((2533, 2552), 'numpy.arange', 'np.arange', (['(0)', '(27)', '(1)'], {}), '(0, 27, 1)\n', (2542, 2552), True, 'import numpy as np\n'), ((2574, 2596), 'numpy.arange', 'np.arange', (['(134)', '(161)', '(1)'], {}), '(134, 161, 1)\n', (2583, 2596), True, 'import numpy as np\n'), ((2767, 2786), 'numpy.arange', 'np.arange', (['(0)', '(24)', '(1)'], {}), '(0, 24, 1)\n', (2776, 2786), True, 'import numpy as np\n'), ((2808, 2830), 'numpy.arange', 'np.arange', (['(168)', '(192)', '(1)'], {}), '(168, 192, 1)\n', (2817, 2830), True, 'import numpy as np\n'), ((3001, 3020), 'numpy.arange', 'np.arange', (['(0)', '(23)', '(1)'], {}), '(0, 23, 1)\n', (3010, 3020), True, 'import numpy as np\n'), ((3042, 3064), 'numpy.arange', 'np.arange', (['(198)', '(221)', '(1)'], {}), '(198, 221, 1)\n', (3051, 3064), True, 'import numpy as np\n'), ((3111, 3130), 'numpy.arange', 'np.arange', (['(0)', '(50)', '(5)'], {}), '(0, 50, 5)\n', (3120, 3130), True, 'import numpy as np\n'), ((3533, 3552), 'numpy.arange', 'np.arange', (['(0)', '(27)', '(1)'], {}), '(0, 27, 1)\n', (3542, 3552), True, 'import numpy as np\n'), ((3574, 3595), 'numpy.arange', 'np.arange', 
(['(99)', '(126)', '(1)'], {}), '(99, 126, 1)\n', (3583, 3595), True, 'import numpy as np\n'), ((3768, 3787), 'numpy.arange', 'np.arange', (['(0)', '(27)', '(1)'], {}), '(0, 27, 1)\n', (3777, 3787), True, 'import numpy as np\n'), ((3809, 3831), 'numpy.arange', 'np.arange', (['(134)', '(161)', '(1)'], {}), '(134, 161, 1)\n', (3818, 3831), True, 'import numpy as np\n'), ((4004, 4023), 'numpy.arange', 'np.arange', (['(0)', '(24)', '(1)'], {}), '(0, 24, 1)\n', (4013, 4023), True, 'import numpy as np\n'), ((4045, 4067), 'numpy.arange', 'np.arange', (['(168)', '(192)', '(1)'], {}), '(168, 192, 1)\n', (4054, 4067), True, 'import numpy as np\n'), ((4241, 4260), 'numpy.arange', 'np.arange', (['(0)', '(23)', '(1)'], {}), '(0, 23, 1)\n', (4250, 4260), True, 'import numpy as np\n'), ((4282, 4304), 'numpy.arange', 'np.arange', (['(198)', '(221)', '(1)'], {}), '(198, 221, 1)\n', (4291, 4304), True, 'import numpy as np\n'), ((4351, 4370), 'numpy.arange', 'np.arange', (['(0)', '(50)', '(5)'], {}), '(0, 50, 5)\n', (4360, 4370), True, 'import numpy as np\n')] |
#!/usr/bin/env pytest
import numpy as np
import pytest
import siglib as sl
@pytest.mark.parametrize(
    "x,frame_length,frame_step,pad,pad_value,expected",
    (
        # NOTE: builtin `complex` replaces the `np.complex` alias, which was
        # deprecated in NumPy 1.20 and removed in 1.24 (it broke collection).
        (np.arange(10), 5, 5, True, 0j, np.arange(10, dtype=complex).reshape(2, 5)),
        (np.arange(10), 5, 5, False, 0j, np.arange(10, dtype=complex).reshape(2, 5)),
    ),
)
def test_frame(x, frame_length, frame_step, pad, pad_value, expected):
    """sl.frame splits x into frame_length-sized frames every frame_step samples."""
    result = sl.frame(x, frame_length, frame_step, pad=pad, pad_value=pad_value)
    np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
    "x,ntaps,expected",
    (
        (np.zeros(10), 5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
        (np.arange(10), 5, [4, 4, 4, 4, 4, 5, 6, 7, 8, 9]),
    ),
)
def test_closing(x, ntaps, expected):
    """sl.closing output for an ntaps-wide window matches the expected sequence."""
    result = sl.closing(x, ntaps)
    # builtin `complex` replaces `np.complex`, removed in NumPy 1.24
    expected = np.array(expected, dtype=complex)
    np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
    "x,ntaps,expected",
    (
        (np.zeros(10), 5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
        (np.arange(10), 5, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
    ),
)
def test_opening(x, ntaps, expected):
    """sl.opening output for an ntaps-wide window matches the expected sequence."""
    result = sl.opening(x, ntaps)
    # builtin `complex` replaces `np.complex`, removed in NumPy 1.24
    expected = np.array(expected, dtype=complex)
    np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
    "x,idx,ntaps,expected",
    ((np.arange(10 ** 2), np.array([45.567]), 5, [44.96565413]),),
)
def test_resample(x, idx, ntaps, expected):
    """sl.resample evaluates x at the fractional indices idx using ntaps taps."""
    interpolated = sl.resample(x, idx, ntaps)
    np.testing.assert_allclose(interpolated, expected, rtol=1e-9)
@pytest.mark.parametrize(
    "x,delay,pad,pad_value,expected",
    (
        (
            [1 + 3j, 4 + 2j, 5 + 6j, 1 + 0j],
            1,
            True,
            1 + 0j,
            [10 - 10j, 32 + 14j, 5.0 - 6j, 1.0 + 0j],
        ),
        (
            [1 + 3j, 4 + 2j, 5 + 6j, 1 + 0j],
            1,
            False,
            1 + 0j,
            [10 - 10j, 32 + 14j, 5.0 - 6j],
        ),
    ),
)
def test_dcm(x, delay, pad, pad_value, expected):
    """sl.dcm of x with the given delay/padding matches the expected sequence.

    The padded case keeps the trailing samples; the unpadded one drops them.
    """
    signal = np.array(x)
    reference = np.array(expected)
    produced = sl.dcm(signal, delay, pad=pad, pad_value=pad_value)
    np.testing.assert_equal(produced, reference)
@pytest.mark.parametrize(
    "x,H,step,expected",
    (
        # Case 1: 1-D H (single filter) -> expected has one output row.
        (
            [
                0.0 + 3.0j,
                -4.0 + 2.0j,
                -4.0 + 1.0j,
                -5.0 + 2.0j,
                -4.0 - 3.0j,
                2.0 + 1.0j,
                -1.0 - 2.0j,
                3.0 + 3.0j,
                -3.0 - 1.0j,
                -3.0 + 2.0j,
                -4.0 + 0.0j,
                -4.0 - 4.0j,
                -3.0 - 4.0j,
                -4.0 - 3.0j,
                -4.0 + 3.0j,
                -3.0 + 4.0j,
                -4.0 - 1.0j,
                -5.0 + 0.0j,
                4.0 + 2.0j,
                2.0 - 3.0j,
            ],
            [
                -8.0 + 7.0j,
                -4.70710678 + 8.94974747j,
                -1.0 + 8.0j,
                0.94974747 + 4.70710678j,
                0.0 + 1.0j,
                -3.29289322 - 0.94974747j,
                -7.0 + 0.0j,
                -8.94974747 + 3.29289322j,
            ],
            7,
            [
                [
                    -12.0 - 12.0j,
                    -1.0 - 36.0j,
                    22.0 - 40.0j,
                    25.0 - 44.0j,
                    42.0 - 27.0j,
                    13.0 + 4.0j,
                    1.0 + 6.0j,
                    -14.0 + 5.0j,
                    -5.0 - 11.0j,
                    19.0 - 25.0j,
                    22.0 - 33.0j,
                    48.0 - 12.0j,
                    56.0 + 8.0j,
                    52.0 + 3.0j,
                    29.0 - 28.0j,
                    3.0 - 52.0j,
                    20.0 - 37.0j,
                    39.0 - 28.0j,
                    -4.0 - 7.0j,
                    -18.0 + 24.0j,
                    1.0 + 18.0j,
                ]
            ],
        ),
        # Case 2: another 1-D H with a different 20-sample input.
        (
            [
                0.0 - 5.0j,
                2.0 - 4.0j,
                4.0 - 1.0j,
                -4.0 - 2.0j,
                2.0 + 0.0j,
                2.0 + 0.0j,
                0.0 - 5.0j,
                0.0 - 1.0j,
                -2.0 + 1.0j,
                -4.0 - 2.0j,
                0.0 + 2.0j,
                -5.0 - 5.0j,
                -5.0 - 1.0j,
                -2.0 + 1.0j,
                0.0 - 1.0j,
                3.0 + 4.0j,
                0.0 - 2.0j,
                1.0 + 0.0j,
                -1.0 - 2.0j,
                3.0 + 3.0j,
            ],
            [
                -6.0 + 6.0j,
                -3.29289322 + 6.53553391j,
                -1.0 + 5.0j,
                -0.46446609 + 2.29289322j,
                -2.0 + 0.0j,
                -4.70710678 - 0.53553391j,
                -7.0 + 1.0j,
                -7.53553391 + 3.70710678j,
            ],
            7,
            [
                [
                    15.0 + 20.0j,
                    19.0 + 32.0j,
                    -5.0 + 30.0j,
                    17.0 + 10.0j,
                    6.0 - 2.0j,
                    -12.0 + 12.0j,
                    11.0 + 26.0j,
                    18.0 + 14.0j,
                    8.0 - 8.0j,
                    23.0 - 12.0j,
                    8.0 - 16.0j,
                    29.0 + 1.0j,
                    48.0 - 16.0j,
                    18.0 - 23.0j,
                    4.0 - 4.0j,
                    -21.0 - 5.0j,
                    -12.0 + 9.0j,
                    2.0 + 7.0j,
                    8.0 + 8.0j,
                    -13.0 - 2.0j,
                    -15.0 + 3.0j,
                ]
            ],
        ),
        # Case 3: 2-D H (two stacked filters) -> expected has two output rows,
        # one per filter.
        (
            [
                -2.0 - 4.0j,
                4.0 - 4.0j,
                -1.0 - 1.0j,
                -5.0 - 1.0j,
                4.0 - 4.0j,
                1.0 - 4.0j,
                -5.0 + 3.0j,
                4.0 + 3.0j,
                3.0 - 3.0j,
                2.0 - 4.0j,
                4.0 - 5.0j,
                0.0 - 4.0j,
                2.0 + 1.0j,
                2.0 - 4.0j,
                4.0 + 1.0j,
                -5.0 - 3.0j,
                2.0 + 3.0j,
                4.0 + 4.0j,
                1.0 + 4.0j,
                -1.0 - 3.0j,
            ],
            [
                [
                    1.0 + 2.0j,
                    2.24264069 - 1.0j,
                    1.0 - 4.0j,
                    -2.0 - 5.24264069j,
                    -5.0 - 4.0j,
                    -6.24264069 - 1.0j,
                    -5.0 + 2.0j,
                    -2.0 + 3.24264069j,
                ],
                [
                    -4.0 + 6.0j,
                    -0.29289322 + 6.94974747j,
                    3.0 + 5.0j,
                    3.94974747 + 1.29289322j,
                    2.0 - 2.0j,
                    -1.70710678 - 2.94974747j,
                    -5.0 - 1.0j,
                    -5.94974747 + 2.70710678j,
                ],
            ],
            7,
            [
                [
                    3.55271368e-15 + 1.0000000e01j,
                    -6.00000000e00 - 1.4000000e01j,
                    2.50000000e01 + 3.0000000e00j,
                    9.00000000e00 + 1.0000000e00j,
                    -2.40000000e01 - 1.4000000e01j,
                    1.80000000e01 + 7.0000000e00j,
                    2.80000000e01 - 1.0000000e01j,
                    -2.90000000e01 - 1.6000000e01j,
                    -6.00000000e00 + 2.4000000e01j,
                    1.00000000e01 + 6.0000000e00j,
                    5.00000000e00 + 0.0000000e00j,
                    2.30000000e01 + 5.0000000e00j,
                    9.00000000e00 - 1.6000000e01j,
                    -5.00000000e00 + 1.5000000e01j,
                    1.10000000e01 - 1.2000000e01j,
                    1.60000000e01 + 2.6000000e01j,
                    -7.00000000e00 - 3.2000000e01j,
                    -7.00000000e00 + 3.0000000e00j,
                    2.00000000e00 + 1.5000000e01j,
                    -1.00000000e01 + 2.2000000e01j,
                    6.00000000e00 - 1.2000000e01j,
                ],
                [
                    1.00000000e01 - 4.4408921e-16j,
                    2.60000000e01 + 1.6000000e01j,
                    7.00000000e00 + 2.7000000e01j,
                    1.40000000e01 - 1.0000000e01j,
                    2.30000000e01 - 5.0000000e00j,
                    1.10000000e01 + 3.4000000e01j,
                    1.20000000e01 + 3.0000000e00j,
                    -7.00000000e00 - 2.4000000e01j,
                    -2.10000000e01 + 1.6000000e01j,
                    9.00000000e00 + 2.9000000e01j,
                    1.60000000e01 + 3.3000000e01j,
                    1.60000000e01 + 3.5000000e01j,
                    1.20000000e01 + 1.5000000e01j,
                    -4.00000000e00 + 1.3000000e01j,
                    4.00000000e00 + 2.7000000e01j,
                    -5.00000000e00 + 6.0000000e00j,
                    1.90000000e01 - 1.0000000e01j,
                    -3.00000000e01 + 3.0000000e00j,
                    -3.70000000e01 + 2.0000000e00j,
                    -1.20000000e01 - 7.0000000e00j,
                    1.50000000e01 + 5.0000000e00j,
                ],
            ],
        ),
    ),
)
def test_overlapsave(x, H, step, expected):
    """sl.overlapsave(x, H, step) output matches the expected 2-D array.

    Covers a single 1-D filter response H as well as a 2-D stack of two
    responses (one output row per response in the fixtures above).
    """
    result = sl.overlapsave(np.array(x), np.array(H), step)
    expected = np.array(expected)
    np.testing.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
    "window_length,expected",
    (
        (4, [0.08, 0.77, 0.77, 0.08],),
        (5, [0.08, 0.54, 1.0, 0.54, 0.08],),
        # Longer window: values are symmetric around the central 1.0 and the
        # endpoints are 0.08 (the classic Hamming 0.54/0.46 coefficients).
        (
            99,
            [
                0.08,
                0.08094512,
                0.08377659,
                0.08848279,
                0.09504436,
                0.10343436,
                0.11361829,
                0.12555432,
                0.1391934,
                0.15447947,
                0.17134973,
                0.18973486,
                0.2095593,
                0.23074159,
                0.25319469,
                0.27682634,
                0.30153942,
                0.32723239,
                0.35379966,
                0.38113207,
                0.40911731,
                0.43764037,
                0.46658405,
                0.49582941,
                0.52525627,
                0.55474373,
                0.58417059,
                0.61341595,
                0.64235963,
                0.67088269,
                0.69886793,
                0.72620034,
                0.75276761,
                0.77846058,
                0.80317366,
                0.82680531,
                0.84925841,
                0.8704407,
                0.89026514,
                0.90865027,
                0.92552053,
                0.9408066,
                0.95444568,
                0.96638171,
                0.97656564,
                0.98495564,
                0.99151721,
                0.99622341,
                0.99905488,
                1.0,
                0.99905488,
                0.99622341,
                0.99151721,
                0.98495564,
                0.97656564,
                0.96638171,
                0.95444568,
                0.9408066,
                0.92552053,
                0.90865027,
                0.89026514,
                0.8704407,
                0.84925841,
                0.82680531,
                0.80317366,
                0.77846058,
                0.75276761,
                0.72620034,
                0.69886793,
                0.67088269,
                0.64235963,
                0.61341595,
                0.58417059,
                0.55474373,
                0.52525627,
                0.49582941,
                0.46658405,
                0.43764037,
                0.40911731,
                0.38113207,
                0.35379966,
                0.32723239,
                0.30153942,
                0.27682634,
                0.25319469,
                0.23074159,
                0.2095593,
                0.18973486,
                0.17134973,
                0.15447947,
                0.1391934,
                0.12555432,
                0.11361829,
                0.10343436,
                0.09504436,
                0.08848279,
                0.08377659,
                0.08094512,
                0.08,
            ],
        ),
    ),
)
def test_hamming(window_length, expected):
    """sl.hamming(window_length) matches the reference window samples.

    Checked with np.testing.assert_almost_equal, so the 8-decimal fixture
    values above are compared within the default tolerance.
    """
    result = sl.hamming(window_length)
    expected = np.array(expected)
    np.testing.assert_almost_equal(result, expected)
| [
"siglib.opening",
"numpy.testing.assert_equal",
"siglib.dcm",
"numpy.arange",
"siglib.closing",
"numpy.testing.assert_allclose",
"pytest.mark.parametrize",
"numpy.array",
"numpy.testing.assert_almost_equal",
"siglib.resample",
"numpy.zeros",
"siglib.frame",
"siglib.hamming"
] | [((1532, 1820), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x,delay,pad,pad_value,expected"""', '(([1 + 3.0j, 4 + 2.0j, 5 + 6.0j, 1 + 0.0j], 1, True, 1 + 0.0j, [10 - 10.0j,\n 32 + 14.0j, 5.0 - 6.0j, 1.0 + 0.0j]), ([1 + 3.0j, 4 + 2.0j, 5 + 6.0j, 1 +\n 0.0j], 1, False, 1 + 0.0j, [10 - 10.0j, 32 + 14.0j, 5.0 - 6.0j]))'], {}), "('x,delay,pad,pad_value,expected', (([1 + 3.0j, 4 + \n 2.0j, 5 + 6.0j, 1 + 0.0j], 1, True, 1 + 0.0j, [10 - 10.0j, 32 + 14.0j, \n 5.0 - 6.0j, 1.0 + 0.0j]), ([1 + 3.0j, 4 + 2.0j, 5 + 6.0j, 1 + 0.0j], 1,\n False, 1 + 0.0j, [10 - 10.0j, 32 + 14.0j, 5.0 - 6.0j])))\n", (1555, 1820), False, 'import pytest\n'), ((2162, 4937), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""x,H,step,expected"""', '(([0.0 + 3.0j, -4.0 + 2.0j, -4.0 + 1.0j, -5.0 + 2.0j, -4.0 - 3.0j, 2.0 + \n 1.0j, -1.0 - 2.0j, 3.0 + 3.0j, -3.0 - 1.0j, -3.0 + 2.0j, -4.0 + 0.0j, -\n 4.0 - 4.0j, -3.0 - 4.0j, -4.0 - 3.0j, -4.0 + 3.0j, -3.0 + 4.0j, -4.0 - \n 1.0j, -5.0 + 0.0j, 4.0 + 2.0j, 2.0 - 3.0j], [-8.0 + 7.0j, -4.70710678 +\n 8.94974747j, -1.0 + 8.0j, 0.94974747 + 4.70710678j, 0.0 + 1.0j, -\n 3.29289322 - 0.94974747j, -7.0 + 0.0j, -8.94974747 + 3.29289322j], 7, [\n [-12.0 - 12.0j, -1.0 - 36.0j, 22.0 - 40.0j, 25.0 - 44.0j, 42.0 - 27.0j,\n 13.0 + 4.0j, 1.0 + 6.0j, -14.0 + 5.0j, -5.0 - 11.0j, 19.0 - 25.0j, 22.0 -\n 33.0j, 48.0 - 12.0j, 56.0 + 8.0j, 52.0 + 3.0j, 29.0 - 28.0j, 3.0 - \n 52.0j, 20.0 - 37.0j, 39.0 - 28.0j, -4.0 - 7.0j, -18.0 + 24.0j, 1.0 + \n 18.0j]]), ([0.0 - 5.0j, 2.0 - 4.0j, 4.0 - 1.0j, -4.0 - 2.0j, 2.0 + 0.0j,\n 2.0 + 0.0j, 0.0 - 5.0j, 0.0 - 1.0j, -2.0 + 1.0j, -4.0 - 2.0j, 0.0 + \n 2.0j, -5.0 - 5.0j, -5.0 - 1.0j, -2.0 + 1.0j, 0.0 - 1.0j, 3.0 + 4.0j, \n 0.0 - 2.0j, 1.0 + 0.0j, -1.0 - 2.0j, 3.0 + 3.0j], [-6.0 + 6.0j, -\n 3.29289322 + 6.53553391j, -1.0 + 5.0j, -0.46446609 + 2.29289322j, -2.0 +\n 0.0j, -4.70710678 - 0.53553391j, -7.0 + 1.0j, -7.53553391 + 3.70710678j\n ], 7, [[15.0 + 20.0j, 19.0 + 32.0j, -5.0 + 30.0j, 17.0 + 10.0j, 6.0 - \n 2.0j, 
-12.0 + 12.0j, 11.0 + 26.0j, 18.0 + 14.0j, 8.0 - 8.0j, 23.0 - \n 12.0j, 8.0 - 16.0j, 29.0 + 1.0j, 48.0 - 16.0j, 18.0 - 23.0j, 4.0 - 4.0j,\n -21.0 - 5.0j, -12.0 + 9.0j, 2.0 + 7.0j, 8.0 + 8.0j, -13.0 - 2.0j, -15.0 +\n 3.0j]]), ([-2.0 - 4.0j, 4.0 - 4.0j, -1.0 - 1.0j, -5.0 - 1.0j, 4.0 - \n 4.0j, 1.0 - 4.0j, -5.0 + 3.0j, 4.0 + 3.0j, 3.0 - 3.0j, 2.0 - 4.0j, 4.0 -\n 5.0j, 0.0 - 4.0j, 2.0 + 1.0j, 2.0 - 4.0j, 4.0 + 1.0j, -5.0 - 3.0j, 2.0 +\n 3.0j, 4.0 + 4.0j, 1.0 + 4.0j, -1.0 - 3.0j], [[1.0 + 2.0j, 2.24264069 - \n 1.0j, 1.0 - 4.0j, -2.0 - 5.24264069j, -5.0 - 4.0j, -6.24264069 - 1.0j, \n -5.0 + 2.0j, -2.0 + 3.24264069j], [-4.0 + 6.0j, -0.29289322 + \n 6.94974747j, 3.0 + 5.0j, 3.94974747 + 1.29289322j, 2.0 - 2.0j, -\n 1.70710678 - 2.94974747j, -5.0 - 1.0j, -5.94974747 + 2.70710678j]], 7,\n [[3.55271368e-15 + 10.0j, -6.0 - 14.0j, 25.0 + 3.0j, 9.0 + 1.0j, -24.0 -\n 14.0j, 18.0 + 7.0j, 28.0 - 10.0j, -29.0 - 16.0j, -6.0 + 24.0j, 10.0 + \n 6.0j, 5.0 + 0.0j, 23.0 + 5.0j, 9.0 - 16.0j, -5.0 + 15.0j, 11.0 - 12.0j,\n 16.0 + 26.0j, -7.0 - 32.0j, -7.0 + 3.0j, 2.0 + 15.0j, -10.0 + 22.0j, \n 6.0 - 12.0j], [10.0 - 4.4408921e-16j, 26.0 + 16.0j, 7.0 + 27.0j, 14.0 -\n 10.0j, 23.0 - 5.0j, 11.0 + 34.0j, 12.0 + 3.0j, -7.0 - 24.0j, -21.0 + \n 16.0j, 9.0 + 29.0j, 16.0 + 33.0j, 16.0 + 35.0j, 12.0 + 15.0j, -4.0 + \n 13.0j, 4.0 + 27.0j, -5.0 + 6.0j, 19.0 - 10.0j, -30.0 + 3.0j, -37.0 + \n 2.0j, -12.0 - 7.0j, 15.0 + 5.0j]]))'], {}), "('x,H,step,expected', (([0.0 + 3.0j, -4.0 + 2.0j, -\n 4.0 + 1.0j, -5.0 + 2.0j, -4.0 - 3.0j, 2.0 + 1.0j, -1.0 - 2.0j, 3.0 + \n 3.0j, -3.0 - 1.0j, -3.0 + 2.0j, -4.0 + 0.0j, -4.0 - 4.0j, -3.0 - 4.0j, \n -4.0 - 3.0j, -4.0 + 3.0j, -3.0 + 4.0j, -4.0 - 1.0j, -5.0 + 0.0j, 4.0 + \n 2.0j, 2.0 - 3.0j], [-8.0 + 7.0j, -4.70710678 + 8.94974747j, -1.0 + 8.0j,\n 0.94974747 + 4.70710678j, 0.0 + 1.0j, -3.29289322 - 0.94974747j, -7.0 +\n 0.0j, -8.94974747 + 3.29289322j], 7, [[-12.0 - 12.0j, -1.0 - 36.0j, \n 22.0 - 40.0j, 25.0 - 44.0j, 42.0 - 27.0j, 13.0 + 4.0j, 1.0 + 6.0j, -\n 14.0 + 5.0j, 
-5.0 - 11.0j, 19.0 - 25.0j, 22.0 - 33.0j, 48.0 - 12.0j, \n 56.0 + 8.0j, 52.0 + 3.0j, 29.0 - 28.0j, 3.0 - 52.0j, 20.0 - 37.0j, 39.0 -\n 28.0j, -4.0 - 7.0j, -18.0 + 24.0j, 1.0 + 18.0j]]), ([0.0 - 5.0j, 2.0 - \n 4.0j, 4.0 - 1.0j, -4.0 - 2.0j, 2.0 + 0.0j, 2.0 + 0.0j, 0.0 - 5.0j, 0.0 -\n 1.0j, -2.0 + 1.0j, -4.0 - 2.0j, 0.0 + 2.0j, -5.0 - 5.0j, -5.0 - 1.0j, -\n 2.0 + 1.0j, 0.0 - 1.0j, 3.0 + 4.0j, 0.0 - 2.0j, 1.0 + 0.0j, -1.0 - 2.0j,\n 3.0 + 3.0j], [-6.0 + 6.0j, -3.29289322 + 6.53553391j, -1.0 + 5.0j, -\n 0.46446609 + 2.29289322j, -2.0 + 0.0j, -4.70710678 - 0.53553391j, -7.0 +\n 1.0j, -7.53553391 + 3.70710678j], 7, [[15.0 + 20.0j, 19.0 + 32.0j, -5.0 +\n 30.0j, 17.0 + 10.0j, 6.0 - 2.0j, -12.0 + 12.0j, 11.0 + 26.0j, 18.0 + \n 14.0j, 8.0 - 8.0j, 23.0 - 12.0j, 8.0 - 16.0j, 29.0 + 1.0j, 48.0 - 16.0j,\n 18.0 - 23.0j, 4.0 - 4.0j, -21.0 - 5.0j, -12.0 + 9.0j, 2.0 + 7.0j, 8.0 +\n 8.0j, -13.0 - 2.0j, -15.0 + 3.0j]]), ([-2.0 - 4.0j, 4.0 - 4.0j, -1.0 - \n 1.0j, -5.0 - 1.0j, 4.0 - 4.0j, 1.0 - 4.0j, -5.0 + 3.0j, 4.0 + 3.0j, 3.0 -\n 3.0j, 2.0 - 4.0j, 4.0 - 5.0j, 0.0 - 4.0j, 2.0 + 1.0j, 2.0 - 4.0j, 4.0 +\n 1.0j, -5.0 - 3.0j, 2.0 + 3.0j, 4.0 + 4.0j, 1.0 + 4.0j, -1.0 - 3.0j], [[\n 1.0 + 2.0j, 2.24264069 - 1.0j, 1.0 - 4.0j, -2.0 - 5.24264069j, -5.0 - \n 4.0j, -6.24264069 - 1.0j, -5.0 + 2.0j, -2.0 + 3.24264069j], [-4.0 + \n 6.0j, -0.29289322 + 6.94974747j, 3.0 + 5.0j, 3.94974747 + 1.29289322j, \n 2.0 - 2.0j, -1.70710678 - 2.94974747j, -5.0 - 1.0j, -5.94974747 + \n 2.70710678j]], 7, [[3.55271368e-15 + 10.0j, -6.0 - 14.0j, 25.0 + 3.0j, \n 9.0 + 1.0j, -24.0 - 14.0j, 18.0 + 7.0j, 28.0 - 10.0j, -29.0 - 16.0j, -\n 6.0 + 24.0j, 10.0 + 6.0j, 5.0 + 0.0j, 23.0 + 5.0j, 9.0 - 16.0j, -5.0 + \n 15.0j, 11.0 - 12.0j, 16.0 + 26.0j, -7.0 - 32.0j, -7.0 + 3.0j, 2.0 + \n 15.0j, -10.0 + 22.0j, 6.0 - 12.0j], [10.0 - 4.4408921e-16j, 26.0 + \n 16.0j, 7.0 + 27.0j, 14.0 - 10.0j, 23.0 - 5.0j, 11.0 + 34.0j, 12.0 + \n 3.0j, -7.0 - 24.0j, -21.0 + 16.0j, 9.0 + 29.0j, 16.0 + 33.0j, 16.0 + \n 35.0j, 12.0 + 15.0j, -4.0 + 
13.0j, 4.0 + 27.0j, -5.0 + 6.0j, 19.0 - \n 10.0j, -30.0 + 3.0j, -37.0 + 2.0j, -12.0 - 7.0j, 15.0 + 5.0j]])))\n", (2185, 4937), False, 'import pytest\n'), ((9468, 10835), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""window_length,expected"""', '((4, [0.08, 0.77, 0.77, 0.08]), (5, [0.08, 0.54, 1.0, 0.54, 0.08]), (99, [\n 0.08, 0.08094512, 0.08377659, 0.08848279, 0.09504436, 0.10343436, \n 0.11361829, 0.12555432, 0.1391934, 0.15447947, 0.17134973, 0.18973486, \n 0.2095593, 0.23074159, 0.25319469, 0.27682634, 0.30153942, 0.32723239, \n 0.35379966, 0.38113207, 0.40911731, 0.43764037, 0.46658405, 0.49582941,\n 0.52525627, 0.55474373, 0.58417059, 0.61341595, 0.64235963, 0.67088269,\n 0.69886793, 0.72620034, 0.75276761, 0.77846058, 0.80317366, 0.82680531,\n 0.84925841, 0.8704407, 0.89026514, 0.90865027, 0.92552053, 0.9408066, \n 0.95444568, 0.96638171, 0.97656564, 0.98495564, 0.99151721, 0.99622341,\n 0.99905488, 1.0, 0.99905488, 0.99622341, 0.99151721, 0.98495564, \n 0.97656564, 0.96638171, 0.95444568, 0.9408066, 0.92552053, 0.90865027, \n 0.89026514, 0.8704407, 0.84925841, 0.82680531, 0.80317366, 0.77846058, \n 0.75276761, 0.72620034, 0.69886793, 0.67088269, 0.64235963, 0.61341595,\n 0.58417059, 0.55474373, 0.52525627, 0.49582941, 0.46658405, 0.43764037,\n 0.40911731, 0.38113207, 0.35379966, 0.32723239, 0.30153942, 0.27682634,\n 0.25319469, 0.23074159, 0.2095593, 0.18973486, 0.17134973, 0.15447947, \n 0.1391934, 0.12555432, 0.11361829, 0.10343436, 0.09504436, 0.08848279, \n 0.08377659, 0.08094512, 0.08]))'], {}), "('window_length,expected', ((4, [0.08, 0.77, 0.77, \n 0.08]), (5, [0.08, 0.54, 1.0, 0.54, 0.08]), (99, [0.08, 0.08094512, \n 0.08377659, 0.08848279, 0.09504436, 0.10343436, 0.11361829, 0.12555432,\n 0.1391934, 0.15447947, 0.17134973, 0.18973486, 0.2095593, 0.23074159, \n 0.25319469, 0.27682634, 0.30153942, 0.32723239, 0.35379966, 0.38113207,\n 0.40911731, 0.43764037, 0.46658405, 0.49582941, 0.52525627, 0.55474373,\n 0.58417059, 0.61341595, 
0.64235963, 0.67088269, 0.69886793, 0.72620034,\n 0.75276761, 0.77846058, 0.80317366, 0.82680531, 0.84925841, 0.8704407, \n 0.89026514, 0.90865027, 0.92552053, 0.9408066, 0.95444568, 0.96638171, \n 0.97656564, 0.98495564, 0.99151721, 0.99622341, 0.99905488, 1.0, \n 0.99905488, 0.99622341, 0.99151721, 0.98495564, 0.97656564, 0.96638171,\n 0.95444568, 0.9408066, 0.92552053, 0.90865027, 0.89026514, 0.8704407, \n 0.84925841, 0.82680531, 0.80317366, 0.77846058, 0.75276761, 0.72620034,\n 0.69886793, 0.67088269, 0.64235963, 0.61341595, 0.58417059, 0.55474373,\n 0.52525627, 0.49582941, 0.46658405, 0.43764037, 0.40911731, 0.38113207,\n 0.35379966, 0.32723239, 0.30153942, 0.27682634, 0.25319469, 0.23074159,\n 0.2095593, 0.18973486, 0.17134973, 0.15447947, 0.1391934, 0.12555432, \n 0.11361829, 0.10343436, 0.09504436, 0.08848279, 0.08377659, 0.08094512,\n 0.08])))\n", (9491, 10835), False, 'import pytest\n'), ((434, 501), 'siglib.frame', 'sl.frame', (['x', 'frame_length', 'frame_step'], {'pad': 'pad', 'pad_value': 'pad_value'}), '(x, frame_length, frame_step, pad=pad, pad_value=pad_value)\n', (442, 501), True, 'import siglib as sl\n'), ((506, 547), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (529, 547), True, 'import numpy as np\n'), ((785, 805), 'siglib.closing', 'sl.closing', (['x', 'ntaps'], {}), '(x, ntaps)\n', (795, 805), True, 'import siglib as sl\n'), ((821, 857), 'numpy.array', 'np.array', (['expected'], {'dtype': 'np.complex'}), '(expected, dtype=np.complex)\n', (829, 857), True, 'import numpy as np\n'), ((862, 903), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (885, 903), True, 'import numpy as np\n'), ((1141, 1161), 'siglib.opening', 'sl.opening', (['x', 'ntaps'], {}), '(x, ntaps)\n', (1151, 1161), True, 'import siglib as sl\n'), ((1177, 1213), 'numpy.array', 'np.array', (['expected'], {'dtype': 'np.complex'}), '(expected, 
dtype=np.complex)\n', (1185, 1213), True, 'import numpy as np\n'), ((1218, 1259), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (1241, 1259), True, 'import numpy as np\n'), ((1442, 1468), 'siglib.resample', 'sl.resample', (['x', 'idx', 'ntaps'], {}), '(x, idx, ntaps)\n', (1453, 1468), True, 'import siglib as sl\n'), ((1473, 1529), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['result', 'expected'], {'rtol': '(1e-09)'}), '(result, expected, rtol=1e-09)\n', (1499, 1529), True, 'import numpy as np\n'), ((2007, 2018), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2015, 2018), True, 'import numpy as np\n'), ((2032, 2078), 'siglib.dcm', 'sl.dcm', (['x', 'delay'], {'pad': 'pad', 'pad_value': 'pad_value'}), '(x, delay, pad=pad, pad_value=pad_value)\n', (2038, 2078), True, 'import siglib as sl\n'), ((2094, 2112), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (2102, 2112), True, 'import numpy as np\n'), ((2117, 2158), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (2140, 2158), True, 'import numpy as np\n'), ((9393, 9411), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (9401, 9411), True, 'import numpy as np\n'), ((9416, 9464), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (9446, 9464), True, 'import numpy as np\n'), ((12490, 12515), 'siglib.hamming', 'sl.hamming', (['window_length'], {}), '(window_length)\n', (12500, 12515), True, 'import siglib as sl\n'), ((12531, 12549), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (12539, 12549), True, 'import numpy as np\n'), ((12554, 12602), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (12584, 12602), True, 'import numpy as np\n'), ((9346, 9357), 'numpy.array', 'np.array', (['x'], 
{}), '(x)\n', (9354, 9357), True, 'import numpy as np\n'), ((9359, 9370), 'numpy.array', 'np.array', (['H'], {}), '(H)\n', (9367, 9370), True, 'import numpy as np\n'), ((173, 186), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (182, 186), True, 'import numpy as np\n'), ((261, 274), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (270, 274), True, 'import numpy as np\n'), ((615, 627), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (623, 627), True, 'import numpy as np\n'), ((674, 687), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (683, 687), True, 'import numpy as np\n'), ((971, 983), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (979, 983), True, 'import numpy as np\n'), ((1030, 1043), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1039, 1043), True, 'import numpy as np\n'), ((1322, 1340), 'numpy.arange', 'np.arange', (['(10 ** 2)'], {}), '(10 ** 2)\n', (1331, 1340), True, 'import numpy as np\n'), ((1342, 1360), 'numpy.array', 'np.array', (['[45.567]'], {}), '([45.567])\n', (1350, 1360), True, 'import numpy as np\n'), ((204, 235), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.complex'}), '(10, dtype=np.complex)\n', (213, 235), True, 'import numpy as np\n'), ((293, 324), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.complex'}), '(10, dtype=np.complex)\n', (302, 324), True, 'import numpy as np\n')] |
# This example implements macroscopic homogenized model of Biot-Darcy-Brinkman model of flow in deformable
# double porous media.
# The mathematical model is described in:
#
#<NAME>., <NAME>., <NAME>.
#The Biot-Darcy-Brinkman model of flow in deformable double porous media; homogenization and numerical modelling.
# Computers and Mathematics with applications, 78(9):3044-3066, 2019,
# https://doi.org/10.1016/j.camwa.2019.04.004
#
# Run simulation:
#
# ./simple.py example_perfusion_BDB/perf_BDB_mac.py
#
# The results are stored in `example_perfusion_BDB/results/macro` directory.
#
import numpy as nm
from sfepy.homogenization.micmac import get_homog_coefs_linear
from sfepy.homogenization.utils import define_box_regions
from sfepy.discrete.fem.mesh import Mesh
import os.path as osp
material_cache = {}
data_dir = 'example_perfusion_BDB'
def coefs2qp(coefs, nqp):
    """Expand homogenized coefficients to quadrature points.

    Parameters:
        coefs: dict of coefficient name -> value; entries that are neither
            ``nm.ndarray`` nor ``float`` (e.g. metadata strings) are skipped.
        nqp: number of quadrature points.

    Returns:
        dict mapping each coefficient name to an array whose leading axis
        has length ``nqp`` (arrays that already carry a per-qp axis, i.e.
        ``ndim >= 3``, are passed through unchanged).
    """
    out = {}
    for k, v in coefs.items():
        # Only arrays and scalars are actual coefficients.
        if type(v) not in [nm.ndarray, float]:
            continue
        if type(v) is nm.ndarray and len(v.shape) >= 3:
            # Already per-quadrature-point -- keep as is.  (The original
            # stored `v` here but then unconditionally overwrote it with
            # the tiled version below: a dead store; `continue` restores
            # the evident intent.)
            out[k] = v
            continue
        # Replicate the constant coefficient for every quadrature point.
        out[k] = nm.tile(v, (nqp, 1, 1))
    return out
# Get raw homogenized coefficients, recalculate them if necessary
def get_raw_coefs(problem):
    """Return the homogenized coefficients of the mesoscopic problem.

    The coefficients are computed (or loaded from the ``coefs_meso.h5``
    file in the problem's output directory) on first use and memoized in
    the module-level ``material_cache`` under the key ``'raw_coefs'``.
    """
    cached = material_cache.get('raw_coefs')
    if cached is not None:
        return cached

    out_dir = problem.conf.options.get('output_dir', '.')
    coefs_filename = osp.join(out_dir, 'coefs_meso') + '.h5'
    coefs = get_homog_coefs_linear(
        0, 0, None,
        micro_filename=material_cache['meso_filename'],
        coefs_filename=coefs_filename)
    # Make B a column vector so it broadcasts like the other coefficients.
    coefs['B'] = coefs['B'][:, nm.newaxis]
    material_cache['raw_coefs'] = coefs
    return coefs
#Get homogenized coefficients in quadrature points
def get_homog(coors, pb, mode, **kwargs):
    """Material function: homogenized coefficients in quadrature points.

    Called by SfePy during assembling; only the ``mode == 'qp'`` call
    produces output.  Scalars and low-dimensional arrays are first lifted
    to 2D so that :func:`coefs2qp` can tile them per quadrature point.
    """
    if mode != 'qp':
        return None

    coefs = get_raw_coefs(pb)
    # Normalize every coefficient to at least a 2D array in place.
    for key in coefs.keys():
        val = coefs[key]
        if type(val) is nm.ndarray:
            if len(val.shape) == 0:
                coefs[key] = val.reshape((1, 1))
            elif len(val.shape) == 1:
                coefs[key] = val[:, nm.newaxis]
        elif isinstance(val, float):
            coefs[key] = nm.array([[val]])

    return coefs2qp(coefs, coors.shape[0])
#Definition of dirichlet boundary conditions
def get_ebc(coors, amplitude, cg1, cg2, const=False):
    """Essential boundary condition values at the region nodes `coors`.

    Returns a parabolic profile over the channel cross-section, centred
    at (cg1, cg2) in the y-z plane and scaled by `amplitude`.  With
    ``const=True`` a constant value `amplitude` is returned at every
    node instead.
    """
    dy = coors[:, 1] - cg1
    dz = coors[:, 2] - cg2
    if const:
        return amplitude * nm.ones_like(dy)
    # |dy|^2 == dy^2 for real coordinates, so the abs() of the original
    # formulation is redundant.
    return amplitude * ((cg1 ** 2 - dy ** 2) + (cg2 ** 2 - dz ** 2))
#Returns value of \phi_c\bar{w}^{mes} as a material function
def get_ebc_mat(coors, pb, mode, amplitude, cg1, cg2, konst=False):
    """Material function returning phi_c * w_bar^{mes} in quadrature points.

    The boundary velocity profile from :func:`get_ebc` is weighted by the
    channel volume fraction ``fraction_Zc`` of the homogenized problem.
    Returns None for any mode other than ``'qp'``.
    """
    if mode != 'qp':
        return None
    profile = get_ebc(coors, amplitude, cg1, cg2, konst)
    frac_zc = get_raw_coefs(pb)['vol']["fraction_Zc"]
    return {'val': profile[:, nm.newaxis, nm.newaxis] * frac_zc}
#Definition of boundary conditions for numerical example at http://sfepy.org/sfepy_examples/example_perfusion_BDB/
def define_bc(cg1, cg2, val_in=1e2, val_out=1e2):
    """Build the boundary-condition dictionaries for the macroscopic problem.

    Parameters:
        cg1, cg2: centre-of-gravity coordinates of the cross-section,
            forwarded to the parabolic profile function.
        val_in, val_out: inflow/outflow amplitudes.

    Returns:
        (ebcs, funs, mats, lcbcs) in the SfePy declarative format.
    """
    def w_in(ts, coor, bc, problem, **kwargs):
        # Parabolic inflow profile on the 'In' facet region.
        return get_ebc(coor, val_in, cg1, cg2)

    def w_out(ts, coor, bc, problem, **kwargs):
        # Parabolic outflow profile on the 'Out' facet region.
        return get_ebc(coor, val_out, cg1, cg2)

    def w_in_mat(ts, coor, problem, mode=None, **kwargs):
        return get_ebc_mat(coor, problem, mode, val_in, cg1, cg2)

    def w_out_mat(ts, coor, problem, mode=None, **kwargs):
        return get_ebc_mat(coor, problem, mode, val_out, cg1, cg2)

    funs = {
        'w_in': (w_in,),
        'w_out': (w_out,),
        'w_in_mat': (w_in_mat,),
        'w_out_mat': (w_out_mat,),
    }
    mats = {
        'w_in': 'w_in_mat',
        'w_out': 'w_out_mat',
    }
    ebcs = {
        'fix_u_in': ('In', {'u.all': 0.0}),
        'fix_u_out': ('Out', {'u.all': 0.0}),
        'w_in': ('In', {'w.0': 'w_in', 'w.[1,2]': 0.0}),
        'w_out': ('Out', {'w.0': 'w_out', 'w.[1,2]': 0.0}),
        'wB_dirichlet': ('Bottom', {'w.2': 0.0, 'u.2': 0.0}),
        'WT_dirichlet': ('Top', {'w.2': 0.0, 'u.2': 0.0}),
        'wN_dirichlet': ('Near', {'w.1': 0.0, 'u.1': 0.0}),
        'wF_dirichlet': ('Far', {'w.1': 0.0, 'u.1': 0.0}),
    }
    lcbcs = {
        'imv': ('Omega', {'ls.all': None}, None, 'integral_mean_value'),
    }
    return ebcs, funs, mats, lcbcs
#Definition of macroscopic problem
def define(filename_mesh=None,cg1=None, cg2=None):
    """Declarative SfePy description of the macroscopic Biot-Darcy-Brinkman
    problem.

    Parameters:
        filename_mesh: macroscopic mesh path; defaults to the bundled
            'macro_perf.vtk'.
        cg1, cg2: y/z coordinates of the channel cross-section centre of
            gravity, used by the parabolic inflow/outflow profiles.

    All local names defined here (regions, fields, variables, materials,
    equations, solvers, options, ...) are collected via ``locals()`` and
    interpreted by SfePy.
    """
    if filename_mesh is None:
        filename_mesh = osp.join(data_dir, 'macro_perf.vtk')
        cg1, cg2 = 0.0015, 0.0015 # y and z coordinates of center of gravity
    mesh = Mesh.from_file(filename_mesh)
    # Mesoscopic problem file driving the homogenization; cached so the
    # material functions (get_raw_coefs) can reach it.
    poroela_mezo_file = osp.join(data_dir,'perf_BDB_mes.py')
    material_cache['meso_filename']=poroela_mezo_file
    bbox = mesh.get_bounding_box()
    # Standard box regions (Left/Right/Top/Bottom/Near/Far) plus customs.
    regions = define_box_regions(mesh.dim, bbox[0], bbox[1], eps=1e-6)
    regions.update({
        'Omega': 'all',
        # Channel wall: union of the four lateral faces.
        'Wall': ('r.Top +v r.Bottom +v r.Far +v r.Near', 'facet'),
        # Inlet/outlet: end faces without the wall edges.
        'In': ('r.Left -v r.Wall', 'facet'),
        'Out': ('r.Right -v r.Wall', 'facet'),
    })
    ebcs, bc_funs, mats, lcbcs = define_bc(cg1,cg2,val_in=1.e4,val_out=1.e4)
    fields = {
        'displacement': ('real', 'vector', 'Omega', 1),
        'pressure': ('real', 'scalar', 'Omega', 1),
        'velocity': ('real', 'vector', 'Omega', 2),
    }
    variables = {
        # Displacement (unknown/test pair)
        'u': ('unknown field', 'displacement'),
        'v': ('test field', 'displacement', 'u'),
        # Pressure; 'ls' is the Lagrange multiplier enforcing the
        # integral mean value constraint (see lcbcs).
        'p': ('unknown field', 'pressure'),
        'q': ('test field', 'pressure', 'p'),
        'ls': ('unknown field', 'pressure'),
        'lv': ('test field', 'pressure', 'ls'),
        # Velocity (perfusion)
        'w': ('unknown field', 'velocity'),
        'z': ('test field', 'velocity', 'w'),
    }
    functions = {
        'get_homog': (lambda ts, coors, problem, mode=None, **kwargs: \
            get_homog(coors,problem, mode, **kwargs),), }
    functions.update(bc_funs)
    materials = {
        'hom': 'get_homog',
    }
    materials.update(mats)
    integrals = {
        'i': 4,
        "is": ("s", 4),
    }
    # Definition of solvers (MUMPS direct solver + Newton iterations)
    solvers = {
        'ls': ('ls.mumps', {}),
        'newton': ('nls.newton',
                   {'i_max': 2,
                    'eps_a': 1e-12,
                    'eps_r': 1e-3,
                    'problem': 'nonlinear',
                    })
    }
    # Definition of macroscopic equations, see (43)
    equations = {
        'eq1': """
             dw_lin_elastic.i.Omega(hom.A, v, u)
           - dw_biot.i.Omega(hom.B, v, p)
           - dw_v_dot_grad_s.i.Omega(hom.PT, v, p)
           - dw_volume_dot.i.Omega(hom.H, v, w)
           = 0""",
        'eq2': """
             dw_diffusion.i.Omega(hom.K, q, p)
           - dw_v_dot_grad_s.i.Omega(hom.P, w, q)+ dw_volume_dot.i.Omega( q,ls )
             = + dw_surface_integrate.is.In(w_in.val, q) - dw_surface_integrate.is.Out(w_out.val, q)
            """,
        'eq3': """
             dw_lin_elastic.i.Omega(hom.S, z, w)
           + dw_volume_dot.i.Omega(hom.H, z, w)
           + dw_v_dot_grad_s.i.Omega(hom.PT, z, p)
           = 0""",
        'eq_imv': 'dw_volume_dot.i.Omega( lv, p ) = 0',
    }
    options = {
        'output_dir': data_dir + '/results/macro',
        'ls': 'ls',
        'nls': 'newton',
        'micro_filename' : poroela_mezo_file,
        'absolute_mesh_path': True,
    }
return locals() | [
"numpy.tile",
"numpy.ones_like",
"os.path.join",
"numpy.array",
"sfepy.homogenization.micmac.get_homog_coefs_linear",
"sfepy.homogenization.utils.define_box_regions",
"sfepy.discrete.fem.mesh.Mesh.from_file"
] | [((4957, 4986), 'sfepy.discrete.fem.mesh.Mesh.from_file', 'Mesh.from_file', (['filename_mesh'], {}), '(filename_mesh)\n', (4971, 4986), False, 'from sfepy.discrete.fem.mesh import Mesh\n'), ((5012, 5049), 'os.path.join', 'osp.join', (['data_dir', '"""perf_BDB_mes.py"""'], {}), "(data_dir, 'perf_BDB_mes.py')\n", (5020, 5049), True, 'import os.path as osp\n'), ((5157, 5214), 'sfepy.homogenization.utils.define_box_regions', 'define_box_regions', (['mesh.dim', 'bbox[0]', 'bbox[1]'], {'eps': '(1e-06)'}), '(mesh.dim, bbox[0], bbox[1], eps=1e-06)\n', (5175, 5214), False, 'from sfepy.homogenization.utils import define_box_regions\n'), ((1140, 1163), 'numpy.tile', 'nm.tile', (['v', '(nqp, 1, 1)'], {}), '(v, (nqp, 1, 1))\n', (1147, 1163), True, 'import numpy as nm\n'), ((1576, 1676), 'sfepy.homogenization.micmac.get_homog_coefs_linear', 'get_homog_coefs_linear', (['(0)', '(0)', 'None'], {'micro_filename': 'micro_filename', 'coefs_filename': 'coefs_filename'}), '(0, 0, None, micro_filename=micro_filename,\n coefs_filename=coefs_filename)\n', (1598, 1676), False, 'from sfepy.homogenization.micmac import get_homog_coefs_linear\n'), ((4827, 4863), 'os.path.join', 'osp.join', (['data_dir', '"""macro_perf.vtk"""'], {}), "(data_dir, 'macro_perf.vtk')\n", (4835, 4863), True, 'import os.path as osp\n'), ((2800, 2815), 'numpy.ones_like', 'nm.ones_like', (['y'], {}), '(y)\n', (2812, 2815), True, 'import numpy as nm\n'), ((2352, 2367), 'numpy.array', 'nm.array', (['[[v]]'], {}), '([[v]])\n', (2360, 2367), True, 'import numpy as nm\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
import numbers
from typing import Optional, Tuple
import numpy as np
class BBox:
    """Axis-aligned bounding box stored as an integer center and size.

    Internally keeps ``_center = (cx, cy)`` and ``_size = (width, height)``
    as numpy arrays; all derived representations (corners, xywh, tl/br)
    are computed from these.
    """

    def __init__(self, x: int, y: int, width: int, height: int) -> None:
        """Create a box from its top-left corner and size."""
        assert min(width, height) >= 0, "width and height must be non-negative"
        self._center: np.ndarray = np.asarray((x + width // 2, y + height // 2))
        self._size: np.ndarray = np.asarray((width, height))

    @property
    def size(self) -> np.ndarray:
        return self._size

    @property
    def center(self) -> np.ndarray:
        return self._center

    @size.setter
    def size(self, new_size: np.ndarray) -> None:
        """Set the (width, height) array; values must be non-negative ints."""
        assert (new_size.ndim == 1) and (len(new_size) == 2)
        assert new_size.min() >= 0, "width and height must be non-negative"
        assert issubclass(new_size.dtype.type, numbers.Integral)
        self._size = new_size

    @staticmethod
    def build_from_center_and_size(
            center: np.ndarray, size: np.ndarray) -> 'BBox':
        """Alternate constructor from integer center and size arrays."""
        assert issubclass(center.dtype.type, numbers.Integral)
        assert issubclass(size.dtype.type, numbers.Integral)
        x, y = center - size // 2
        return BBox(x, y, *size)

    def as_corners(self) -> np.ndarray:
        """Return (x_min, y_min, x_max, y_max)."""
        xy = self.center - self.size // 2
        return np.concatenate((xy, xy + self.size))

    def as_xywh(self, zero_based: bool = True) -> np.ndarray:
        """Return (x, y, width, height); set zero_based=False for 1-based."""
        center = self.center if zero_based else self.center + 1
        xy = center - self.size // 2
        return np.concatenate((xy, self.size))

    def as_tl_br(self) -> Tuple[np.ndarray, np.ndarray]:
        """Return the top-left and bottom-right corners."""
        size_half = self.size // 2
        tl = self.center - size_half
        br = self.center + size_half
        return tl, br

    def shift(
            self, center_shift: np.ndarray, in_place=True) -> Optional['BBox']:
        """Translate the center by an integer offset.

        Mutates self and returns None when in_place, otherwise returns a
        shifted copy.
        """
        assert (center_shift.ndim == 1) and (len(center_shift) == 2)
        assert issubclass(center_shift.dtype.type, numbers.Integral)
        new_center = self._center + center_shift
        if in_place:
            self._center = new_center
            return None
        else:
            return BBox.build_from_center_and_size(new_center, self.size)

    def rescale(
            self, width_scale: float, height_scale: float,
            in_place=True) -> Optional['BBox']:
        """Scale the size by per-axis factors, keeping the center fixed."""
        assert min(width_scale, height_scale) >= 0, \
            "width and height scale factors must be non-negative"
        new_size = self.size * np.asarray((width_scale, height_scale))
        # BUG FIX: use the builtin `int` -- the `np.int` alias was removed
        # in NumPy 1.24 and made this raise AttributeError.
        new_size = new_size.round().astype(int)
        if in_place:
            self.size = new_size
            return None
        else:
            return BBox.build_from_center_and_size(self.center, new_size)

    def __repr__(self) -> str:
        x, y = self.center - self.size // 2
        width, height = self.size
        return f'{self.__class__.__name__}({x},{y},{width},{height})'
| [
"numpy.asarray",
"numpy.concatenate"
] | [((356, 401), 'numpy.asarray', 'np.asarray', (['(x + width // 2, y + height // 2)'], {}), '((x + width // 2, y + height // 2))\n', (366, 401), True, 'import numpy as np\n'), ((435, 462), 'numpy.asarray', 'np.asarray', (['(width, height)'], {}), '((width, height))\n', (445, 462), True, 'import numpy as np\n'), ((1360, 1396), 'numpy.concatenate', 'np.concatenate', (['(xy, xy + self.size)'], {}), '((xy, xy + self.size))\n', (1374, 1396), True, 'import numpy as np\n'), ((1580, 1611), 'numpy.concatenate', 'np.concatenate', (['(xy, self.size)'], {}), '((xy, self.size))\n', (1594, 1611), True, 'import numpy as np\n'), ((2561, 2600), 'numpy.asarray', 'np.asarray', (['(width_scale, height_scale)'], {}), '((width_scale, height_scale))\n', (2571, 2600), True, 'import numpy as np\n')] |
'''
Script to infer labels on data from a pre-saved keras model, using folder-structured testing data
'''
import argparse
import os
import csv
import PIL
from PIL import Image
import numpy as np
import cv2
import tensorflow
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import load_model
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def predict(model, classes, directory, batch_size, preprocess, size):
    """Predict classes for a folder-structured directory of images.

    Parameters:
        model: loaded Keras model exposing ``predict``.
        classes: ordered list of class names (one sub-folder per class);
            assumed to match the order of the model's output units.
        directory: root directory with one sub-folder per class.
        batch_size: generator batch size.
        preprocess: optional model-specific preprocessing function; when
            None the pixel values are rescaled by 1/255 instead.
        size: square target size fed to the network.

    Returns:
        List of (relative_filename, predicted_class_name) tuples.
    """
    # Rescale only when no model-specific preprocessing is supplied.
    if not preprocess:
        rescale_val = 1./255
    else:
        rescale_val = None
    datagen = ImageDataGenerator(
        rescale=rescale_val,
        preprocessing_function=preprocess
    )
    prediction_generator = datagen.flow_from_directory(
        directory,
        classes=classes,
        target_size=(size, size),
        batch_size=batch_size,
        shuffle=False
    )
    preds = model.predict(
        prediction_generator,
        verbose=1,
    )
    preds_cls_idx = preds.argmax(axis=-1)
    # BUG FIX: map indices through the `classes` parameter instead of the
    # module-global `labels`, which only exists when the file is run as a
    # script (it is assigned under `if __name__ == "__main__"`), so the
    # original raised NameError when this function was imported.
    prednames = [classes[k] for k in preds_cls_idx]
    filenames_to_cls = list(zip(prediction_generator.filenames, prednames))
    return filenames_to_cls
def predict_new(model, classes, directory, batch_size, preprocess, size):
    """Predict classes for a flat (label-less) directory of new images.

    Same contract as :func:`predict`, but the directory has no per-class
    sub-folders (``class_mode=None``) and prediction runs with worker
    multiprocessing enabled.
    """
    # Rescale by 1/255 only when no model-specific preprocessing is given.
    rescale_val = None if preprocess else 1./255
    generator = ImageDataGenerator(
        rescale=rescale_val,
        preprocessing_function=preprocess
    ).flow_from_directory(
        directory,
        target_size=(size, size),
        class_mode=None,
        batch_size=batch_size,
        shuffle=False,
    )
    raw_preds = model.predict(
        generator,
        use_multiprocessing=True,
        workers=30,
        verbose=1,
    )
    class_indices = raw_preds.argmax(axis=-1)
    predicted_names = [classes[idx] for idx in class_indices]
    return list(zip(generator.filenames, predicted_names))
if __name__ == "__main__":
    # Command-line interface: positional image directory and model path,
    # plus options controlling preprocessing and prediction mode.
    parser = argparse.ArgumentParser()
    parser.add_argument('image_dir')
    parser.add_argument('model')
    parser.add_argument('--batch-size', default=32, help='Batch size', type=int)
    parser.add_argument('--classes', default=['ABCA4', 'USH2A'], help='List of classes', nargs='+')
    parser.add_argument('--size', type=int, default=256, help='Shape of input e.g 256 for (256,256)')
    parser.add_argument('--preprocess', choices=['inceptionv3', 'inception_resnetv2'], help='Preprocessing to perform on images')
    parser.add_argument('--new', action='store_true', help='Set if predicting on a flat folder of new data')
    parser.add_argument('--manual', action='store_true', help='Manually iterate over images and run prediction on each one. If not selected Keras flow_from_directory is used to perform predictions')
    parser.add_argument('--output', help='File to output CSV results to')
    args = parser.parse_args()
    path = args.image_dir
    print('Loading model')
    loaded_model = load_model(args.model)
    print('Model loaded')
    # Pick the architecture-specific preprocessing function (None means a
    # plain 1/255 rescale inside the predict helpers).
    if args.preprocess == 'inceptionv3':
        preprocess = tensorflow.keras.applications.inception_v3.preprocess_input
    elif args.preprocess == 'inception_resnetv2':
        preprocess = tensorflow.keras.applications.inception_resnet_v2.preprocess_input
    else:
        preprocess = None
    labels = args.classes
    if args.new:
        # Flat folder of unlabelled data: just report the predicted class
        # distribution and exit.
        print('New data mode selected')
        predictions = predict_new(loaded_model, args.classes, args.image_dir, args.batch_size, preprocess=preprocess, size=args.size)
        predictions = np.array(predictions)
        print(np.unique(predictions[:,1], return_counts=True))
        exit(0)
    else:
        predictions = predict(loaded_model, args.classes, args.image_dir, args.batch_size, preprocess=preprocess, size=args.size)
    if not args.manual:
        # Generator mode: the ground-truth label is the first path
        # component of each generator filename (the class sub-folder).
        correct = 0
        images = 0
        correct_map = {label : 0 for label in labels}
        total_map = {label: 0 for label in labels}
        for p in predictions:
            gene = p[0].split('/')[0]
            pred = p[1]
            if gene == pred:
                correct_map[gene] += 1
                correct += 1
            total_map[gene] += 1
            images += 1
        for label in labels:
            print('{}: {} / {}'.format(label, correct_map[label], total_map[label]))
        print('Percentage correct (generator): {:.2f}, {}/{}'.format(correct / len(predictions) * 100, correct, images))
    else:
        # Manual mode: iterate per-class sub-folders, preprocess each image
        # by hand and call model.predict on single images.
        count = 0
        images = 0
        results = list()
        for d in os.listdir(path):
            if not os.path.isdir(os.path.join(path, d)):
                continue
            for f in os.listdir(os.path.join(path, d)):
                # Load image; skip (and record) unreadable files.
                try:
                    image = Image.open(os.path.join(os.path.join(path, d), f))
                    images += 1
                except PIL.UnidentifiedImageError:
                    print('Unable to load image file', os.path.join(os.path.join(path, d), f))
                    results.append((f, 0, 0, 'ERROR'))
                    print(f, 'ERROR')
                    continue
                # Convert to grayscale
                if image.mode == 'RGB':
                    image = image.convert('L')
                # Convert to numpy array
                image = np.array(image, dtype='float32')
                # Squeeze extra dimensions
                if len(image.shape) == 3:
                    image = np.squeeze(image)
                # Resize to the network's expected input size.
                if image.shape != (args.size, args.size):
                    image = cv2.resize(image, dsize=(args.size, args.size), interpolation=cv2.INTER_CUBIC)
                # Make grayscale 3 channel input (might be able to bin this)
                image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
                image = image[np.newaxis, :, :, :]
                # Do any image preprocessing
                if preprocess:
                    image = preprocess(image)
                else:
                    image /= 255
                # Get network prediction; prediction[0] holds per-class
                # scores for this single image.
                prediction = loaded_model.predict(image)
                prediction_class = labels[prediction.argmax(axis=-1)[0]]
                print(f, prediction[0], prediction_class)
                results.append((f, prediction[0][0], prediction[0][1], prediction_class))
                # Count a hit when the sub-folder name matches the prediction.
                if d == prediction_class:
                    count += 1
        print('Percentage correct (manual): {:.2f}, {}/{}'.format((count / images * 100), count, images))
    # Optionally dump per-file scores and predicted labels to CSV.
    if args.output:
        with open(args.output, 'w') as csvout:
            writer = csv.writer(csvout)
            headers = ['file'] + args.classes + ['label']
            writer.writerow(headers)
writer.writerows(results) | [
"os.listdir",
"numpy.repeat",
"numpy.unique",
"argparse.ArgumentParser",
"csv.writer",
"os.path.join",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.squeeze",
"numpy.array",
"tensorflow.keras.models.load_model",
"cv2.resize"
] | [((611, 685), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': 'rescale_val', 'preprocessing_function': 'preprocess'}), '(rescale=rescale_val, preprocessing_function=preprocess)\n', (629, 685), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((1363, 1437), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': 'rescale_val', 'preprocessing_function': 'preprocess'}), '(rescale=rescale_val, preprocessing_function=preprocess)\n', (1381, 1437), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2036, 2061), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2059, 2061), False, 'import argparse\n'), ((3031, 3053), 'tensorflow.keras.models.load_model', 'load_model', (['args.model'], {}), '(args.model)\n', (3041, 3053), False, 'from tensorflow.keras.models import load_model\n'), ((3618, 3639), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (3626, 3639), True, 'import numpy as np\n'), ((4599, 4615), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4609, 4615), False, 'import os\n'), ((3654, 3702), 'numpy.unique', 'np.unique', (['predictions[:, 1]'], {'return_counts': '(True)'}), '(predictions[:, 1], return_counts=True)\n', (3663, 3702), True, 'import numpy as np\n'), ((6734, 6752), 'csv.writer', 'csv.writer', (['csvout'], {}), '(csvout)\n', (6744, 6752), False, 'import csv\n'), ((4733, 4754), 'os.path.join', 'os.path.join', (['path', 'd'], {}), '(path, d)\n', (4745, 4754), False, 'import os\n'), ((5380, 5412), 'numpy.array', 'np.array', (['image'], {'dtype': '"""float32"""'}), "(image, dtype='float32')\n", (5388, 5412), True, 'import numpy as np\n'), ((5838, 5883), 'numpy.repeat', 'np.repeat', (['image[:, :, np.newaxis]', '(3)'], {'axis': '(2)'}), '(image[:, :, np.newaxis], 3, axis=2)\n', (5847, 5883), True, 'import numpy as np\n'), ((4651, 4672), 'os.path.join', 
'os.path.join', (['path', 'd'], {}), '(path, d)\n', (4663, 4672), False, 'import os\n'), ((5527, 5544), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (5537, 5544), True, 'import numpy as np\n'), ((5657, 5735), 'cv2.resize', 'cv2.resize', (['image'], {'dsize': '(args.size, args.size)', 'interpolation': 'cv2.INTER_CUBIC'}), '(image, dsize=(args.size, args.size), interpolation=cv2.INTER_CUBIC)\n', (5667, 5735), False, 'import cv2\n'), ((4860, 4881), 'os.path.join', 'os.path.join', (['path', 'd'], {}), '(path, d)\n', (4872, 4881), False, 'import os\n'), ((5038, 5059), 'os.path.join', 'os.path.join', (['path', 'd'], {}), '(path, d)\n', (5050, 5059), False, 'import os\n')] |
import os,argparse,time
import numpy as np
from omegaconf import OmegaConf
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import utils
import wandb
tstart=time.time()
# Command-line arguments: a YAML config file plus optional key=value overrides.
parser = argparse.ArgumentParser(description='RRR')
parser.add_argument('--config', type=str, default='./configs/config_cub_rrr.yaml')
parser.add_argument('--name', type=str, default='')
parser.add_argument('overrides', nargs='*', help="Any key=svalue arguments to override config values "
                                                 "(use dots for.nested=overrides)")
flags = parser.parse_args()
# Merge order: CLI overrides take precedence over the YAML config file.
overrides = OmegaConf.from_cli(flags.overrides)
cfg = OmegaConf.load(flags.config)
args = OmegaConf.merge(cfg, overrides)
########################################################################################################################
# Args -- Data generator
from dataloaders import datagenerator
# Args -- Approach
from approaches.rrr import RRR as approach
# Args -- Network: choose the backbone module and the name of the target
# layer used by the approach (NOTE(review): presumably for saliency /
# explanation extraction -- confirm in approaches.rrr).
if args.experiment.dataset == 'cifar100':
    from networks import resnet_cifar as network
    # args.architecture.target_layer = "features.layer4.1.conv2" # resnet_cifar
    args.architecture.target_layer = "m_8_0.3" # resnet used in itaml
else:
    from networks import resnet as network
    if args.architecture.backbone == 'resnet18':
        args.architecture.target_layer = "features.7.1.conv2"
    elif args.architecture.backbone == 'densenet121':
        args.architecture.target_layer = "features.0.denseblock4.denselayer16.conv2"
    elif args.architecture.backbone == 'alexnet':
        args.architecture.target_layer = "features.0.10"
    elif args.architecture.backbone == 'vgg11':
        args.architecture.target_layer = "features.0.18"
    elif args.architecture.backbone == 'squeezenet1_1':
        args.architecture.target_layer = "features.0.12.expand3x3"
    elif args.architecture.backbone == 'googlenet':
        args.architecture.target_layer = 'features.15.branch4.1.conv'
########################################################################################################################
def run(args, run_id):
    """Execute a single training run with a fixed random seed.

    Parameters:
        args: merged OmegaConf configuration; ``args.seed`` must already be
            set by the caller (see :func:`main`).
        run_id: index of the run; kept for interface compatibility (it is
            not used inside this function).
    """
    # Seed all RNGs and force deterministic cuDNN for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Data loader
    print('Instantiate data generators and model...')
    dataset = datagenerator.DatasetGen(args)
    args.taskcla, args.inputsize = dataset.taskcla, dataset.inputsize
    args.num_classes = dataset.num_classes

    # Network
    net = network.Net(args)
    net.print_model_size()
    if args.device.multi:
        net = network._CustomDataParallel(net)
    net = net.to(device=args.device.name)
    for name, param in net.named_parameters():
        print(name, param.size())
    if args.device.multi:
        # DataParallel prefixes parameter names with 'module.'.
        args.architecture.target_layer = 'module.' + args.architecture.target_layer

    # Approach (continual-learning method)
    appr = approach(net, args, dataset=dataset, network=network)

    # Task loop.  perf is an (n_tasks x n_tasks) matrix handed to the
    # approach -- presumably performance per (trained, evaluated) task
    # pair; confirm in approaches.rrr.  (A dead, never-read `avg_rii`
    # accumulator was removed.)
    perf = np.zeros((len(args.taskcla), len(args.taskcla)), dtype=np.float32)
    for t, _ncla in args.taskcla:
        # Train and test
        appr.train(t, perf)
appr.train(t, perf)
def main(args):
    """Top-level driver: prepare output/logging and launch all runs.

    Sets up the checkpoint directory and (optionally) Weights & Biases
    logging, echoes the configuration, then performs
    ``args.train.num_runs`` runs with seeds 1..num_runs.
    """
    utils.print_time(start=True)
    args.path.checkpoint, args.wandb.notes = utils.make_directories(args)
    if args.wandb.log:
        wandb.init(
            project=args.wandb.project,
            name=args.wandb.notes,
            config=args.config,
            notes=args.wandb.notes,
            allow_val_change=True,
        )
    utils.save_code(args)

    # Echo the full configuration.
    print('=' * 100)
    print('Arguments =')
    for arg in vars(args):
        print('\t' + arg + ':', getattr(args, arg))
    print('=' * 100)

    for run_idx in range(args.train.num_runs):
        args.seed = run_idx + 1
        args.experiment.memory_budget = int(args.experiment.memory_budget)
        args.path.output = 'Run_{}_{}.txt'.format(run_idx + 1, args.wandb.notes)
        if args.wandb.log:
            wandb.config.update(args, allow_val_change=True)
        print(">" * 30, "Run #", run_idx + 1)
        run(args, run_idx)

    print("All Done! ")
    print('[Elapsed time = {:.1f} min - {:0.1f} hours]'.format((time.time() - tstart) / 60, (time.time() - tstart) / 3600))
    utils.print_time(start=False)
#######################################################################################################################
# Script entry point: `args` is built at import time from the CLI/config.
if __name__ == '__main__':
    main(args)
| [
"utils.save_code",
"wandb.init",
"torch.cuda.is_available",
"utils.print_time",
"argparse.ArgumentParser",
"wandb.config.update",
"numpy.random.seed",
"omegaconf.OmegaConf.from_cli",
"dataloaders.datagenerator.DatasetGen",
"omegaconf.OmegaConf.merge",
"time.time",
"utils.make_directories",
"... | [((184, 195), 'time.time', 'time.time', ([], {}), '()\n', (193, 195), False, 'import os, argparse, time\n'), ((218, 260), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""RRR"""'}), "(description='RRR')\n", (241, 260), False, 'import os, argparse, time\n'), ((626, 661), 'omegaconf.OmegaConf.from_cli', 'OmegaConf.from_cli', (['flags.overrides'], {}), '(flags.overrides)\n', (644, 661), False, 'from omegaconf import OmegaConf\n'), ((668, 696), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['flags.config'], {}), '(flags.config)\n', (682, 696), False, 'from omegaconf import OmegaConf\n'), ((704, 735), 'omegaconf.OmegaConf.merge', 'OmegaConf.merge', (['cfg', 'overrides'], {}), '(cfg, overrides)\n', (719, 735), False, 'from omegaconf import OmegaConf\n'), ((2108, 2133), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2122, 2133), True, 'import numpy as np\n'), ((2138, 2166), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2155, 2166), False, 'import torch\n'), ((2174, 2199), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2197, 2199), False, 'import torch\n'), ((2427, 2457), 'dataloaders.datagenerator.DatasetGen', 'datagenerator.DatasetGen', (['args'], {}), '(args)\n', (2451, 2457), False, 'from dataloaders import datagenerator\n'), ((2596, 2613), 'networks.resnet.Net', 'network.Net', (['args'], {}), '(args)\n', (2607, 2613), True, 'from networks import resnet as network\n'), ((2962, 3015), 'approaches.rrr.RRR', 'approach', (['net', 'args'], {'dataset': 'dataset', 'network': 'network'}), '(net, args, dataset=dataset, network=network)\n', (2970, 3015), True, 'from approaches.rrr import RRR as approach\n'), ((3123, 3160), 'numpy.zeros', 'np.zeros', (['(args.experiment.ntasks, 2)'], {}), '((args.experiment.ntasks, 2))\n', (3131, 3160), True, 'import numpy as np\n'), ((3271, 3299), 'utils.print_time', 'utils.print_time', ([], {'start': '(True)'}), 
'(start=True)\n', (3287, 3299), False, 'import utils\n'), ((3345, 3373), 'utils.make_directories', 'utils.make_directories', (['args'], {}), '(args)\n', (3367, 3373), False, 'import utils\n'), ((3576, 3597), 'utils.save_code', 'utils.save_code', (['args'], {}), '(args)\n', (3591, 3597), False, 'import utils\n'), ((4259, 4288), 'utils.print_time', 'utils.print_time', ([], {'start': '(False)'}), '(start=False)\n', (4275, 4288), False, 'import utils\n'), ((2209, 2242), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2231, 2242), False, 'import torch\n'), ((2682, 2714), 'networks.resnet._CustomDataParallel', 'network._CustomDataParallel', (['net'], {}), '(net)\n', (2709, 2714), True, 'from networks import resnet as network\n'), ((3406, 3539), 'wandb.init', 'wandb.init', ([], {'project': 'args.wandb.project', 'name': 'args.wandb.notes', 'config': 'args.config', 'notes': 'args.wandb.notes', 'allow_val_change': '(True)'}), '(project=args.wandb.project, name=args.wandb.notes, config=args.\n config, notes=args.wandb.notes, allow_val_change=True)\n', (3416, 3539), False, 'import wandb\n'), ((4000, 4048), 'wandb.config.update', 'wandb.config.update', (['args'], {'allow_val_change': '(True)'}), '(args, allow_val_change=True)\n', (4019, 4048), False, 'import wandb\n'), ((4199, 4210), 'time.time', 'time.time', ([], {}), '()\n', (4208, 4210), False, 'import os, argparse, time\n'), ((4226, 4237), 'time.time', 'time.time', ([], {}), '()\n', (4235, 4237), False, 'import os, argparse, time\n')] |
"""
References:
[1] <NAME> "Factorization Machines"
(https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
[2] <NAME> et al. "Neural Factorization Machines for Sparse Predictive Analytics"
(https://arxiv.org/pdf/1708.05027.pdf)
author: massquantity
"""
from itertools import islice
import os
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
from tensorflow.keras.initializers import (
truncated_normal as tf_truncated_normal
)
from .base import Base, TfMixin
from ..data.data_generator import DataGenFeat
from ..evaluation.evaluate import EvalMixin
from ..utils.tf_ops import (
reg_config,
dropout_config,
lr_decay_config,
multi_sparse_combine_embedding
)
from ..utils.sampling import NegativeSampling
from ..utils.misc import count_params
from ..feature import (
get_predict_indices_and_values,
get_recommend_indices_and_values,
features_from_dict,
add_item_features
)
tf.disable_v2_behavior()
class FM(Base, TfMixin, EvalMixin):
    """
    Note this implementation is actually a mixture of FM and NFM,
    since it uses one dense layer in the final output
    """
    # TF variable names grouped by field type; the TfMixin save/load
    # helpers look embedding variables up by these names.
    user_variables = ["linear_user_feat", "pairwise_user_feat"]
    item_variables = ["linear_item_feat", "pairwise_item_feat"]
    sparse_variables = ["linear_sparse_feat", "pairwise_sparse_feat"]
    dense_variables = ["linear_dense_feat", "pairwise_dense_feat"]
    def __init__(
            self,
            task,
            data_info=None,
            embed_size=16,
            n_epochs=20,
            lr=0.01,
            lr_decay=False,
            reg=None,
            batch_size=256,
            num_neg=1,
            use_bn=True,
            dropout_rate=None,
            batch_sampling=False,
            multi_sparse_combiner="sqrtn",
            seed=42,
            lower_upper_bound=None,
            tf_sess_config=None
    ):
        """Store hyper-parameters and dataset metadata.

        The TF graph itself is built lazily in ``fit`` (via
        ``_build_model``), not here.  ``task`` is either "rating"
        (regression) or "ranking" (binary classification), judging
        from its uses in ``_build_train_ops`` and ``predict``.
        """
        Base.__init__(self, task, data_info, lower_upper_bound)
        TfMixin.__init__(self, tf_sess_config)
        EvalMixin.__init__(self, task, data_info)
        self.task = task
        self.data_info = data_info
        self.embed_size = embed_size
        self.n_epochs = n_epochs
        self.lr = lr
        self.lr_decay = lr_decay
        self.reg = reg_config(reg)
        self.batch_size = batch_size
        self.num_neg = num_neg
        self.use_bn = use_bn
        self.dropout_rate = dropout_config(dropout_rate)
        self.batch_sampling = batch_sampling
        self.n_users = data_info.n_users
        self.n_items = data_info.n_items
        self.seed = seed
        self.user_consumed = data_info.user_consumed
        # Whether the dataset carries sparse / dense side features.
        self.sparse = self._decide_sparse_indices(data_info)
        self.dense = self._decide_dense_values(data_info)
        if self.sparse:
            self.sparse_feature_size = self._sparse_feat_size(data_info)
            self.sparse_field_size = self._sparse_field_size(data_info)
            self.multi_sparse_combiner = self._check_multi_sparse(
                data_info, multi_sparse_combiner)
            self.true_sparse_field_size = self._true_sparse_field_size(
                data_info, self.sparse_field_size, self.multi_sparse_combiner)
        if self.dense:
            self.dense_field_size = self._dense_field_size(data_info)
        self.all_args = locals()
    def _build_model(self):
        """Assemble the FM computation graph (linear + pairwise terms)."""
        self.graph_built = True
        tf.set_random_seed(self.seed)
        self.labels = tf.placeholder(tf.float32, shape=[None])
        self.is_training = tf.placeholder_with_default(False, shape=[])
        # Each _build_* method appends its field embeddings to these lists.
        self.linear_embed, self.pairwise_embed = [], []
        self._build_user_item()
        if self.sparse:
            self._build_sparse()
        if self.dense:
            self._build_dense()
        linear_embed = tf.concat(self.linear_embed, axis=1)
        pairwise_embed = tf.concat(self.pairwise_embed, axis=1)
        # linear_term = tf.reduce_sum(linear_embed, axis=1,
        #                             keepdims=True)
        # B * 1
        linear_term = tf.layers.dense(linear_embed, units=1, activation=None)
        # B * K -- standard FM identity: the sum of pairwise interactions
        # equals 0.5 * ((sum of embeddings)^2 - sum of squared embeddings).
        pairwise_term = 0.5 * tf.subtract(
            tf.square(tf.reduce_sum(pairwise_embed, axis=1)),
            tf.reduce_sum(tf.square(pairwise_embed), axis=1)
        )
        # For original FM, just add K dim together:
        # pairwise_term = 0.5 * tf.reduce_sum(pairwise_term, axis=1)
        if self.use_bn:
            pairwise_term = tf.layers.batch_normalization(
                pairwise_term, training=self.is_training)
        # NFM-style: pass the K-dim interaction vector through one dense
        # layer instead of summing it, then combine with the linear term.
        pairwise_term = tf.layers.dense(inputs=pairwise_term,
                                        units=1,
                                        activation=tf.nn.elu)
        self.output = tf.squeeze(tf.add(linear_term, pairwise_term))
        count_params()
    def _build_user_item(self):
        """Create user/item embedding variables and their batch lookups."""
        self.user_indices = tf.placeholder(tf.int32, shape=[None])
        self.item_indices = tf.placeholder(tf.int32, shape=[None])
        # "+ 1" reserves an extra row used for unknown/cold-start ids
        # (see recommend_user, which maps unknown users to self.n_users).
        linear_user_feat = tf.get_variable(
            name="linear_user_feat",
            shape=[self.n_users + 1, 1],
            initializer=tf_truncated_normal(0.0, 0.03),
            regularizer=self.reg)
        linear_item_feat = tf.get_variable(
            name="linear_item_feat",
            shape=[self.n_items + 1, 1],
            initializer=tf_truncated_normal(0.0, 0.03),
            regularizer=self.reg)
        pairwise_user_feat = tf.get_variable(
            name="pairwise_user_feat",
            shape=[self.n_users + 1, self.embed_size],
            initializer=tf_truncated_normal(0.0, 0.03),
            regularizer=self.reg)
        pairwise_item_feat = tf.get_variable(
            name="pairwise_item_feat",
            shape=[self.n_items + 1, self.embed_size],
            initializer=tf_truncated_normal(0.0, 0.03),
            regularizer=self.reg)
        # print(linear_embed.get_shape().as_list())
        linear_user_embed = tf.nn.embedding_lookup(linear_user_feat,
                                                 self.user_indices)
        linear_item_embed = tf.nn.embedding_lookup(linear_item_feat,
                                                 self.item_indices)
        self.linear_embed.extend([linear_user_embed, linear_item_embed])
        pairwise_user_embed = tf.expand_dims(
            tf.nn.embedding_lookup(pairwise_user_feat, self.user_indices),
            axis=1)
        pairwise_item_embed = tf.expand_dims(
            tf.nn.embedding_lookup(pairwise_item_feat, self.item_indices),
            axis=1
        )
        self.pairwise_embed.extend([pairwise_user_embed, pairwise_item_embed])
    def _build_sparse(self):
        """Create sparse-feature embedding variables and lookups."""
        self.sparse_indices = tf.placeholder(
            tf.int32, shape=[None, self.sparse_field_size])
        linear_sparse_feat = tf.get_variable(
            name="linear_sparse_feat",
            shape=[self.sparse_feature_size],
            initializer=tf_truncated_normal(0.0, 0.03),
            regularizer=self.reg)
        pairwise_sparse_feat = tf.get_variable(
            name="pairwise_sparse_feat",
            shape=[self.sparse_feature_size, self.embed_size],
            initializer=tf_truncated_normal(0.0, 0.03),
            regularizer=self.reg)
        # Multi-value sparse fields may be combined (sum/mean/sqrtn) into
        # one embedding per field; otherwise fall back to a plain lookup.
        if (self.data_info.multi_sparse_combine_info
                and self.multi_sparse_combiner in ("sum", "mean", "sqrtn")):
            linear_sparse_embed = multi_sparse_combine_embedding(
                self.data_info, linear_sparse_feat, self.sparse_indices,
                self.multi_sparse_combiner, 1)
            pairwise_sparse_embed = multi_sparse_combine_embedding(
                self.data_info, pairwise_sparse_feat, self.sparse_indices,
                self.multi_sparse_combiner, self.embed_size)
        else:
            linear_sparse_embed = tf.nn.embedding_lookup(    # B * F1
                linear_sparse_feat, self.sparse_indices)
            pairwise_sparse_embed = tf.nn.embedding_lookup(   # B * F1 * K
                pairwise_sparse_feat, self.sparse_indices)
        self.linear_embed.append(linear_sparse_embed)
        self.pairwise_embed.append(pairwise_sparse_embed)
    def _build_dense(self):
        """Create dense-feature variables; raw values scale the weights."""
        self.dense_values = tf.placeholder(
            tf.float32, shape=[None, self.dense_field_size])
        dense_values_reshape = tf.reshape(
            self.dense_values, [-1, self.dense_field_size, 1])
        batch_size = tf.shape(self.dense_values)[0]
        linear_dense_feat = tf.get_variable(
            name="linear_dense_feat",
            shape=[self.dense_field_size],
            initializer=tf_truncated_normal(0.0, 0.03),
            regularizer=self.reg)
        pairwise_dense_feat = tf.get_variable(
            name="pairwise_dense_feat",
            shape=[self.dense_field_size, self.embed_size],
            initializer=tf_truncated_normal(0.0, 0.03),
            regularizer=self.reg)
        # B * F2 -- broadcast the per-field weights over the batch,
        # then scale them by the actual dense feature values.
        linear_dense_embed = tf.tile(linear_dense_feat, [batch_size])
        linear_dense_embed = tf.reshape(
            linear_dense_embed, [-1, self.dense_field_size])
        linear_dense_embed = tf.multiply(
            linear_dense_embed, self.dense_values)
        pairwise_dense_embed = tf.expand_dims(pairwise_dense_feat, axis=0)
        # B * F2 * K
        pairwise_dense_embed = tf.tile(
            pairwise_dense_embed, [batch_size, 1, 1])
        pairwise_dense_embed = tf.multiply(
            pairwise_dense_embed, dense_values_reshape)
        self.linear_embed.append(linear_dense_embed)
        self.pairwise_embed.append(pairwise_dense_embed)
    def _build_train_ops(self, **kwargs):
        """Define the loss, optimizer and the grouped training op."""
        if self.task == "rating":
            self.loss = tf.losses.mean_squared_error(labels=self.labels,
                                                     predictions=self.output)
        elif self.task == "ranking":
            self.loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels=self.labels,
                                                        logits=self.output)
            )
        if self.reg is not None:
            reg_keys = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            total_loss = self.loss + tf.add_n(reg_keys)
        else:
            total_loss = self.loss
        if self.lr_decay:
            n_batches = int(self.data_info.data_size / self.batch_size)
            self.lr, global_steps = lr_decay_config(self.lr, n_batches,
                                                    **kwargs)
        else:
            global_steps = None
        optimizer = tf.train.AdamOptimizer(self.lr)
        optimizer_op = optimizer.minimize(total_loss, global_step=global_steps)
        # Group the batch-norm moving-average updates (UPDATE_OPS) with
        # the optimizer step so both run on every training iteration.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.training_op = tf.group([optimizer_op, update_ops])
        self.sess.run(tf.global_variables_initializer())
    def fit(self, train_data, verbose=1, shuffle=True,
            eval_data=None, metrics=None, **kwargs):
        """Train the model; builds the graph on the first call.

        For ranking with ``batch_sampling`` enabled, negatives are drawn
        per batch via ``NegativeSampling``; otherwise plain ``DataGenFeat``
        batching is used.
        """
        self.show_start_time()
        if not self.graph_built:
            self._build_model()
            self._build_train_ops(**kwargs)
        if self.task == "ranking" and self.batch_sampling:
            self._check_has_sampled(train_data, verbose)
            data_generator = NegativeSampling(train_data,
                                              self.data_info,
                                              self.num_neg,
                                              self.sparse,
                                              self.dense,
                                              batch_sampling=True)
        else:
            data_generator = DataGenFeat(train_data,
                                        self.sparse,
                                        self.dense)
        self.train_feat(data_generator, verbose, shuffle, eval_data, metrics,
                        **kwargs)
        self.assign_oov()
    def predict(self, user, item, feats=None, cold_start="average",
                inner_id=False):
        """Predict scores for (user, item) pairs.

        Rating predictions are clipped to [lower_bound, upper_bound];
        ranking logits are mapped through a sigmoid.  With
        ``cold_start="popular"`` unknown pairs get
        ``self.default_prediction``.
        """
        user, item = self.convert_id(user, item, inner_id)
        unknown_num, unknown_index, user, item = self._check_unknown(user, item)
        (
            user_indices,
            item_indices,
            sparse_indices,
            dense_values
        ) = get_predict_indices_and_values(
            self.data_info, user, item, self.n_items, self.sparse, self.dense)
        if feats is not None:
            assert isinstance(feats, (dict, pd.Series)), (
                "feats must be dict or pandas.Series.")
            assert len(user_indices) == 1, "only support single user for feats"
            sparse_indices, dense_values = features_from_dict(
                self.data_info, sparse_indices, dense_values, feats, "predict")
        feed_dict = self._get_feed_dict(user_indices, item_indices,
                                        sparse_indices, dense_values,
                                        None, False)
        preds = self.sess.run(self.output, feed_dict)
        if self.task == "rating":
            preds = np.clip(preds, self.lower_bound, self.upper_bound)
        elif self.task == "ranking":
            # Convert logits to probabilities with a sigmoid.
            preds = 1 / (1 + np.exp(-preds))
        if unknown_num > 0 and cold_start == "popular":
            preds[unknown_index] = self.default_prediction
        return preds
    def recommend_user(self, user, n_rec, user_feats=None, item_data=None,
                       cold_start="average", inner_id=False):
        """Return the top ``n_rec`` (item, score) pairs for one user,
        excluding items the user already consumed.

        Unknown users: ``cold_start="average"`` maps them to the reserved
        embedding row ``self.n_users``; ``"popular"`` falls back to
        popularity-based recommendations.
        """
        user_id = self._check_unknown_user(user, inner_id)
        if user_id is None:
            if cold_start == "average":
                # Reserved row self.n_users acts as the "average" user.
                user_id = self.n_users
            elif cold_start == "popular":
                return self.popular_recommends(inner_id, n_rec)
            else:
                raise ValueError(user)
        (
            user_indices,
            item_indices,
            sparse_indices,
            dense_values
        ) = get_recommend_indices_and_values(
            self.data_info, user_id, self.n_items, self.sparse, self.dense)
        if user_feats is not None:
            assert isinstance(user_feats, (dict, pd.Series)), (
                "feats must be dict or pandas.Series.")
            sparse_indices, dense_values = features_from_dict(
                self.data_info, sparse_indices, dense_values, user_feats,
                "recommend")
        if item_data is not None:
            assert isinstance(item_data, pd.DataFrame), (
                "item_data must be pandas DataFrame")
            assert "item" in item_data.columns, (
                "item_data must contain 'item' column")
            sparse_indices, dense_values = add_item_features(
                self.data_info, sparse_indices, dense_values, item_data)
        feed_dict = self._get_feed_dict(user_indices, item_indices,
                                        sparse_indices, dense_values,
                                        None, False)
        recos = self.sess.run(self.output, feed_dict)
        if self.task == "ranking":
            recos = 1 / (1 + np.exp(-recos))
        consumed = set(self.user_consumed[user_id])
        # Take enough top candidates that filtering out consumed items
        # still leaves n_rec results.
        count = n_rec + len(consumed)
        ids = np.argpartition(recos, -count)[-count:]
        rank = sorted(zip(ids, recos[ids]), key=lambda x: -x[1])
        recs_and_scores = islice(
            (rec if inner_id else (self.data_info.id2item[rec[0]], rec[1])
             for rec in rank if rec[0] not in consumed),
            n_rec
        )
        return list(recs_and_scores)
    def save(self, path, model_name, manual=True, inference_only=False):
        """Save hyper-params plus either raw variables (``manual=True``)
        or a full TF model checkpoint."""
        if not os.path.isdir(path):
            print(f"file folder {path} doesn't exists, creating a new one...")
            os.makedirs(path)
        self.save_params(path)
        if manual:
            self.save_variables(path, model_name, inference_only)
        else:
            self.save_tf_model(path, model_name)
    @classmethod
    def load(cls, path, model_name, data_info, manual=True):
        """Counterpart of ``save``; restores a saved model instance."""
        if manual:
            return cls.load_variables(path, model_name, data_info)
        else:
            return cls.load_tf_model(path, model_name, data_info)
| [
"numpy.clip",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v1.losses.mean_squared_error",
"tensorflow.compat.v1.set_random_seed",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.get_collection",
"tensorf... | [((963, 987), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (985, 987), True, 'import tensorflow.compat.v1 as tf\n'), ((3404, 3433), 'tensorflow.compat.v1.set_random_seed', 'tf.set_random_seed', (['self.seed'], {}), '(self.seed)\n', (3422, 3433), True, 'import tensorflow.compat.v1 as tf\n'), ((3456, 3496), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]'}), '(tf.float32, shape=[None])\n', (3470, 3496), True, 'import tensorflow.compat.v1 as tf\n'), ((3524, 3568), 'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['(False)'], {'shape': '[]'}), '(False, shape=[])\n', (3551, 3568), True, 'import tensorflow.compat.v1 as tf\n'), ((3794, 3830), 'tensorflow.compat.v1.concat', 'tf.concat', (['self.linear_embed'], {'axis': '(1)'}), '(self.linear_embed, axis=1)\n', (3803, 3830), True, 'import tensorflow.compat.v1 as tf\n'), ((3856, 3894), 'tensorflow.compat.v1.concat', 'tf.concat', (['self.pairwise_embed'], {'axis': '(1)'}), '(self.pairwise_embed, axis=1)\n', (3865, 3894), True, 'import tensorflow.compat.v1 as tf\n'), ((4046, 4101), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', (['linear_embed'], {'units': '(1)', 'activation': 'None'}), '(linear_embed, units=1, activation=None)\n', (4061, 4101), True, 'import tensorflow.compat.v1 as tf\n'), ((4579, 4647), 'tensorflow.compat.v1.layers.dense', 'tf.layers.dense', ([], {'inputs': 'pairwise_term', 'units': '(1)', 'activation': 'tf.nn.elu'}), '(inputs=pairwise_term, units=1, activation=tf.nn.elu)\n', (4594, 4647), True, 'import tensorflow.compat.v1 as tf\n'), ((4881, 4919), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]'}), '(tf.int32, shape=[None])\n', (4895, 4919), True, 'import tensorflow.compat.v1 as tf\n'), ((4948, 4986), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]'}), '(tf.int32, shape=[None])\n', (4962, 
4986), True, 'import tensorflow.compat.v1 as tf\n'), ((5953, 6012), 'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['linear_user_feat', 'self.user_indices'], {}), '(linear_user_feat, self.user_indices)\n', (5975, 6012), True, 'import tensorflow.compat.v1 as tf\n'), ((6092, 6151), 'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['linear_item_feat', 'self.item_indices'], {}), '(linear_item_feat, self.item_indices)\n', (6114, 6151), True, 'import tensorflow.compat.v1 as tf\n'), ((6707, 6769), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, self.sparse_field_size]'}), '(tf.int32, shape=[None, self.sparse_field_size])\n', (6721, 6769), True, 'import tensorflow.compat.v1 as tf\n'), ((8212, 8275), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, self.dense_field_size]'}), '(tf.float32, shape=[None, self.dense_field_size])\n', (8226, 8275), True, 'import tensorflow.compat.v1 as tf\n'), ((8320, 8381), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['self.dense_values', '[-1, self.dense_field_size, 1]'], {}), '(self.dense_values, [-1, self.dense_field_size, 1])\n', (8330, 8381), True, 'import tensorflow.compat.v1 as tf\n'), ((8948, 8988), 'tensorflow.compat.v1.tile', 'tf.tile', (['linear_dense_feat', '[batch_size]'], {}), '(linear_dense_feat, [batch_size])\n', (8955, 8988), True, 'import tensorflow.compat.v1 as tf\n'), ((9018, 9077), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['linear_dense_embed', '[-1, self.dense_field_size]'], {}), '(linear_dense_embed, [-1, self.dense_field_size])\n', (9028, 9077), True, 'import tensorflow.compat.v1 as tf\n'), ((9120, 9170), 'tensorflow.compat.v1.multiply', 'tf.multiply', (['linear_dense_embed', 'self.dense_values'], {}), '(linear_dense_embed, self.dense_values)\n', (9131, 9170), True, 'import tensorflow.compat.v1 as tf\n'), ((9216, 9259), 'tensorflow.compat.v1.expand_dims', 'tf.expand_dims', 
(['pairwise_dense_feat'], {'axis': '(0)'}), '(pairwise_dense_feat, axis=0)\n', (9230, 9259), True, 'import tensorflow.compat.v1 as tf\n'), ((9312, 9361), 'tensorflow.compat.v1.tile', 'tf.tile', (['pairwise_dense_embed', '[batch_size, 1, 1]'], {}), '(pairwise_dense_embed, [batch_size, 1, 1])\n', (9319, 9361), True, 'import tensorflow.compat.v1 as tf\n'), ((9406, 9461), 'tensorflow.compat.v1.multiply', 'tf.multiply', (['pairwise_dense_embed', 'dense_values_reshape'], {}), '(pairwise_dense_embed, dense_values_reshape)\n', (9417, 9461), True, 'import tensorflow.compat.v1 as tf\n'), ((10573, 10604), 'tensorflow.compat.v1.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (10595, 10604), True, 'import tensorflow.compat.v1 as tf\n'), ((10706, 10748), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (10723, 10748), True, 'import tensorflow.compat.v1 as tf\n'), ((10776, 10812), 'tensorflow.compat.v1.group', 'tf.group', (['[optimizer_op, update_ops]'], {}), '([optimizer_op, update_ops])\n', (10784, 10812), True, 'import tensorflow.compat.v1 as tf\n'), ((15311, 15435), 'itertools.islice', 'islice', (['(rec if inner_id else (self.data_info.id2item[rec[0]], rec[1]) for rec in\n rank if rec[0] not in consumed)', 'n_rec'], {}), '((rec if inner_id else (self.data_info.id2item[rec[0]], rec[1]) for\n rec in rank if rec[0] not in consumed), n_rec)\n', (15317, 15435), False, 'from itertools import islice\n'), ((4466, 4537), 'tensorflow.compat.v1.layers.batch_normalization', 'tf.layers.batch_normalization', (['pairwise_term'], {'training': 'self.is_training'}), '(pairwise_term, training=self.is_training)\n', (4495, 4537), True, 'import tensorflow.compat.v1 as tf\n'), ((4761, 4795), 'tensorflow.compat.v1.add', 'tf.add', (['linear_term', 'pairwise_term'], {}), '(linear_term, pairwise_term)\n', (4767, 4795), True, 'import tensorflow.compat.v1 as tf\n'), ((6335, 6396), 
'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['pairwise_user_feat', 'self.user_indices'], {}), '(pairwise_user_feat, self.user_indices)\n', (6357, 6396), True, 'import tensorflow.compat.v1 as tf\n'), ((6476, 6537), 'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['pairwise_item_feat', 'self.item_indices'], {}), '(pairwise_item_feat, self.item_indices)\n', (6498, 6537), True, 'import tensorflow.compat.v1 as tf\n'), ((7816, 7879), 'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['linear_sparse_feat', 'self.sparse_indices'], {}), '(linear_sparse_feat, self.sparse_indices)\n', (7838, 7879), True, 'import tensorflow.compat.v1 as tf\n'), ((7945, 8010), 'tensorflow.compat.v1.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['pairwise_sparse_feat', 'self.sparse_indices'], {}), '(pairwise_sparse_feat, self.sparse_indices)\n', (7967, 8010), True, 'import tensorflow.compat.v1 as tf\n'), ((8416, 8443), 'tensorflow.compat.v1.shape', 'tf.shape', (['self.dense_values'], {}), '(self.dense_values)\n', (8424, 8443), True, 'import tensorflow.compat.v1 as tf\n'), ((9687, 9760), 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.losses.mean_squared_error', ([], {'labels': 'self.labels', 'predictions': 'self.output'}), '(labels=self.labels, predictions=self.output)\n', (9715, 9760), True, 'import tensorflow.compat.v1 as tf\n'), ((10114, 10167), 'tensorflow.compat.v1.get_collection', 'tf.get_collection', (['tf.GraphKeys.REGULARIZATION_LOSSES'], {}), '(tf.GraphKeys.REGULARIZATION_LOSSES)\n', (10131, 10167), True, 'import tensorflow.compat.v1 as tf\n'), ((10835, 10868), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (10866, 10868), True, 'import tensorflow.compat.v1 as tf\n'), ((13065, 13115), 'numpy.clip', 'np.clip', (['preds', 'self.lower_bound', 'self.upper_bound'], {}), '(preds, self.lower_bound, self.upper_bound)\n', (13072, 13115), True, 'import numpy 
as np\n'), ((15180, 15210), 'numpy.argpartition', 'np.argpartition', (['recos', '(-count)'], {}), '(recos, -count)\n', (15195, 15210), True, 'import numpy as np\n'), ((15605, 15624), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (15618, 15624), False, 'import os\n'), ((15717, 15734), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (15728, 15734), False, 'import os\n'), ((5134, 5164), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.03)'], {}), '(0.0, 0.03)\n', (5153, 5164), True, 'from tensorflow.keras.initializers import truncated_normal as tf_truncated_normal\n'), ((5346, 5376), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.03)'], {}), '(0.0, 0.03)\n', (5365, 5376), True, 'from tensorflow.keras.initializers import truncated_normal as tf_truncated_normal\n'), ((5576, 5606), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.03)'], {}), '(0.0, 0.03)\n', (5595, 5606), True, 'from tensorflow.keras.initializers import truncated_normal as tf_truncated_normal\n'), ((5806, 5836), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.03)'], {}), '(0.0, 0.03)\n', (5825, 5836), True, 'from tensorflow.keras.initializers import truncated_normal as tf_truncated_normal\n'), ((6939, 6969), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.03)'], {}), '(0.0, 0.03)\n', (6958, 6969), True, 'from tensorflow.keras.initializers import truncated_normal as tf_truncated_normal\n'), ((7181, 7211), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.03)'], {}), '(0.0, 0.03)\n', (7200, 7211), True, 'from tensorflow.keras.initializers import truncated_normal as tf_truncated_normal\n'), ((8598, 8628), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.03)'], {}), '(0.0, 0.03)\n', (8617, 8628), True, 'from 
tensorflow.keras.initializers import truncated_normal as tf_truncated_normal\n'), ((8835, 8865), 'tensorflow.keras.initializers.truncated_normal', 'tf_truncated_normal', (['(0.0)', '(0.03)'], {}), '(0.0, 0.03)\n', (8854, 8865), True, 'from tensorflow.keras.initializers import truncated_normal as tf_truncated_normal\n'), ((10205, 10223), 'tensorflow.compat.v1.add_n', 'tf.add_n', (['reg_keys'], {}), '(reg_keys)\n', (10213, 10223), True, 'import tensorflow.compat.v1 as tf\n'), ((4183, 4220), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['pairwise_embed'], {'axis': '(1)'}), '(pairwise_embed, axis=1)\n', (4196, 4220), True, 'import tensorflow.compat.v1 as tf\n'), ((4249, 4274), 'tensorflow.compat.v1.square', 'tf.square', (['pairwise_embed'], {}), '(pairwise_embed)\n', (4258, 4274), True, 'import tensorflow.compat.v1 as tf\n'), ((9907, 9986), 'tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'self.labels', 'logits': 'self.output'}), '(labels=self.labels, logits=self.output)\n', (9946, 9986), True, 'import tensorflow.compat.v1 as tf\n'), ((15060, 15074), 'numpy.exp', 'np.exp', (['(-recos)'], {}), '(-recos)\n', (15066, 15074), True, 'import numpy as np\n'), ((13182, 13196), 'numpy.exp', 'np.exp', (['(-preds)'], {}), '(-preds)\n', (13188, 13196), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import os
import pandas as pd
import utils_snpko as utils
from scipy.stats import fisher_exact
logger = utils.logger
def stats(args):
    '''
    Compute some simple statistics on the data:
    * Univariate (uncorrected) p-value
    * (Uncorrected) likelihood ratio
    * Bonferroni corrected p-value

    Reads cleaned_input.csv and wild_types.csv from args.working_dir and
    writes results/uncorrected.csv with one row per (SNP, label) pair.
    '''
    df = pd.read_csv(os.path.join(args.working_dir, 'cleaned_input.csv'))
    utils.safe_mkdir(os.path.join(args.working_dir, 'results'))

    df_wild = pd.read_csv(os.path.join(args.working_dir, 'wild_types.csv'))
    SNP_to_wild_type = dict(
        zip(df_wild['SNP'].values, df_wild['wild_type'].values))

    # "features" are SNPs
    feature_list = [field for field in df.columns if field.startswith('rs')]
    # "labels" are the dependent variable (e.g., MRI observations)
    label_list = [
        field for field in df.columns if field.startswith(args.data_prefix)]

    N = len(df)
    # feature_array[i, j] = number of non-wild-type haplotypes for
    # sample i at SNP j (0, 1, or 2).
    feature_array = np.zeros((N, len(feature_list)))
    for i, feature in enumerate(feature_list):
        feature_array[:, i] = utils.genotype_to_nonwild_type_count(
            df[feature].values, SNP_to_wild_type[feature])

    # The above counts the number of non-wild-type haplotypes, so the values are
    # 0 (wild type diploid), 1, or 2. To analyze with 2x2 contingency table, we
    # will combine 1 and 2 into a single state, so we either have "diploid wild type"
    # or not.
    feature_array[feature_array == 2] = 1

    num_tests = len(label_list) * len(feature_list)

    # Uncorrected p-value
    with open(os.path.join(args.working_dir, 'results', 'uncorrected.csv'), 'w') as f:
        f.write('SNP,label,uncorrected_p_value,uncorrected_odds_ratio,'
                'bonferroni_corrected_p_value,empirical_ratio_with_imaging_feature,'
                'bonferroni_corrected_p_value,empirical_ratio_with_imaging_feature,'
                'empirical_ratio_without_imaging_feature\n')
        contingency_table = np.zeros((2, 2))
        logger.info('Bonferroni correction: (%d labels x %d SNPs = %d' % (
            len(label_list), len(feature_list), num_tests))
        for label in label_list:
            for feature_index, feature in enumerate(feature_list):
                # Fill the 2x2 table: rows = SNP state (wild/non-wild),
                # columns = label state (absent/present).
                for label_state in [0, 1]:
                    for feature_state in [0, 1]:
                        contingency_table[feature_state, label_state] = (
                            np.sum(np.logical_and(
                                feature_array[
                                    :, feature_index] == feature_state,
                                df[label].values == label_state)))
                oddsratio, pvalue = fisher_exact(contingency_table)
                # Bonferroni: multiply by the number of tests, capped at 1.
                bonferroni = min(pvalue * num_tests, 1.0)
                # Unfortunately, an "imaging feature" is what we call a "label" in the
                # contingency table, not a "feature".
                empirical_ratio_with_feature = '%d/%d' % (
                    contingency_table[1, 1],
                    contingency_table[1, 1] + contingency_table[0, 1])
                empirical_ratio_without_feature = '%d/%d' % (
                    contingency_table[1, 0],
                    contingency_table[1, 0] + contingency_table[0, 0])
                f.write('%s,%s,%f,%f,%f,%s,%s\n' %
                        (feature, label, pvalue, oddsratio, bonferroni,
                         empirical_ratio_with_feature, empirical_ratio_without_feature))
if __name__ == '__main__':
    # Command-line entry point: parse args, create the working directory,
    # set up logging, then run the statistics pass.
    args = utils.parse_arguments()
    utils.safe_mkdir(args.working_dir)
    utils.initialize_logger(args)
    stats(args)
| [
"utils_snpko.safe_mkdir",
"numpy.logical_and",
"utils_snpko.parse_arguments",
"scipy.stats.fisher_exact",
"os.path.join",
"numpy.zeros",
"utils_snpko.initialize_logger",
"utils_snpko.genotype_to_nonwild_type_count"
] | [((3828, 3851), 'utils_snpko.parse_arguments', 'utils.parse_arguments', ([], {}), '()\n', (3849, 3851), True, 'import utils_snpko as utils\n'), ((3856, 3890), 'utils_snpko.safe_mkdir', 'utils.safe_mkdir', (['args.working_dir'], {}), '(args.working_dir)\n', (3872, 3890), True, 'import utils_snpko as utils\n'), ((3895, 3924), 'utils_snpko.initialize_logger', 'utils.initialize_logger', (['args'], {}), '(args)\n', (3918, 3924), True, 'import utils_snpko as utils\n'), ((378, 429), 'os.path.join', 'os.path.join', (['args.working_dir', '"""cleaned_input.csv"""'], {}), "(args.working_dir, 'cleaned_input.csv')\n", (390, 429), False, 'import os\n'), ((452, 493), 'os.path.join', 'os.path.join', (['args.working_dir', '"""results"""'], {}), "(args.working_dir, 'results')\n", (464, 493), False, 'import os\n'), ((522, 570), 'os.path.join', 'os.path.join', (['args.working_dir', '"""wild_types.csv"""'], {}), "(args.working_dir, 'wild_types.csv')\n", (534, 570), False, 'import os\n'), ((1080, 1168), 'utils_snpko.genotype_to_nonwild_type_count', 'utils.genotype_to_nonwild_type_count', (['df[feature].values', 'SNP_to_wild_type[feature]'], {}), '(df[feature].values, SNP_to_wild_type[\n feature])\n', (1116, 1168), True, 'import utils_snpko as utils\n'), ((1984, 2000), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (1992, 2000), True, 'import numpy as np\n'), ((1664, 1724), 'os.path.join', 'os.path.join', (['args.working_dir', '"""results"""', '"""uncorrected.csv"""'], {}), "(args.working_dir, 'results', 'uncorrected.csv')\n", (1676, 1724), False, 'import os\n'), ((2796, 2827), 'scipy.stats.fisher_exact', 'fisher_exact', (['contingency_table'], {}), '(contingency_table)\n', (2808, 2827), False, 'from scipy.stats import fisher_exact\n'), ((2558, 2660), 'numpy.logical_and', 'np.logical_and', (['(feature_array[:, feature_index] == feature_state)', '(df[label].values == label_state)'], {}), '(feature_array[:, feature_index] == feature_state, df[label].\n values == 
label_state)\n', (2572, 2660), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from BasicAutoencoder import DeepAE as DAE
from shrink import l21shrink as SHR
class RobustL21Autoencoder():
    """
    @author: <NAME>
    first version.
    complete: 10/20/2016
    Update to Python3: 03/15/2019
    Des:
        X = L + S
        L is a non-linearly low dimension matrix and S is a sparse matrix.
        argmin ||L - Decoder(Encoder(L))|| + ||S||_2,1
        Use Alternating projection to train model
        The idea of shrink the l21 norm comes from the wiki 'Regularization' link: {
            https://en.wikipedia.org/wiki/Regularization_(mathematics)
        }
    Improve:
        1. fix the 0-cost bugs
    """
    def __init__(self, sess, layers_sizes, lambda_=1.0, error = 1.0e-5):
        # lambda_ weights the l2,1 shrinkage applied to S; layers_sizes[0]
        # must equal the input dimension (asserted in fit).
        self.lambda_ = lambda_
        self.layers_sizes = layers_sizes
        self.error = error  # NOTE(review): tolerance is stored but not used in this class
        self.errors=[]  # NOTE(review): never appended to within this class
        self.AE = DAE.Deep_Autoencoder( sess = sess, input_dim_list = self.layers_sizes)
    def fit(self, X, sess, learning_rate=0.15, inner_iteration = 50,
            iteration=20, batch_size=40, verbose=False):
        """Alternately optimize L (autoencoder fit) and S (l2,1 shrinkage);
        returns the final (L, S) decomposition of X."""
        ## The first layer must be the input layer, so they should have same sizes.
        assert X.shape[1] == self.layers_sizes[0]
        ## initialize L, S, mu(shrinkage operator)
        self.L = np.zeros(X.shape)
        self.S = np.zeros(X.shape)
        ## To estimate the size of input X
        if verbose:
            print ("X shape: ", X.shape)
            print ("L shape: ", self.L.shape)
            print ("S shape: ", self.S.shape)
        for it in range(iteration):
            if verbose:
                print ("Out iteration: " , it)
            ## alternating project, first project to L
            self.L = X - self.S
            ## Using L to train the auto-encoder
            self.AE.fit(self.L, sess = sess,
                        iteration = inner_iteration,
                        learning_rate = learning_rate,
                        batch_size = batch_size,
                        verbose = verbose)
            ## get optimized L: the autoencoder's reconstruction of L
            self.L = self.AE.getRecon(X = self.L, sess = sess)
            ## alternating project, now project to S and shrink S
            self.S = SHR.l21shrink(self.lambda_, (X - self.L))
        return self.L , self.S
    def transform(self, X, sess):
        """Encode X after subtracting the sparse part S learned in fit."""
        L = X - self.S
        return self.AE.transform(X = L, sess = sess)
    def getRecon(self, X, sess):
        """Return the autoencoder's reconstruction of X."""
        return self.AE.getRecon(X, sess = sess)
if __name__ == "__main__":
    # Demo: fit the robust autoencoder on the first 500 samples of a
    # pre-saved dataset. The first layer size (784) implies 784-dimensional
    # input rows -- presumably flattened 28x28 images (confirm against data).
    x = np.load(r"../data/data.npk")[:500]
    with tf.Session() as sess:
        rae = RobustL21Autoencoder(sess = sess, lambda_= 4000, layers_sizes=[784,400,255,100])
        L, S = rae.fit(x, sess = sess, inner_iteration = 60, iteration = 5,verbose = True)
| [
"tensorflow.Session",
"shrink.l21shrink.l21shrink",
"numpy.zeros",
"BasicAutoencoder.DeepAE.Deep_Autoencoder",
"numpy.load"
] | [((900, 965), 'BasicAutoencoder.DeepAE.Deep_Autoencoder', 'DAE.Deep_Autoencoder', ([], {'sess': 'sess', 'input_dim_list': 'self.layers_sizes'}), '(sess=sess, input_dim_list=self.layers_sizes)\n', (920, 965), True, 'from BasicAutoencoder import DeepAE as DAE\n'), ((1300, 1317), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (1308, 1317), True, 'import numpy as np\n'), ((1335, 1352), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (1343, 1352), True, 'import numpy as np\n'), ((2599, 2626), 'numpy.load', 'np.load', (['"""../data/data.npk"""'], {}), "('../data/data.npk')\n", (2606, 2626), True, 'import numpy as np\n'), ((2648, 2660), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2658, 2660), True, 'import tensorflow as tf\n'), ((2287, 2326), 'shrink.l21shrink.l21shrink', 'SHR.l21shrink', (['self.lambda_', '(X - self.L)'], {}), '(self.lambda_, X - self.L)\n', (2300, 2326), True, 'from shrink import l21shrink as SHR\n')] |
"""
Comparison of Dimension Reduction Techniques
--------------------------------------------
A comparison of several different dimension reduction
techniques on a variety of toy datasets. The datasets
are all toy datasets, but should provide a representative
range of the strengths and weaknesses of the different
algorithms.
The time to perform the dimension reduction with each
algorithm and each dataset is recorded in the lower
right of each plot.
Things to note about the datasets:
- Blobs: A set of five gaussian blobs in 10 dimensional
space. This should be a prototypical example
of something that should clearly separate
even in a reduced dimension space.
- Iris: a classic small dataset with one distinct class
and two classes that are not clearly separated.
- Digits: handwritten digits -- ideally different digit
classes should form distinct groups. Due to
the nature of handwriting digits may have several
forms (crossed or uncrossed sevens, capped or
straight line oes, etc.)
- Wine: wine characteristics ideally used for a toy
regression. Ultimately the data is essentially
one dimensional in nature.
- Swiss Roll: data is essentially a rectangle, but
has been "rolled up" like a swiss roll
in three dimensional space. Ideally a
dimension reduction technique should
be able to "unroll" it. The data
has been coloured according to one dimension
of the rectangle, so should form
a rectangle of smooth color variation.
- Sphere: the two dimensional surface of a three
dimensional sphere. This cannot be represented
accurately in two dimensions without tearing.
The sphere has been coloured with hue around
the equator and black to white from the south
to north pole.
"""
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
from sklearn import datasets, decomposition, manifold, preprocessing
from colorsys import hsv_to_rgb
import umap
import gtsne
sns.set(context="paper", style="white")
# --- Toy datasets (see the module docstring for descriptions) ---
blobs, blob_labels = datasets.make_blobs(
    n_samples=500, n_features=10, centers=5, random_state=42
)
iris = datasets.load_iris()
digits = datasets.load_digits(n_class=10)
wine = datasets.load_wine()
swissroll, swissroll_labels = datasets.make_swiss_roll(
    n_samples=1000, noise=0.1, random_state=42
)
# Random Gaussian points normalized onto the unit sphere surface.
sphere = np.random.normal(size=(600, 3))
sphere = preprocessing.normalize(sphere)
# HSV colouring: hue = longitude (arctan2 of x,y), saturation = |z|,
# value ramps black->white from south to north pole.
sphere_hsv = np.array(
    [
        (
            (np.arctan2(c[1], c[0]) + np.pi) / (2 * np.pi),
            np.abs(c[2]),
            min((c[2] + 1.1), 1.0),
        )
        for c in sphere
    ]
)
sphere_colors = np.array([hsv_to_rgb(*c) for c in sphere_hsv])
# (reducer class/callable, constructor kwargs) pairs to compare.
reducers = [
    (decomposition.PCA, {}),
    (gtsne.gtsne, {}),
    (manifold.TSNE, {"perplexity": 30}),
    (manifold.Isomap, {"n_neighbors": 30}),
    # (manifold.MDS, {}),
    (umap.UMAP, {"n_neighbors": 30, "min_dist": 0.3}),
]
test_data = [
    (blobs, blob_labels),
    (iris.data, iris.target),
    (digits.data, digits.target),
    (wine.data, wine.target),
    (swissroll, swissroll_labels),
    (sphere, sphere_colors),
]
dataset_names = ["Blobs", "Iris",
                 "Digits",
                 "Wine", "Swiss Roll", "Sphere"]
n_rows = len(test_data)
n_cols = len(reducers)
ax_index = 1
ax_list = []
# plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.figure(figsize=(10, 8))
plt.subplots_adjust(
    left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=0.05, hspace=0.01
)
# One subplot per (dataset, reducer) pair: embed into 2D, scatter the
# points, and stamp the wall-clock time in the lower-right corner.
for data, labels in test_data:
    for reducer, args in reducers:
        start_time = time.time()
        if reducer == gtsne.gtsne:
            # gtsne is a plain function, not a fit_transform estimator.
            embedding = gtsne.gtsne(data, d=2, K = None, theta=0.5,alpha=1e-2,beta=5e-2,verbose=True )
        else:
            embedding = reducer(n_components=2, **args).fit_transform(data)
        elapsed_time = time.time() - start_time
        ax = plt.subplot(n_rows, n_cols, ax_index)
        # NOTE(review): sphere_colors rows are ndarrays, not tuples, so this
        # branch never fires; the else-branch still renders RGB rows.
        if isinstance(labels[0], tuple):
            ax.scatter(*embedding.T, s=10, c=labels, alpha=0.5)
        else:
            ax.scatter(*embedding.T, s=10, c=labels, cmap="Spectral", alpha=0.5)
        ax.text(
            0.99,
            0.01,
            "{:.2f} s".format(elapsed_time),
            transform=ax.transAxes,
            size=14,
            horizontalalignment="right",
        )
        ax_list.append(ax)
        ax_index += 1
plt.setp(ax_list, xticks=[], yticks=[])
# Row labels: dataset name on the first subplot of each row.
for i in np.arange(n_rows) * n_cols:
    ax_list[i].set_ylabel(dataset_names[i // n_cols], size=16)
# Column labels: reducer name above the first row of subplots.
for i in range(n_cols):
    if reducers[i][0] != gtsne.gtsne:
        ax_list[i].set_xlabel(repr(reducers[i][0]()).split("(")[0], size=16)
        ax_list[i].xaxis.set_label_position("top")
    else:
        ax_list[i].set_xlabel("GTSNE", size=16)
        ax_list[i].xaxis.set_label_position("top")
plt.tight_layout()
plt.show()
| [
"colorsys.hsv_to_rgb",
"numpy.arctan2",
"numpy.arange",
"seaborn.set",
"sklearn.datasets.make_blobs",
"gtsne.gtsne",
"sklearn.datasets.load_iris",
"numpy.random.normal",
"numpy.abs",
"time.time",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.pyplot.setp",
"skle... | [((2141, 2180), 'seaborn.set', 'sns.set', ([], {'context': '"""paper"""', 'style': '"""white"""'}), "(context='paper', style='white')\n", (2148, 2180), True, 'import seaborn as sns\n'), ((2203, 2280), 'sklearn.datasets.make_blobs', 'datasets.make_blobs', ([], {'n_samples': '(500)', 'n_features': '(10)', 'centers': '(5)', 'random_state': '(42)'}), '(n_samples=500, n_features=10, centers=5, random_state=42)\n', (2222, 2280), False, 'from sklearn import datasets, decomposition, manifold, preprocessing\n'), ((2294, 2314), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (2312, 2314), False, 'from sklearn import datasets, decomposition, manifold, preprocessing\n'), ((2324, 2356), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {'n_class': '(10)'}), '(n_class=10)\n', (2344, 2356), False, 'from sklearn import datasets, decomposition, manifold, preprocessing\n'), ((2364, 2384), 'sklearn.datasets.load_wine', 'datasets.load_wine', ([], {}), '()\n', (2382, 2384), False, 'from sklearn import datasets, decomposition, manifold, preprocessing\n'), ((2415, 2483), 'sklearn.datasets.make_swiss_roll', 'datasets.make_swiss_roll', ([], {'n_samples': '(1000)', 'noise': '(0.1)', 'random_state': '(42)'}), '(n_samples=1000, noise=0.1, random_state=42)\n', (2439, 2483), False, 'from sklearn import datasets, decomposition, manifold, preprocessing\n'), ((2499, 2530), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(600, 3)'}), '(size=(600, 3))\n', (2515, 2530), True, 'import numpy as np\n'), ((2540, 2571), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['sphere'], {}), '(sphere)\n', (2563, 2571), False, 'from sklearn import datasets, decomposition, manifold, preprocessing\n'), ((3499, 3526), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (3509, 3526), True, 'import matplotlib.pyplot as plt\n'), ((3527, 3624), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], 
{'left': '(0.02)', 'right': '(0.98)', 'bottom': '(0.001)', 'top': '(0.96)', 'wspace': '(0.05)', 'hspace': '(0.01)'}), '(left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=\n 0.05, hspace=0.01)\n', (3546, 3624), True, 'import matplotlib.pyplot as plt\n'), ((4507, 4546), 'matplotlib.pyplot.setp', 'plt.setp', (['ax_list'], {'xticks': '[]', 'yticks': '[]'}), '(ax_list, xticks=[], yticks=[])\n', (4515, 4546), True, 'import matplotlib.pyplot as plt\n'), ((4949, 4967), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4965, 4967), True, 'import matplotlib.pyplot as plt\n'), ((4968, 4978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4976, 4978), True, 'import matplotlib.pyplot as plt\n'), ((4557, 4574), 'numpy.arange', 'np.arange', (['n_rows'], {}), '(n_rows)\n', (4566, 4574), True, 'import numpy as np\n'), ((2801, 2815), 'colorsys.hsv_to_rgb', 'hsv_to_rgb', (['*c'], {}), '(*c)\n', (2811, 2815), False, 'from colorsys import hsv_to_rgb\n'), ((3713, 3724), 'time.time', 'time.time', ([], {}), '()\n', (3722, 3724), False, 'import time\n'), ((4014, 4051), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', 'n_cols', 'ax_index'], {}), '(n_rows, n_cols, ax_index)\n', (4025, 4051), True, 'import matplotlib.pyplot as plt\n'), ((2683, 2695), 'numpy.abs', 'np.abs', (['c[2]'], {}), '(c[2])\n', (2689, 2695), True, 'import numpy as np\n'), ((3784, 3862), 'gtsne.gtsne', 'gtsne.gtsne', (['data'], {'d': '(2)', 'K': 'None', 'theta': '(0.5)', 'alpha': '(0.01)', 'beta': '(0.05)', 'verbose': '(True)'}), '(data, d=2, K=None, theta=0.5, alpha=0.01, beta=0.05, verbose=True)\n', (3795, 3862), False, 'import gtsne\n'), ((3976, 3987), 'time.time', 'time.time', ([], {}), '()\n', (3985, 3987), False, 'import time\n'), ((2624, 2646), 'numpy.arctan2', 'np.arctan2', (['c[1]', 'c[0]'], {}), '(c[1], c[0])\n', (2634, 2646), True, 'import numpy as np\n')] |
import pre_ml
import numpy as np
import re
def import_data(data_name):
    """Load a delimited dataset and its integer targets from samples/datasets.

    Reads ``samples/datasets/<data_name>_data.csv`` (one sample per line,
    values separated by commas and/or arbitrary whitespace) and
    ``samples/datasets/<data_name>_targets.csv`` (one integer label per line).

    Fixes over the original version:
    - the regex pattern is a raw string (a non-raw ``"\\s+"`` is an invalid
      escape sequence and raises a SyntaxWarning on modern Python);
    - blank lines are skipped instead of crashing the numeric conversion.

    :param data_name: dataset base name, e.g. ``"waveform"``
    :return: tuple ``(dataset, targets)`` -- a float64 2D array and an
        int64 1D array
    """
    base = "samples/datasets/" + data_name
    with open(base + "_data.csv", "r", encoding="utf-8") as f_data_in:
        # Normalize whitespace runs to commas so both delimiters are accepted.
        rows = [
            re.sub(r"\s+", ",", line.strip()).split(",")
            for line in f_data_in
            if line.strip()
        ]
    dataset = np.array(rows, dtype=np.float64)
    with open(base + "_targets.csv", "r", encoding="utf-8") as f_target_in:
        labels = [line.strip() for line in f_target_in if line.strip()]
    targets = np.array(labels, dtype=np.int64)
    return (dataset, targets)
def run_sample():
    """Run the BACO feature-selection demo on the waveform dataset."""
    (x, y) = import_data("waveform")
    # NOTE(review): parameter semantics belong to pre_ml.baco -- t_percent
    # and iter_num presumably control the selected-feature percentage and
    # iteration count; confirm against pre_ml's documentation.
    solution = pre_ml.baco(x, y, t_percent=40, heu_meth="method_1", ml_alg="knn1", iter_num=10)
    pre_ml.draw_baco(solution)
if __name__ == "__main__":
    # Execute the demo only when run as a script (not on import).
    run_sample()
| [
"pre_ml.baco",
"numpy.array",
"pre_ml.draw_baco"
] | [((1242, 1327), 'pre_ml.baco', 'pre_ml.baco', (['x', 'y'], {'t_percent': '(40)', 'heu_meth': '"""method_1"""', 'ml_alg': '"""knn1"""', 'iter_num': '(10)'}), "(x, y, t_percent=40, heu_meth='method_1', ml_alg='knn1', iter_num=10\n )\n", (1253, 1327), False, 'import pre_ml\n'), ((1328, 1354), 'pre_ml.draw_baco', 'pre_ml.draw_baco', (['solution'], {}), '(solution)\n', (1344, 1354), False, 'import pre_ml\n'), ((476, 511), 'numpy.array', 'np.array', (['dataset'], {'dtype': 'np.float64'}), '(dataset, dtype=np.float64)\n', (484, 511), True, 'import numpy as np\n'), ((907, 940), 'numpy.array', 'np.array', (['targets'], {'dtype': 'np.int64'}), '(targets, dtype=np.int64)\n', (915, 940), True, 'import numpy as np\n')] |
from itertools import takewhile, product
import numpy as np
import string
# used for doc testing
def letters_25():
    """Build a 5x5 grid of uppercase letters with 'V' omitted.

    >>> letters_25()
    array([['A', 'B', 'C', 'D', 'E'],
           ['F', 'G', 'H', 'I', 'J'],
           ['K', 'L', 'M', 'N', 'O'],
           ['P', 'Q', 'R', 'S', 'T'],
           ['U', 'W', 'X', 'Y', 'Z']], dtype='<U1')
    """
    alphabet = [ch for ch in string.ascii_uppercase if ch != "V"]
    return np.array(alphabet).reshape(5, 5)
def row(point):
    """Return the row index (first component) of a (row, col) point.

    >>> row((0, 1))
    0
    >>> row((1, 0))
    1
    """
    row_index = point[0]
    return row_index
def width(array):
    """Return the number of columns (second shape dimension) of the array."""
    return array.shape[1]
def height(array):
    """Return the number of rows (first shape dimension) of the array."""
    return array.shape[0]
def col(point):
    """Return the column index (second component) of a (row, col) point.

    >>> col((0, 1))
    1
    >>> col((1, 0))
    0
    """
    col_index = point[1]
    return col_index
# used for doc testing
def value_at(point, array):
    """Return the value stored at a (row, col) point of a 2D array.

    >>> value_at((0, 0), np.array([[1, 2, 3], [4, 5, 6]]))
    1
    """
    r, c = point[0], point[1]
    return array[r, c]
def area_from_shape(shape):
    """Return the number of cells in a grid of the given (rows, cols) shape.

    >>> area_from_shape((2, 3))
    6
    """
    n_rows, n_cols = shape[0], shape[1]
    return n_rows * n_cols
def area(array):
    """Return the number of cells in a 2D array.

    >>> area(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]))
    9
    """
    return array.shape[0] * array.shape[1]
def points(array):
    """Yield every (row, col) index pair of a 2D array in row-major order.

    >>> list(points(np.array([[1, 2, 3], [4, 5, 6]])))
    [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
    """
    for r in range(array.shape[0]):
        for c in range(array.shape[1]):
            yield (r, c)
def can_extend_right(base, p, slice):
    """Return True when `slice`, anchored at point `p` in `base`, still has
    at least one column of `base` to its right.
    """
    right_edge = p[1] + slice.shape[1]
    return right_edge < base.shape[1]
def can_extend_down(base, p, slice):
    """Return True when `slice`, anchored at point `p` in `base`, still has
    at least one row of `base` below it.
    """
    bottom_edge = p[0] + slice.shape[0]
    return bottom_edge < base.shape[0]
def extend_right(base, p, slice):
    """Return `slice` (anchored at `p`) widened by one column to the right,
    or None when it already touches the right edge of `base`.
    """
    r, c = p[0], p[1]
    h, w = slice.shape[0], slice.shape[1]
    if c + w >= base.shape[1]:
        return None
    return base[r : r + h, c : c + w + 1]
def extend_down(base, p, slice):
    """Return `slice` (anchored at `p`) grown by one row downwards,
    or None when it already touches the bottom edge of `base`.
    """
    r, c = p[0], p[1]
    h, w = slice.shape[0], slice.shape[1]
    if r + h >= base.shape[0]:
        return None
    return base[r : r + h + 1, c : c + w]
def extend_diagonally(base, p, slice):
    """Return `slice` (anchored at `p`) grown by one row AND one column,
    or None when either the bottom or right edge of `base` blocks it.
    """
    r, c = p[0], p[1]
    h, w = slice.shape[0], slice.shape[1]
    if r + h >= base.shape[0] or c + w >= base.shape[1]:
        return None
    return base[r : r + h + 1, c : c + w + 1]
def point_to_slice(base, p):
    """Return the 1x1 sub-array of `base` located at point `p`.

    >>> point_to_slice(letters_25(), (2, 3))
    array([['N']], dtype='<U1')
    """
    r, c = p[0], p[1]
    return base[r : r + 1, c : c + 1]
def all_ones(slice):
    """Return True when every entry of `slice` is truthy (i.e. no zeros).

    >>> bool(all_ones(np.ones((3, 3))))
    True
    >>> bool(all_ones(np.zeros((3, 3))))
    False
    """
    return np.all(slice)
def array_to_point_slices(base):
    """Yield (point, 1x1 slice at that point) for every cell of `base`,
    in row-major order.
    """
    for p in points(base):
        yield p, point_to_slice(base, p)
def rectangles_of_one(base):
    """Generate (point, rectangle) pairs for every all-ones rectangle in
    `base`, each anchored at its top-left point.

    Improvements over the original search:
    - branches are pruned as soon as a rectangle contains a zero: every
      extension of such a rectangle still contains that zero, so it can
      never become all ones;
    - each (point, shape) state is enqueued at most once, instead of once
      per extension order (right-then-down vs down-then-right vs diagonal),
      which previously made the search exponentially redundant.
    As a result each distinct all-ones rectangle is yielded exactly once.
    """
    seen = set()
    # Seed with the 1x1 rectangle at every point of the array.
    stack = list(array_to_point_slices(base))
    while stack:
        p, rect = stack.pop()
        if not all_ones(rect):
            continue  # prune: cannot grow into an all-ones rectangle
        yield p, rect
        extensions = (
            extend_right(base, p, rect),
            extend_down(base, p, rect),
            extend_diagonally(base, p, rect),
        )
        for extended in extensions:
            if extended is None:
                continue  # blocked by the array edge in that direction
            key = (p, extended.shape)
            if key not in seen:
                seen.add(key)
                stack.append((p, extended))
def max_point_and_rectangle_of_one(base):
    """Return the (point, rectangle) pair with the largest all-ones
    rectangle in `base`, or (None, empty 0x0 array) when there is none.

    >>> max_point_and_rectangle_of_one(np.ones((3,3)))[1].shape
    (3, 3)
    >>> max_point_and_rectangle_of_one(np.zeros((3,3)))[1].shape
    (0, 0)
    """
    best = (None, np.ones((0, 0)))
    for candidate in rectangles_of_one(base):
        if area(candidate[1]) > area(best[1]):
            best = candidate
    return best
def largest_rectangle_(array):
    """Return the shape (rows, cols) of the largest rectangle of 1s in a
    2D array of 0s and 1s; (0, 0) when the array contains no ones.

    >>> largest_rectangle_(np.ones((5, 10)))
    (5, 10)
    >>> largest_rectangle_(np.zeros((3, 3)))
    (0, 0)
    >>> islands = np.array([0,0,0,1,0,1,1,0,0,1,1,1,0,0,0,0,0,1,0,0]).reshape(4,5)
    >>> largest_rectangle_(islands)
    (2, 2)
    """
    _, best_rect = max_point_and_rectangle_of_one(array)
    return best_rect.shape
def largest_rectangle(array):
    """String-based API: the largest all-ones rectangle formatted 'RxC'.

    >>> islands = np.array([0,0,0,1,0,1,1,0,0,1,1,1,0,0,0,0,0,1,0,0]).reshape(4,5)
    >>> largest_rectangle(islands)
    '2x2'
    """
    n_rows, n_cols = largest_rectangle_(array)
    return f"{n_rows}x{n_cols}"
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| [
"doctest.testmod",
"numpy.ones"
] | [((8393, 8410), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (8408, 8410), False, 'import doctest\n'), ((6135, 6150), 'numpy.ones', 'np.ones', (['(0, 0)'], {}), '((0, 0))\n', (6142, 6150), True, 'import numpy as np\n')] |
"""Script to use ordinary least squares and ridge regression with stochastic gradient descend."""
import numpy as np
def gradient_RR_OLS(y, X, beta, lmbda):
    """
    Gradient of the least-squares cost with an optional ridge penalty.
    With lmbda == 0 this is plain OLS; lmbda > 0 adds 2 * lmbda * beta.
    :param y: observed values
    :param X: design matrix
    :param beta: parameter vector/ regression coefficients
    :param lmbda: When lam=0 it is OLS and otherwise ridge regression.
    :return: gradient of cost function
    """
    n_samples = len(y)
    residuals = y - X @ beta
    penalty = 2 * lmbda * beta
    return (-2 / n_samples) * X.T @ residuals + penalty
def sigmoid_func(z):
    """Element-wise logistic sigmoid of a column vector.

    Vectorized replacement for the original Python loop, preserving its
    exact piecewise behavior: entries above 700 map to exactly 1.0
    (exp(-z) -> 0) and entries below -700 to exactly 0.0 (exp(-z) -> inf),
    which also avoids overflow in np.exp.

    :param z: array of shape (n, 1) (only column 0 is used, as before)
    :return: float array of shape (n, 1) with entries in [0, 1]
    """
    col = z[:, 0]
    # Clip before exponentiating so the saturated branches cannot trigger
    # overflow warnings; np.where then substitutes the exact 0.0/1.0 values.
    safe = np.clip(col, -700, 700)
    sigmoid = np.where(
        col > 700,
        1.0,
        np.where(col < -700, 0.0, 1.0 / (1.0 + np.exp(-safe))),
    )
    return sigmoid.reshape(-1, 1)
def gradient_LR(y, X, beta, lmbda):
    """
    Gradient of the L2-regularized logistic-regression cost.

    Bug fix: the penalty term is now *added* (+ lmbda * beta). For
    cost = NLL + (lmbda/2) * ||beta||^2 the gradient is
    -X.T (y - sigmoid(X beta)) + lmbda * beta; the previous subtraction
    pushed coefficients away from zero (anti-regularization) and was
    inconsistent with gradient_RR_OLS, which adds its ridge term.

    :param y: observed values
    :param X: design matrix
    :param beta: parameter vector/ regression coefficients
    :param lmbda: L2 regularization parameter
    :return: gradient of cost function
    """
    gradient = (-1) * X.T @ (y - sigmoid_func(X @ beta)) + lmbda * beta
    return gradient
def stochastic_gradient_descent_method(gradient, y, X, start, num_epoch, learn_rate, num_min_batch, lmbda):
    """
    Minimize a cost function with mini-batch stochastic gradient descent.

    Bug fixes:
    - The early-stopping test was ``np.any(descend) <= np.finfo(float).eps``:
      ``np.any`` returns a boolean, so the comparison only fired when every
      step entry was exactly zero. The intended test -- all step components
      at most machine epsilon in magnitude -- is used instead.
    - ``start.reshape(...)`` can return a view, so the caller's array was
      silently mutated during training; the initial vector is now a float
      copy (``astype`` always copies).

    :param gradient: callable gradient(y=..., X=..., beta=..., lmbda=...)
    :param y: observed values
    :param X: design matrix, one row per observation
    :param start: initial values (1-D, reshaped to a column vector)
    :param num_epoch: number of epochs
    :param learn_rate: learn rate
    :param num_min_batch: size of mini batches
    :param lmbda: regularization parameter forwarded to `gradient`
    :return: fitted coefficient column vector of shape (n_features, 1)
    """
    # Copy so the caller's `start` array is never modified in place.
    vector = start.astype(float).reshape(start.shape[0], 1)
    num_observations = X.shape[0]
    eps = np.finfo(float).eps  # hoisted: loop-invariant
    for _ in range(num_epoch):
        for _ in range(int(num_observations / num_min_batch)):
            # Sample a mini batch with replacement.
            batch_index = np.random.randint(num_observations, size=num_min_batch)
            X_batch = X[batch_index, :]
            y_batch = y[batch_index]
            descend = learn_rate * gradient(y=y_batch, X=X_batch, beta=vector, lmbda=lmbda)
            # Stop this epoch once every step component is below machine precision.
            if np.all(np.abs(descend) <= eps):
                break
            vector -= descend
    return vector
| [
"numpy.any",
"numpy.exp",
"numpy.zeros",
"numpy.random.randint",
"numpy.finfo"
] | [((651, 676), 'numpy.zeros', 'np.zeros', (['(z.shape[0], 1)'], {}), '((z.shape[0], 1))\n', (659, 676), True, 'import numpy as np\n'), ((2161, 2216), 'numpy.random.randint', 'np.random.randint', (['num_observations'], {'size': 'num_min_batch'}), '(num_observations, size=num_min_batch)\n', (2178, 2216), True, 'import numpy as np\n'), ((2478, 2493), 'numpy.any', 'np.any', (['descend'], {}), '(descend)\n', (2484, 2493), True, 'import numpy as np\n'), ((791, 813), 'numpy.exp', 'np.exp', (['(-z[elem_in, 0])'], {}), '(-z[elem_in, 0])\n', (797, 813), True, 'import numpy as np\n'), ((2497, 2512), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2505, 2512), True, 'import numpy as np\n')] |
from pathlib import Path
import keras
from keras.datasets import cifar10, cifar100, mnist
from keras.utils import to_categorical # Does One-hot-encoding
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from ..utils.data import consolidate_bins, crop_center
def load_data(load_cat=False, **config):
    """Dispatch to the dataset-specific loader named by config['dataset'].

    'mnist', 'cifar10' and 'cifar100' return
    ((x_train, y_train), (x_test, y_test)). Any name containing 'sdss'
    returns the three (train, dev, test) tuples produced by load_sdss;
    when load_cat is True each tuple additionally carries the raw catalog.

    Raises ValueError for any other dataset name.
    """
    name = config["dataset"]
    if name == "mnist":
        return load_mnist(**config)
    if name == "cifar10":
        return load_cifar10(**config)
    if name == "cifar100":
        return load_cifar100(**config)
    if "sdss" in name:
        # load_sdss already returns the full (train, dev, test) structure,
        # with or without catalogs depending on load_cat.
        return load_sdss(load_cat=load_cat, **config)
    raise ValueError(
        f"`{config['dataset']}` is not one of the valid datasets: "
        "'mnist', 'cifar10', and 'sdss'."
    )
def load_mnist(num_class, **params):
    """Load MNIST as 28x28x1 float32 images scaled to [0, 1], one-hot labels."""
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    def _prep_images(x):
        # Add the channel axis and rescale pixel values to [0, 1].
        return x.reshape(-1, 28, 28, 1).astype("float32") / 255.0
    def _prep_labels(y):
        return to_categorical(y.astype("float32"), num_class)
    return (
        (_prep_images(x_train), _prep_labels(y_train)),
        (_prep_images(x_test), _prep_labels(y_test)),
    )
def load_cifar10(num_class, **params):
    """Load CIFAR-10 with pixels scaled to [0, 1] and one-hot labels."""
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.astype("float32") / 255
    x_test = x_test.astype("float32") / 255
    return (
        (x_train, to_categorical(y_train, num_class)),
        (x_test, to_categorical(y_test, num_class)),
    )
def load_cifar100(num_class, **params):
    """Load CIFAR-100 (fine labels) scaled to [0, 1] with one-hot labels."""
    (x_train, y_train), (x_test, y_test) = cifar100.load_data(label_mode="fine")
    x_train = x_train.astype("float32") / 255
    x_test = x_test.astype("float32") / 255
    return (
        (x_train, to_categorical(y_train, num_class)),
        (x_test, to_categorical(y_test, num_class)),
    )
def load_sdss(
    num_class,
    path_data,
    frac_train=0.8,
    frac_dev=0.1,
    random_state=200,
    image_scale=10.0,
    load_cat=False,
    **params,
):
    """Load an SDSS .npz data cube and split it into train/dev/test.

    The archive (named ``<params['dataset']>.npz``) is searched for in
    ``path_data`` and then in a list of known machine-specific fallback
    paths. It must contain ``cube`` (images), ``labels`` (class labels),
    ``z`` (spectroscopic redshifts) and ``cat`` (a photometric catalog).

    Returns, for each of (train, dev, test):
        (images / image_scale, one_hot_labels, standardized_features, z_spec[, catalog])
    where the catalog is appended only when ``load_cat`` is True.

    NOTE(review): ``params`` must provide ``dataset`` and ``logistic``
    and, when ``logistic`` is truthy, ``z_min``/``z_max`` -- there are no
    defaults for these keys.
    """
    filename = f"{params['dataset']}.npz"
    paths = [
        Path(path_data),
        Path("/bgfs/jnewman/bid13/photoZ/data/pasquet2019"),
        Path("/Users/andrews/projects/photoz/data/pasquet2019"),
        Path("/home/biprateep/Documents/photozCapsNet/photozCapsNet"),
    ]
    # Use the first location where the archive actually exists.
    data = None
    for path in paths:
        try:
            data = np.load(str(path / filename), allow_pickle=True)
            break
        except FileNotFoundError:
            continue
    if data is None:
        raise FileNotFoundError
    n_gal = len(data["labels"])
    # Seeded shuffle so the train/dev/test split is reproducible.
    np.random.seed(random_state)
    indices = np.random.permutation(n_gal)
    ind_split_train = int(np.ceil(frac_train * n_gal))
    ind_split_dev = ind_split_train + int(np.ceil(frac_dev * n_gal))
    # ind_bands = {"u": 0, "g": 1, "r": 2, "i": 3, "z": 4}
    # bands = params.get("bands", ("u", "g", "r", "i", "z"))
    # channels = np.array([ind_bands[band] for band, ind_band in zip(bands, ind_bands)])
    # slice_y, slice_x = crop_center(data["cube"].shape[1:3], params["image_shape"])
    # images = data["cube"][:, slice(*slice_y), slice(*slice_x), channels]
    # labels = consolidate_bins(data["labels"], n_bins_in=num_class, n_bins_out=num_class)
    images = data["cube"]
    labels = data["labels"]
    labels = keras.utils.to_categorical(labels, num_classes=num_class)
    z_spec = data["z"]
    cat = data["cat"]
    # Auxiliary features: extinction-corrected adjacent-band magnitude
    # differences (colors), plus reddening and the corrected r magnitude.
    vals = pd.DataFrame()
    vals["u-g"] = (cat["modelMag_u"] - cat["extinction_u"]) - (
        cat["modelMag_g"] - cat["extinction_g"]
    )
    vals["g-r"] = (cat["modelMag_g"] - cat["extinction_g"]) - (
        cat["modelMag_r"] - cat["extinction_r"]
    )
    vals["r-i"] = (cat["modelMag_r"] - cat["extinction_r"]) - (
        cat["modelMag_i"] - cat["extinction_i"]
    )
    vals["i-z"] = (cat["modelMag_i"] - cat["extinction_i"]) - (
        cat["modelMag_z"] - cat["extinction_z"]
    )
    vals["EBV"] = cat["EBV"]
    vals["r"] = cat["cModelMag_r"] - cat["extinction_r"]
    # Standardize the auxiliary features to zero mean / unit variance.
    scaler = StandardScaler()
    vals = scaler.fit_transform(np.array(vals))
    if params["logistic"]:
        # Logit-style transform mapping z in (z_min, z_max) onto the real line.
        z_spec = np.log((z_spec - params["z_min"]) / (params["z_max"] - z_spec))
    x_train = images[indices[:ind_split_train]] / float(image_scale)
    x_dev = images[indices[ind_split_train:ind_split_dev]] / float(image_scale)
    x_test = images[indices[ind_split_dev:]] / float(image_scale)
    y_train = labels[indices[:ind_split_train]]
    y_dev = labels[indices[ind_split_train:ind_split_dev]]
    y_test = labels[indices[ind_split_dev:]]
    z_spec_train = z_spec[indices[:ind_split_train]]
    z_spec_dev = z_spec[indices[ind_split_train:ind_split_dev]]
    z_spec_test = z_spec[indices[ind_split_dev:]]
    vals_train = vals[indices[:ind_split_train]]
    vals_dev = vals[indices[ind_split_train:ind_split_dev]]
    vals_test = vals[indices[ind_split_dev:]]
    if load_cat == False:
        return (
            (x_train, y_train, vals_train, z_spec_train),
            (x_dev, y_dev, vals_dev, z_spec_dev),
            (x_test, y_test, vals_test, z_spec_test),
        )
    if load_cat == True:
        cat_train = cat[indices[:ind_split_train]]
        cat_dev = cat[indices[ind_split_train:ind_split_dev]]
        cat_test = cat[indices[ind_split_dev:]]
        return (
            (x_train, y_train, vals_train, z_spec_train, cat_train),
            (x_dev, y_dev, vals_dev, z_spec_dev, cat_dev),
            (x_test, y_test, vals_test, z_spec_test, cat_test),
        )
| [
"numpy.ceil",
"keras.datasets.cifar10.load_data",
"keras.datasets.mnist.load_data",
"pathlib.Path",
"keras.datasets.cifar100.load_data",
"numpy.log",
"keras.utils.to_categorical",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.random.perm... | [((2009, 2026), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (2024, 2026), False, 'from keras.datasets import cifar10, cifar100, mnist\n'), ((2431, 2450), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (2448, 2450), False, 'from keras.datasets import cifar10, cifar100, mnist\n'), ((2580, 2614), 'keras.utils.to_categorical', 'to_categorical', (['y_train', 'num_class'], {}), '(y_train, num_class)\n', (2594, 2614), False, 'from keras.utils import to_categorical\n'), ((2628, 2661), 'keras.utils.to_categorical', 'to_categorical', (['y_test', 'num_class'], {}), '(y_test, num_class)\n', (2642, 2661), False, 'from keras.utils import to_categorical\n'), ((2795, 2832), 'keras.datasets.cifar100.load_data', 'cifar100.load_data', ([], {'label_mode': '"""fine"""'}), "(label_mode='fine')\n", (2813, 2832), False, 'from keras.datasets import cifar10, cifar100, mnist\n'), ((2962, 2996), 'keras.utils.to_categorical', 'to_categorical', (['y_train', 'num_class'], {}), '(y_train, num_class)\n', (2976, 2996), False, 'from keras.utils import to_categorical\n'), ((3010, 3043), 'keras.utils.to_categorical', 'to_categorical', (['y_test', 'num_class'], {}), '(y_test, num_class)\n', (3024, 3043), False, 'from keras.utils import to_categorical\n'), ((3827, 3855), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (3841, 3855), True, 'import numpy as np\n'), ((3870, 3898), 'numpy.random.permutation', 'np.random.permutation', (['n_gal'], {}), '(n_gal)\n', (3891, 3898), True, 'import numpy as np\n'), ((4551, 4608), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['labels'], {'num_classes': 'num_class'}), '(labels, num_classes=num_class)\n', (4577, 4608), False, 'import keras\n'), ((4665, 4679), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4677, 4679), True, 'import pandas as pd\n'), ((5252, 5268), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), 
'()\n', (5266, 5268), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3322, 3337), 'pathlib.Path', 'Path', (['path_data'], {}), '(path_data)\n', (3326, 3337), False, 'from pathlib import Path\n'), ((3347, 3398), 'pathlib.Path', 'Path', (['"""/bgfs/jnewman/bid13/photoZ/data/pasquet2019"""'], {}), "('/bgfs/jnewman/bid13/photoZ/data/pasquet2019')\n", (3351, 3398), False, 'from pathlib import Path\n'), ((3408, 3463), 'pathlib.Path', 'Path', (['"""/Users/andrews/projects/photoz/data/pasquet2019"""'], {}), "('/Users/andrews/projects/photoz/data/pasquet2019')\n", (3412, 3463), False, 'from pathlib import Path\n'), ((3473, 3534), 'pathlib.Path', 'Path', (['"""/home/biprateep/Documents/photozCapsNet/photozCapsNet"""'], {}), "('/home/biprateep/Documents/photozCapsNet/photozCapsNet')\n", (3477, 3534), False, 'from pathlib import Path\n'), ((3925, 3952), 'numpy.ceil', 'np.ceil', (['(frac_train * n_gal)'], {}), '(frac_train * n_gal)\n', (3932, 3952), True, 'import numpy as np\n'), ((5301, 5315), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (5309, 5315), True, 'import numpy as np\n'), ((5362, 5425), 'numpy.log', 'np.log', (["((z_spec - params['z_min']) / (params['z_max'] - z_spec))"], {}), "((z_spec - params['z_min']) / (params['z_max'] - z_spec))\n", (5368, 5425), True, 'import numpy as np\n'), ((3996, 4021), 'numpy.ceil', 'np.ceil', (['(frac_dev * n_gal)'], {}), '(frac_dev * n_gal)\n', (4003, 4021), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import pyximport; pyximport.install()
from CRADLE.correctbiasutils.cython import coalesceSections
@pytest.mark.parametrize("starts,values,analysisEnd,stepSize,sectionCount,startEntries,endEntries,valueEntries", [
    (
        np.arange(0, 0),
        np.array([]),
        1,
        1,
        0,
        [],
        [],
        []
    ),
    (
        np.arange(0, 10),
        np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
        11,
        1,
        1,
        [0],
        [10],
        [1.0]
    ),
    (
        np.arange(0, 30, 3),
        np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
        31,
        3,
        1,
        [0],
        [30],
        [1.0]
    ),
    (
        np.arange(10, 31),
        np.array([1, 2, np.nan, np.nan, 1, 1, 1, 2, 2, np.nan, 0, 1, 1, np.nan, 9, 8, 7, 7, 6, 5, 4]),
        32,
        1,
        12,
        [10, 11, 14, 17, 20, 21, 24, 25, 26, 28, 29, 30],
        [11, 12, 17, 19, 21, 23, 25, 26, 28, 29, 30, 31],
        [1.0, 2.0, 1.0, 2.0, 0.0, 1.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0]
    ),
    (
        np.arange(10, 52, 2),
        np.array([1, 2, np.nan, np.nan, 1, 1, 1, 2, 2, np.nan, 0, 1, 1, np.nan, 9, 8, 7, 7, 6, 5, 4]),
        53,
        2,
        12,
        [10, 12, 18, 24, 30, 32, 38, 40, 42, 46, 48, 50],
        [12, 14, 24, 28, 32, 36, 40, 42, 46, 48, 50, 52],
        [1.0, 2.0, 1.0, 2.0, 0.0, 1.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0]
    ),
])
def testCoalesceSections(starts, values, analysisEnd, stepSize, sectionCount, startEntries, endEntries, valueEntries):
    """Check that coalesceSections merges runs of equal, non-NaN values into sections.

    Each case supplies positions (`starts`), per-position values (NaN = missing),
    and the expected section count plus per-section start/end/value lists.
    """
    # Drop NaN positions before coalescing.  A boolean mask replaces the
    # original `np.where(np.isnan(values) == False)[0]` (comparison-to-False
    # anti-idiom, flake8 E712).
    valid = ~np.isnan(values)
    starts = starts[valid]
    values = values[valid]

    result = coalesceSections(starts, values, analysisEnd, stepSize)

    assert result[0] == sectionCount
    assert result[1] == startEntries
    assert result[2] == endEntries
    assert result[3] == valueEntries
| [
"CRADLE.correctbiasutils.cython.coalesceSections",
"numpy.array",
"pyximport.install",
"numpy.isnan",
"numpy.arange"
] | [((51, 70), 'pyximport.install', 'pyximport.install', ([], {}), '()\n', (68, 70), False, 'import pyximport\n'), ((1389, 1444), 'CRADLE.correctbiasutils.cython.coalesceSections', 'coalesceSections', (['starts', 'values', 'analysisEnd', 'stepSize'], {}), '(starts, values, analysisEnd, stepSize)\n', (1405, 1444), False, 'from CRADLE.correctbiasutils.cython import coalesceSections\n'), ((253, 268), 'numpy.arange', 'np.arange', (['(0)', '(0)'], {}), '(0, 0)\n', (262, 268), True, 'import numpy as np\n'), ((272, 284), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (280, 284), True, 'import numpy as np\n'), ((327, 343), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (336, 343), True, 'import numpy as np\n'), ((347, 387), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n', (355, 387), True, 'import numpy as np\n'), ((437, 456), 'numpy.arange', 'np.arange', (['(0)', '(30)', '(3)'], {}), '(0, 30, 3)\n', (446, 456), True, 'import numpy as np\n'), ((460, 500), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n', (468, 500), True, 'import numpy as np\n'), ((550, 567), 'numpy.arange', 'np.arange', (['(10)', '(31)'], {}), '(10, 31)\n', (559, 567), True, 'import numpy as np\n'), ((571, 669), 'numpy.array', 'np.array', (['[1, 2, np.nan, np.nan, 1, 1, 1, 2, 2, np.nan, 0, 1, 1, np.nan, 9, 8, 7, 7, \n 6, 5, 4]'], {}), '([1, 2, np.nan, np.nan, 1, 1, 1, 2, 2, np.nan, 0, 1, 1, np.nan, 9, \n 8, 7, 7, 6, 5, 4])\n', (579, 669), True, 'import numpy as np\n'), ((859, 879), 'numpy.arange', 'np.arange', (['(10)', '(52)', '(2)'], {}), '(10, 52, 2)\n', (868, 879), True, 'import numpy as np\n'), ((883, 981), 'numpy.array', 'np.array', (['[1, 2, np.nan, np.nan, 1, 1, 1, 2, 2, np.nan, 0, 1, 1, np.nan, 9, 8, 7, 7, \n 6, 5, 4]'], {}), '([1, 2, np.nan, np.nan, 1, 1, 1, 2, 2, np.nan, 0, 1, 1, np.nan, 9, \n 8, 7, 7, 6, 5, 4])\n', (891, 981), True, 'import numpy as np\n'), 
((1304, 1320), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (1312, 1320), True, 'import numpy as np\n')] |
import numpy as np
import collections
def raw_frequency(term, doc):
    """Return how many whitespace-delimited tokens of *doc* equal *term*.

    Non-string *doc* values yield 0 (matching the original guard).
    """
    if not isinstance(doc, str):
        return 0
    # list.count performs the same token-by-token equality test the
    # original manual loop did, at C speed.
    return doc.split().count(term)
def get_most_freq_term(doc):
    """Return the raw frequency of the most frequent term in *doc*.

    Despite the name, the COUNT of the top term is returned, not the term
    itself (preserved for caller compatibility).
    """
    frequencies = {word: raw_frequency(word, doc) for word in doc.split()}
    top_term, top_count = collections.Counter(frequencies).most_common(1)[0]
    return top_count
def compute_tf(term, doc):
    """Log-scaled term frequency of *term*, normalized by the doc's top term.

    Returns 0 when the term does not occur in *doc*.
    """
    frequency = raw_frequency(term, doc)
    if frequency <= 0:
        return 0
    numerator = 1 + np.log10(frequency)
    denominator = 1 + np.log10(get_most_freq_term(doc))
    return numerator / denominator
def compute_idf(term, N, index):
    """Inverse document frequency: log10 of total docs N over docs containing *term*."""
    docs_with_term = get_docs_count(index, term)
    return np.log10(N / docs_with_term)
def get_docs_count(index, term):
    """Return the number of documents in the postings list of *term*."""
    postings = index[term]
    return len(postings)
def isInDoc(term, docID, index):
    """Return True iff *docID* appears in the postings list of *term*.

    The `in` expression already yields the boolean the original
    `if ...: return True / return False` pattern spelled out.
    """
    return docID in index[term]
def compute_tf_idf(term, doc, docID, N, index=None):
    """TF-IDF weight of *term* for document *docID* out of *N* documents.

    Bug fix: the original called ``isInDoc(term, docID)`` and
    ``compute_idf(term, N)`` without the inverted *index* those helpers
    require, so every call raised TypeError.  *index* is now an explicit
    parameter; the default keeps the 4-argument call signature importable,
    but a real index must be supplied to compute anything.
    """
    if index is None:
        # Preserve the original failure mode (TypeError) with a clearer message.
        raise TypeError("compute_tf_idf requires the inverted index argument")
    if not isInDoc(term, docID, index):
        return 0
    return compute_tf(term, doc) * compute_idf(term, N, index)
##Vectorization
def vectorize(query, index, index_values):
    """Return a 0/1 list over the vocabulary, with 1 at each query term's slot.

    *index* supplies the vocabulary size; *index_values* maps a term to its
    position via list lookup.
    """
    encoding = np.zeros(len(index), dtype=int)
    for term in query:
        slot = index_values.index(term)
        encoding[slot] = 1
    return encoding.tolist()
##Vector normalization
def get_euclidean_norm(vector):
    """Return the Euclidean (L2) norm of *vector*."""
    squared = np.square(vector)
    return np.sqrt(squared.sum())
def normalize_vector(vector):
    """Scale *vector* to unit Euclidean length; a zero vector passes through unchanged."""
    # Inlined Euclidean norm (sqrt of the sum of squares).
    norm = np.sqrt(np.sum(np.square(vector)))
    if norm == 0:
        norm = 1  # avoid division by zero; zero vectors stay zero
    return (np.array(vector) / norm).tolist()
##Euclidean distance, note that both vectors are length normalized -> Normalized Euclidean Distance
def compute_euclidean_distance(vector1, vector2):
    """Euclidean distance between two vectors (callers pass length-normalized input)."""
    difference = np.array(vector1) - np.array(vector2)
    return np.sqrt(np.sum(difference ** 2))
def dot_product(vector1, vector2):
    """Inner product of two vectors."""
    left = np.asarray(vector1)
    right = np.asarray(vector2)
    return np.dot(left, right)
def compute_cosine_similarity_without_norm(vector1, vector2):
    """Cosine similarity assuming inputs are already unit-normalized.

    With unit vectors the dot product alone is the cosine similarity.
    """
    similarity = dot_product(vector1, vector2)
    return similarity
def compute_cosine_similarity_with_norm(vector1, vector2):
    """Cosine similarity of two arbitrary-length vectors."""
    denominator = get_euclidean_norm(vector1) * get_euclidean_norm(vector2)
    return dot_product(vector1, vector2) / denominator
def compute_cosine_distance_unnormalized_inputs(vector1, vector2):
    """Cosine distance (1 minus cosine similarity) for raw, unnormalized vectors."""
    similarity = compute_cosine_similarity_with_norm(vector1, vector2)
    return 1 - similarity
def compute_cosine_distance_normalized_inputs( vector1, vector2):
    # Cosine distance for inputs already normalized to unit length:
    # the bare dot product is then the cosine similarity.
    return 1 - dot_product(vector1, vector2) | [
"numpy.sqrt",
"numpy.square",
"collections.Counter",
"numpy.array",
"numpy.sum"
] | [((1376, 1395), 'numpy.sqrt', 'np.sqrt', (['vector_sum'], {}), '(vector_sum)\n', (1383, 1395), True, 'import numpy as np\n'), ((1483, 1499), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (1491, 1499), True, 'import numpy as np\n'), ((1796, 1813), 'numpy.array', 'np.array', (['vector1'], {}), '(vector1)\n', (1804, 1813), True, 'import numpy as np\n'), ((1823, 1840), 'numpy.array', 'np.array', (['vector2'], {}), '(vector2)\n', (1831, 1840), True, 'import numpy as np\n'), ((1346, 1363), 'numpy.square', 'np.square', (['vector'], {}), '(vector)\n', (1355, 1363), True, 'import numpy as np\n'), ((1861, 1883), 'numpy.sum', 'np.sum', (['((v1 - v2) ** 2)'], {}), '((v1 - v2) ** 2)\n', (1867, 1883), True, 'import numpy as np\n'), ((1940, 1957), 'numpy.array', 'np.array', (['vector1'], {}), '(vector1)\n', (1948, 1957), True, 'import numpy as np\n'), ((1959, 1976), 'numpy.array', 'np.array', (['vector2'], {}), '(vector2)\n', (1967, 1976), True, 'import numpy as np\n'), ((356, 385), 'collections.Counter', 'collections.Counter', (['doc_freq'], {}), '(doc_freq)\n', (375, 385), False, 'import collections\n')] |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import pytest
from modnet.preprocessing import get_cross_nmi
from modnet.preprocessing import nmi_target
def test_nmi_target():
    """Regression test for nmi_target.

    Covers: linear data (NMI exactly 1 with n_neighbors=2), shuffled linear
    data, a constant feature (dropped by default), an unrelated grid (NMI 0),
    the two input-validation errors, and a noisy dataset pinned to a fixed
    random_state (exact regression constant).
    """
    # Test with linear data (should get 1.0 mutual information, or very close due to algorithm used
    # in mutual_info_regression)
    npoints = 31
    x = np.linspace(0.5, 3.5, npoints)
    y = 2*x - 2
    z = 4*x + 2
    df_feat = pd.DataFrame({'x': x, 'y': y})
    df_target = pd.DataFrame({'z': z})
    # Here we fix the number of neighbors for the call to sklearn.feature_selection's mutual_info_regression to 2 so
    # that we get exactly 1 for the mutual information.
    df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target, n_neighbors=2)
    assert df_nmi_target.shape == (2, 1)
    assert df_nmi_target.loc['x']['z'] == pytest.approx(1.0)
    assert df_nmi_target.loc['y']['z'] == pytest.approx(1.0)
    # Same data shuffled
    # Shuffle the x, y and z
    indices = np.arange(npoints)
    np.random.seed(42)
    np.random.shuffle(indices)
    xs = x.take(indices)
    ys = y.take(indices)
    zs = z.take(indices)
    df_feat = pd.DataFrame({'x': xs, 'y': ys})
    df_target = pd.DataFrame({'z': zs})
    df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target, n_neighbors=2)
    assert df_nmi_target.shape == (2, 1)
    assert df_nmi_target.loc['x']['z'] == pytest.approx(1.0)
    assert df_nmi_target.loc['y']['z'] == pytest.approx(1.0)
    # Test with one constant feature
    c = np.ones(npoints) * 1.4
    df_feat = pd.DataFrame({'x': x, 'y': y, 'c': c})
    df_target = pd.DataFrame({'z': z})
    df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target, n_neighbors=2)
    # Constant feature is dropped by default -> only x and y remain.
    assert df_nmi_target.shape == (2, 1)
    assert df_nmi_target.loc['x']['z'] == pytest.approx(1.0)
    assert df_nmi_target.loc['y']['z'] == pytest.approx(1.0)
    df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target, drop_constant_features=False, n_neighbors=2)
    # With drop_constant_features=False the constant column is kept with NMI 0.
    assert df_nmi_target.shape == (3, 1)
    assert df_nmi_target.loc['x']['z'] == pytest.approx(1.0)
    assert df_nmi_target.loc['y']['z'] == pytest.approx(1.0)
    assert df_nmi_target.loc['c']['z'] == pytest.approx(0.0)
    # Test with unrelated data (grid)
    x = np.linspace(start=2, stop=5, num=4)
    z = np.linspace(start=3, stop=7, num=5)
    x, z = np.meshgrid(x, z)
    x = x.flatten()
    z = z.flatten()
    df_feat = pd.DataFrame({'x': x})
    df_target = pd.DataFrame({'z': z})
    df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target)
    assert df_nmi_target.shape == (1, 1)
    assert df_nmi_target.loc['x']['z'] == pytest.approx(0.0)
    # Test initial checks
    # Incompatible shapes
    x = np.linspace(start=2, stop=3, num=5)
    z = np.linspace(start=2, stop=3, num=8)
    df_feat = pd.DataFrame({'x': x})
    df_target = pd.DataFrame({'z': z})
    with pytest.raises(ValueError, match=r'The input features DataFrame and the target variable DataFrame '
                                         r'should contain the same number of data points.'):
        nmi_target(df_feat=df_feat, df_target=df_target)
    # Target DataFrame does not have exactly one column
    x = np.linspace(start=2, stop=3, num=5)
    z = np.linspace(start=2, stop=3, num=5)
    df_feat = pd.DataFrame({'x': x})
    df_target = pd.DataFrame({'z2': z, 'z': z})
    with pytest.raises(ValueError, match=r'The target DataFrame should have exactly one column.'):
        nmi_target(df_feat=df_feat, df_target=df_target)
    # Test with some more real data (for which NMI is not just 0.0 or 1.0)
    npoints = 200
    np.random.seed(42)
    x = np.random.rand(npoints)
    z = 4 * x + 1.0 * np.random.rand(npoints)
    df_feat = pd.DataFrame({'x': x})
    df_target = pd.DataFrame({'z': z})
    # Here we fix the random_state for the call to sklearn.feature_selection's mutual_info_regression so
    # that we always get the same value.
    df_nmi_target = nmi_target(df_feat=df_feat, df_target=df_target, random_state=42)
    assert df_nmi_target.shape == (1, 1)
    assert df_nmi_target.loc['x']['z'] == pytest.approx(0.3417665092162398)
def test_get_cross_nmi():
    """Regression test for get_cross_nmi.

    Covers: mutually-linear features (all pairwise NMI 1 with n_neighbors=2),
    shuffled data, a constant feature (NMI 0 against everything), an unrelated
    grid (NMI 0), and a noisy dataset pinned to a fixed random_state.
    """
    # Test with linear data (should get 1.0 mutual information, or very close due to algorithm used
    # in mutual_info_regression)
    npoints = 31
    x = np.linspace(0.5, 3.5, npoints)
    y = 2*x - 2
    z = 4*x + 2
    df_feat = pd.DataFrame({'x': x, 'y': y, 'z': z})
    # Here we fix the number of neighbors for the call to sklearn.feature_selection's mutual_info_regression to 2 so
    # that we get exactly 1 for the mutual information.
    df_cross_nmi = get_cross_nmi(df_feat=df_feat, n_neighbors=2)
    assert df_cross_nmi.shape == (3, 3)
    for idx in df_cross_nmi.index:
        for col in df_cross_nmi.columns:
            assert df_cross_nmi.loc[idx][col] == pytest.approx(1.0)
    # Same data shuffled
    # Shuffle the x, y and z
    indices = np.arange(npoints)
    np.random.seed(42)
    np.random.shuffle(indices)
    xs = x.take(indices)
    ys = y.take(indices)
    zs = z.take(indices)
    df_feat = pd.DataFrame({'x': xs, 'y': ys, 'z': zs})
    df_cross_nmi = get_cross_nmi(df_feat=df_feat, n_neighbors=2)
    assert df_cross_nmi.shape == (3, 3)
    for idx in df_cross_nmi.index:
        for col in df_cross_nmi.columns:
            assert df_cross_nmi.loc[idx][col] == pytest.approx(1.0)
    # Test with one constant feature
    c = np.ones(npoints) * 1.4
    df_feat = pd.DataFrame({'x': x, 'y': y, 'z': z, 'c': c})
    df_cross_nmi = get_cross_nmi(df_feat=df_feat, n_neighbors=2)
    assert df_cross_nmi.shape == (4, 4)
    for idx in df_cross_nmi.index:
        for col in df_cross_nmi.columns:
            # Pairs involving the constant feature have zero NMI.
            expected = 0.0 if idx == 'c' or col == 'c' else 1.0
            assert df_cross_nmi.loc[idx][col] == pytest.approx(expected)
    # Test with unrelated data (grid)
    x = np.linspace(start=2, stop=5, num=4)
    y = np.linspace(start=3, stop=7, num=5)
    x, y = np.meshgrid(x, y)
    x = x.flatten()
    y = y.flatten()
    df_feat = pd.DataFrame({'x': x, 'y': y})
    df_cross_nmi = get_cross_nmi(df_feat=df_feat, n_neighbors=2)
    assert df_cross_nmi.shape == (2, 2)
    assert df_cross_nmi.loc['x']['y'] == pytest.approx(0.0)
    assert df_cross_nmi.loc['y']['x'] == pytest.approx(0.0)
    # Test with some more real data (for which NMI is not just 0.0 or 1.0)
    npoints = 200
    np.random.seed(42)
    x = np.random.rand(npoints)
    y = 4 * x + 1.0 * np.random.rand(npoints)
    df_feat = pd.DataFrame({'x': x, 'y': y})
    # Here we fix the random_state for the call to sklearn.feature_selection's mutual_info_regression so
    # that we always get the same value.
    df_cross_nmi = get_cross_nmi(df_feat=df_feat, random_state=42)
    assert df_cross_nmi.shape == (2, 2)
    assert df_cross_nmi.loc['x']['x'] == pytest.approx(1.0)
    assert df_cross_nmi.loc['y']['y'] == pytest.approx(1.0)
    assert df_cross_nmi.loc['x']['y'] == pytest.approx(0.3417665092162398)
    assert df_cross_nmi.loc['y']['x'] == pytest.approx(0.3417665092162398)
assert df_cross_nmi.loc['y']['x'] == pytest.approx(0.3417665092162398)
| [
"pytest.approx",
"numpy.random.rand",
"numpy.ones",
"modnet.preprocessing.nmi_target",
"numpy.linspace",
"pytest.raises",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.meshgrid",
"modnet.preprocessing.get_cross_nmi",
"numpy.arange",
"numpy.random.shuffle"
] | [((352, 382), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3.5)', 'npoints'], {}), '(0.5, 3.5, npoints)\n', (363, 382), True, 'import numpy as np\n'), ((430, 460), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (442, 460), True, 'import pandas as pd\n'), ((477, 499), 'pandas.DataFrame', 'pd.DataFrame', (["{'z': z}"], {}), "({'z': z})\n", (489, 499), True, 'import pandas as pd\n'), ((694, 757), 'modnet.preprocessing.nmi_target', 'nmi_target', ([], {'df_feat': 'df_feat', 'df_target': 'df_target', 'n_neighbors': '(2)'}), '(df_feat=df_feat, df_target=df_target, n_neighbors=2)\n', (704, 757), False, 'from modnet.preprocessing import nmi_target\n'), ((991, 1009), 'numpy.arange', 'np.arange', (['npoints'], {}), '(npoints)\n', (1000, 1009), True, 'import numpy as np\n'), ((1014, 1032), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1028, 1032), True, 'import numpy as np\n'), ((1037, 1063), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1054, 1063), True, 'import numpy as np\n'), ((1154, 1186), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': xs, 'y': ys}"], {}), "({'x': xs, 'y': ys})\n", (1166, 1186), True, 'import pandas as pd\n'), ((1203, 1226), 'pandas.DataFrame', 'pd.DataFrame', (["{'z': zs}"], {}), "({'z': zs})\n", (1215, 1226), True, 'import pandas as pd\n'), ((1248, 1311), 'modnet.preprocessing.nmi_target', 'nmi_target', ([], {'df_feat': 'df_feat', 'df_target': 'df_target', 'n_neighbors': '(2)'}), '(df_feat=df_feat, df_target=df_target, n_neighbors=2)\n', (1258, 1311), False, 'from modnet.preprocessing import nmi_target\n'), ((1559, 1597), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'c': c}"], {}), "({'x': x, 'y': y, 'c': c})\n", (1571, 1597), True, 'import pandas as pd\n'), ((1614, 1636), 'pandas.DataFrame', 'pd.DataFrame', (["{'z': z}"], {}), "({'z': z})\n", (1626, 1636), True, 'import pandas as pd\n'), ((1658, 1721), 'modnet.preprocessing.nmi_target', 
'nmi_target', ([], {'df_feat': 'df_feat', 'df_target': 'df_target', 'n_neighbors': '(2)'}), '(df_feat=df_feat, df_target=df_target, n_neighbors=2)\n', (1668, 1721), False, 'from modnet.preprocessing import nmi_target\n'), ((1906, 2004), 'modnet.preprocessing.nmi_target', 'nmi_target', ([], {'df_feat': 'df_feat', 'df_target': 'df_target', 'drop_constant_features': '(False)', 'n_neighbors': '(2)'}), '(df_feat=df_feat, df_target=df_target, drop_constant_features=\n False, n_neighbors=2)\n', (1916, 2004), False, 'from modnet.preprocessing import nmi_target\n'), ((2271, 2306), 'numpy.linspace', 'np.linspace', ([], {'start': '(2)', 'stop': '(5)', 'num': '(4)'}), '(start=2, stop=5, num=4)\n', (2282, 2306), True, 'import numpy as np\n'), ((2315, 2350), 'numpy.linspace', 'np.linspace', ([], {'start': '(3)', 'stop': '(7)', 'num': '(5)'}), '(start=3, stop=7, num=5)\n', (2326, 2350), True, 'import numpy as np\n'), ((2362, 2379), 'numpy.meshgrid', 'np.meshgrid', (['x', 'z'], {}), '(x, z)\n', (2373, 2379), True, 'import numpy as np\n'), ((2434, 2456), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {}), "({'x': x})\n", (2446, 2456), True, 'import pandas as pd\n'), ((2473, 2495), 'pandas.DataFrame', 'pd.DataFrame', (["{'z': z}"], {}), "({'z': z})\n", (2485, 2495), True, 'import pandas as pd\n'), ((2517, 2565), 'modnet.preprocessing.nmi_target', 'nmi_target', ([], {'df_feat': 'df_feat', 'df_target': 'df_target'}), '(df_feat=df_feat, df_target=df_target)\n', (2527, 2565), False, 'from modnet.preprocessing import nmi_target\n'), ((2729, 2764), 'numpy.linspace', 'np.linspace', ([], {'start': '(2)', 'stop': '(3)', 'num': '(5)'}), '(start=2, stop=3, num=5)\n', (2740, 2764), True, 'import numpy as np\n'), ((2773, 2808), 'numpy.linspace', 'np.linspace', ([], {'start': '(2)', 'stop': '(3)', 'num': '(8)'}), '(start=2, stop=3, num=8)\n', (2784, 2808), True, 'import numpy as np\n'), ((2823, 2845), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {}), "({'x': x})\n", (2835, 2845), True, 
'import pandas as pd\n'), ((2862, 2884), 'pandas.DataFrame', 'pd.DataFrame', (["{'z': z}"], {}), "({'z': z})\n", (2874, 2884), True, 'import pandas as pd\n'), ((3207, 3242), 'numpy.linspace', 'np.linspace', ([], {'start': '(2)', 'stop': '(3)', 'num': '(5)'}), '(start=2, stop=3, num=5)\n', (3218, 3242), True, 'import numpy as np\n'), ((3251, 3286), 'numpy.linspace', 'np.linspace', ([], {'start': '(2)', 'stop': '(3)', 'num': '(5)'}), '(start=2, stop=3, num=5)\n', (3262, 3286), True, 'import numpy as np\n'), ((3301, 3323), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {}), "({'x': x})\n", (3313, 3323), True, 'import pandas as pd\n'), ((3340, 3371), 'pandas.DataFrame', 'pd.DataFrame', (["{'z2': z, 'z': z}"], {}), "({'z2': z, 'z': z})\n", (3352, 3371), True, 'import pandas as pd\n'), ((3626, 3644), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3640, 3644), True, 'import numpy as np\n'), ((3653, 3676), 'numpy.random.rand', 'np.random.rand', (['npoints'], {}), '(npoints)\n', (3667, 3676), True, 'import numpy as np\n'), ((3738, 3760), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {}), "({'x': x})\n", (3750, 3760), True, 'import pandas as pd\n'), ((3777, 3799), 'pandas.DataFrame', 'pd.DataFrame', (["{'z': z}"], {}), "({'z': z})\n", (3789, 3799), True, 'import pandas as pd\n'), ((3967, 4032), 'modnet.preprocessing.nmi_target', 'nmi_target', ([], {'df_feat': 'df_feat', 'df_target': 'df_target', 'random_state': '(42)'}), '(df_feat=df_feat, df_target=df_target, random_state=42)\n', (3977, 4032), False, 'from modnet.preprocessing import nmi_target\n'), ((4337, 4367), 'numpy.linspace', 'np.linspace', (['(0.5)', '(3.5)', 'npoints'], {}), '(0.5, 3.5, npoints)\n', (4348, 4367), True, 'import numpy as np\n'), ((4415, 4453), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'z': z}"], {}), "({'x': x, 'y': y, 'z': z})\n", (4427, 4453), True, 'import pandas as pd\n'), ((4647, 4692), 'modnet.preprocessing.get_cross_nmi', 'get_cross_nmi', ([], 
{'df_feat': 'df_feat', 'n_neighbors': '(2)'}), '(df_feat=df_feat, n_neighbors=2)\n', (4660, 4692), False, 'from modnet.preprocessing import get_cross_nmi\n'), ((4947, 4965), 'numpy.arange', 'np.arange', (['npoints'], {}), '(npoints)\n', (4956, 4965), True, 'import numpy as np\n'), ((4970, 4988), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4984, 4988), True, 'import numpy as np\n'), ((4993, 5019), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (5010, 5019), True, 'import numpy as np\n'), ((5110, 5151), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': xs, 'y': ys, 'z': zs}"], {}), "({'x': xs, 'y': ys, 'z': zs})\n", (5122, 5151), True, 'import pandas as pd\n'), ((5172, 5217), 'modnet.preprocessing.get_cross_nmi', 'get_cross_nmi', ([], {'df_feat': 'df_feat', 'n_neighbors': '(2)'}), '(df_feat=df_feat, n_neighbors=2)\n', (5185, 5217), False, 'from modnet.preprocessing import get_cross_nmi\n'), ((5486, 5532), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y, 'z': z, 'c': c}"], {}), "({'x': x, 'y': y, 'z': z, 'c': c})\n", (5498, 5532), True, 'import pandas as pd\n'), ((5553, 5598), 'modnet.preprocessing.get_cross_nmi', 'get_cross_nmi', ([], {'df_feat': 'df_feat', 'n_neighbors': '(2)'}), '(df_feat=df_feat, n_neighbors=2)\n', (5566, 5598), False, 'from modnet.preprocessing import get_cross_nmi\n'), ((5899, 5934), 'numpy.linspace', 'np.linspace', ([], {'start': '(2)', 'stop': '(5)', 'num': '(4)'}), '(start=2, stop=5, num=4)\n', (5910, 5934), True, 'import numpy as np\n'), ((5943, 5978), 'numpy.linspace', 'np.linspace', ([], {'start': '(3)', 'stop': '(7)', 'num': '(5)'}), '(start=3, stop=7, num=5)\n', (5954, 5978), True, 'import numpy as np\n'), ((5990, 6007), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (6001, 6007), True, 'import numpy as np\n'), ((6062, 6092), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (6074, 6092), True, 'import pandas as pd\n'), ((6113, 
6158), 'modnet.preprocessing.get_cross_nmi', 'get_cross_nmi', ([], {'df_feat': 'df_feat', 'n_neighbors': '(2)'}), '(df_feat=df_feat, n_neighbors=2)\n', (6126, 6158), False, 'from modnet.preprocessing import get_cross_nmi\n'), ((6417, 6435), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (6431, 6435), True, 'import numpy as np\n'), ((6444, 6467), 'numpy.random.rand', 'np.random.rand', (['npoints'], {}), '(npoints)\n', (6458, 6467), True, 'import numpy as np\n'), ((6529, 6559), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (6541, 6559), True, 'import pandas as pd\n'), ((6726, 6773), 'modnet.preprocessing.get_cross_nmi', 'get_cross_nmi', ([], {'df_feat': 'df_feat', 'random_state': '(42)'}), '(df_feat=df_feat, random_state=42)\n', (6739, 6773), False, 'from modnet.preprocessing import get_cross_nmi\n'), ((842, 860), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (855, 860), False, 'import pytest\n'), ((903, 921), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (916, 921), False, 'import pytest\n'), ((1396, 1414), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (1409, 1414), False, 'import pytest\n'), ((1457, 1475), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (1470, 1475), False, 'import pytest\n'), ((1522, 1538), 'numpy.ones', 'np.ones', (['npoints'], {}), '(npoints)\n', (1529, 1538), True, 'import numpy as np\n'), ((1805, 1823), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (1818, 1823), False, 'import pytest\n'), ((1866, 1884), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (1879, 1884), False, 'import pytest\n'), ((2083, 2101), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (2096, 2101), False, 'import pytest\n'), ((2144, 2162), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (2157, 2162), False, 'import pytest\n'), ((2205, 2223), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), 
'(0.0)\n', (2218, 2223), False, 'import pytest\n'), ((2649, 2667), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), '(0.0)\n', (2662, 2667), False, 'import pytest\n'), ((2894, 3048), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""The input features DataFrame and the target variable DataFrame should contain the same number of data points."""'}), "(ValueError, match=\n 'The input features DataFrame and the target variable DataFrame should contain the same number of data points.'\n )\n", (2907, 3048), False, 'import pytest\n'), ((3094, 3142), 'modnet.preprocessing.nmi_target', 'nmi_target', ([], {'df_feat': 'df_feat', 'df_target': 'df_target'}), '(df_feat=df_feat, df_target=df_target)\n', (3104, 3142), False, 'from modnet.preprocessing import nmi_target\n'), ((3381, 3473), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""The target DataFrame should have exactly one column."""'}), "(ValueError, match=\n 'The target DataFrame should have exactly one column.')\n", (3394, 3473), False, 'import pytest\n'), ((3479, 3527), 'modnet.preprocessing.nmi_target', 'nmi_target', ([], {'df_feat': 'df_feat', 'df_target': 'df_target'}), '(df_feat=df_feat, df_target=df_target)\n', (3489, 3527), False, 'from modnet.preprocessing import nmi_target\n'), ((4116, 4149), 'pytest.approx', 'pytest.approx', (['(0.3417665092162398)'], {}), '(0.3417665092162398)\n', (4129, 4149), False, 'import pytest\n'), ((5449, 5465), 'numpy.ones', 'np.ones', (['npoints'], {}), '(npoints)\n', (5456, 5465), True, 'import numpy as np\n'), ((6240, 6258), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), '(0.0)\n', (6253, 6258), False, 'import pytest\n'), ((6300, 6318), 'pytest.approx', 'pytest.approx', (['(0.0)'], {}), '(0.0)\n', (6313, 6318), False, 'import pytest\n'), ((6855, 6873), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (6868, 6873), False, 'import pytest\n'), ((6915, 6933), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (6928, 6933), 
False, 'import pytest\n'), ((6975, 7008), 'pytest.approx', 'pytest.approx', (['(0.3417665092162398)'], {}), '(0.3417665092162398)\n', (6988, 7008), False, 'import pytest\n'), ((7050, 7083), 'pytest.approx', 'pytest.approx', (['(0.3417665092162398)'], {}), '(0.3417665092162398)\n', (7063, 7083), False, 'import pytest\n'), ((3699, 3722), 'numpy.random.rand', 'np.random.rand', (['npoints'], {}), '(npoints)\n', (3713, 3722), True, 'import numpy as np\n'), ((6490, 6513), 'numpy.random.rand', 'np.random.rand', (['npoints'], {}), '(npoints)\n', (6504, 6513), True, 'import numpy as np\n'), ((4859, 4877), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (4872, 4877), False, 'import pytest\n'), ((5384, 5402), 'pytest.approx', 'pytest.approx', (['(1.0)'], {}), '(1.0)\n', (5397, 5402), False, 'import pytest\n'), ((5828, 5851), 'pytest.approx', 'pytest.approx', (['expected'], {}), '(expected)\n', (5841, 5851), False, 'import pytest\n')] |
import numpy as np
from sklearn.metrics import roc_auc_score
from numba import jit
def array2str(tmp_array, sep=" "):
    """Join the elements of *tmp_array* as 3-decimal strings separated by *sep*."""
    return sep.join(f"{item:.3f}" for item in tmp_array)
def generate_sorted_groups(pred, y, a):
    """Split predictions/labels by sensitive attribute and sort by descending score.

    Group "a" is where ``a == 0`` and group "b" where ``a == 1``.  Returns
    (a_scores, b_scores, a_labels, b_labels), each sorted so scores run from
    big to small with labels permuted in lockstep.
    """
    def _sorted_group(group_value):
        mask = np.where(a == group_value)
        scores = pred[mask].reshape(-1)
        order = np.argsort(-scores)  # negate for descending sort
        return scores[order], y[mask][order]

    a_scores, a_labels = _sorted_group(0)
    b_scores, b_labels = _sorted_group(1)
    return a_scores, b_scores, a_labels, b_labels
def cal_fairness_metric_by_groups(a_score, b_score, a_label, b_label, metric="xauc"):
    """Fairness gap |m_ab - m_ba| plus both directional metrics for two groups.

    metric="xauc" uses cross-AUC; anything else falls back to the pairwise
    metric.
    """
    if metric == "xauc":
        pair_ab, pair_ba = xAUC_fast(a_score, b_score, a_label, b_label)[:2]
    else:
        pair_ab, pair_ba = pairwise_fast(a_score, b_score, a_label, b_label)
    gap = abs(pair_ab - pair_ba)
    return gap, pair_ab, pair_ba
def cal_fairness_metric(pred, y, a, metric="xauc"):
    """Split by sensitive attribute *a* (0 vs 1), then compute the fairness gap.

    Returns (|m_ab - m_ba|, m_ab, m_ba) using cross-AUC when metric="xauc",
    otherwise the pairwise metric.
    """
    group_a = np.where(a == 0)
    group_b = np.where(a == 1)
    a_score = pred[group_a].reshape(-1)
    b_score = pred[group_b].reshape(-1)
    a_label = y[group_a].reshape(-1)
    b_label = y[group_b].reshape(-1)
    if metric == "xauc":
        pair_ab, pair_ba = xAUC_fast(a_score, b_score, a_label, b_label)[:2]
    else:
        pair_ab, pair_ba = pairwise_fast(a_score, b_score, a_label, b_label)
    return abs(pair_ab - pair_ba), pair_ab, pair_ba
def AUC(score, label):
    """Empirical AUC by exhaustive pair counting over 0/1 labels.

    Returns (auc, concordant_pairs): the fraction of (positive, negative)
    pairs where the positive's score is strictly higher, and the raw count.
    """
    n = len(label)
    concordant = sum(
        1
        for i in range(n)
        for j in range(n)
        if label[i] == 1 and label[j] == 0 and score[i] > score[j]
    )
    positives = np.sum(label)
    return concordant / (positives * (n - positives)), concordant
def xAUC(a_score, b_score, a_label, b_label):
    """Cross-group AUC terms counted pair by pair.

    Returns (xauc_ab, xauc_ba, total): "ab" ranks group-a positives against
    group-b negatives, "ba" group-b positives against group-a negatives, and
    *total* is the combined count of concordant cross pairs.
    """
    a_pos = np.sum(a_label)
    a_neg = len(a_label) - a_pos
    b_pos = np.sum(b_label)
    b_neg = len(b_label) - b_pos
    count_ab = 0
    count_ba = 0
    for score_a, label_a in zip(a_score, a_label):
        for score_b, label_b in zip(b_score, b_label):
            if label_a == 1 and label_b == 0 and score_a > score_b:
                count_ab += 1
            elif label_a == 0 and label_b == 1 and score_b > score_a:
                count_ba += 1
    return count_ab / (a_pos * b_neg), count_ba / (b_pos * a_neg), count_ab + count_ba
def xAUC_fast(a_score, b_score, a_label, b_label):
    """Vectorized cross-AUC using sklearn's roc_auc_score on mixed-group pools.

    Returns (xauc_ab, xauc_ba, weighted_total) where the last term rescales
    each AUC back to a concordant-pair count.
    """
    a_pos = np.sum(a_label)
    a_neg = len(a_label) - a_pos
    b_pos = np.sum(b_label)
    b_neg = len(b_label) - b_pos
    a_pos_scores = a_score[a_label == 1]
    a_neg_scores = a_score[a_label == 0]
    b_pos_scores = b_score[b_label == 1]
    b_neg_scores = b_score[b_label == 0]
    # Group-a positives ranked against group-b negatives.
    labels_ab = np.concatenate((np.ones(int(a_pos)), np.zeros(int(b_neg))))
    scores_ab = np.concatenate((a_pos_scores, b_neg_scores))
    xauc_ab = roc_auc_score(labels_ab, scores_ab)
    # Group-b positives ranked against group-a negatives.
    labels_ba = np.concatenate((np.ones(int(b_pos)), np.zeros(int(a_neg))))
    scores_ba = np.concatenate((b_pos_scores, a_neg_scores))
    xauc_ba = roc_auc_score(labels_ba, scores_ba)
    return xauc_ab, xauc_ba, xauc_ab * a_pos * b_neg + xauc_ba * b_pos * a_neg
def post_score(train_score, train_score_post, test_score):
    """Map test scores onto post-processed train scores by per-bin interpolation.

    train_score: training scores sorted in DECREASING order (bin boundaries).
    train_score_post: post-processed score for each boundary (same ordering).
    test_score: scores to transform; the running `tep_id` pointer only ever
        moves forward, so test_score is assumed sorted decreasingly as
        well -- TODO confirm against callers.
    Returns a numpy array of the transformed scores.
    """
    tep_id = 0
    # bins[j] collects test scores between train_score[j-1] and train_score[j];
    # bins[0] holds scores above the largest boundary, bins[-1] at/below the smallest.
    bins = [[] for i in range(len(train_score)+1)]
    for i in range(len(test_score)):
        s = test_score[i]
        if s>train_score[0]:
            bins[0].append(s)
        elif s<=train_score[-1]:
            bins[-1].append(s)
        else:
            # Resume scanning from the last matched bin (monotone pointer).
            for j in range(tep_id,len(train_score)):
                if train_score[j-1]>=s and train_score[j]<s:
                    bins[j].append(s)
                    tep_id = j
                    break
    changed_b_score = []
    # Within each bin, linearly interpolate between the post scores of its two
    # boundaries; the top bin interpolates toward 1 and the bottom bin toward 0.
    for bin_ in range(len(bins)):
        for item in range(len(bins[bin_])):
            num = (len(bins[bin_]))
            if bin_==0:
                changed_b_score.append((item)*train_score_post[bin_]/num+(num-item)/num)
            elif bin_==len(train_score_post):
                changed_b_score.append((num -item)*train_score_post[bin_-1]/num)
            else:
                changed_b_score.append((item)*train_score_post[bin_]/num + (num-item)*train_score_post[bin_-1]/num)
    return np.array(changed_b_score)
@jit(nopython=True)
def maxAUC(a_label, b_label):
    """Dynamic program over interleavings of two ranked lists maximizing AUC.

    a_label/b_label are 0/1 label arrays; index 0 appears unused (loops start
    at 1 and sizes are len-1), i.e. 1-based arrays with a sentinel slot --
    TODO confirm against callers.  cost[j, k] accumulates concordant
    positive/negative pair counts; path stores, for each DP cell, the
    predecessor and current cell as a 2x2 block for backtracking.
    Returns (cost[M, N], path).
    """
    M = len(a_label)-1
    N = len(b_label)-1
    a_1 = np.sum(a_label)
    b_1 = np.sum(b_label)
    path = np.zeros((M+1, N+1,2,2))
    cost = np.zeros((M+1, N+1))
    # First column: take only group-a items; each a-positive beats all b-negatives.
    for i in range(1,M+1):
        if a_label[i]==1:
            cost[i,0] = N-b_1 + cost[i-1, 0]
        else:
            cost[i,0] = cost[i-1,0]
        path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])
    # First row: take only group-b items; each b-positive beats all a-negatives.
    for i in range(1,N+1):
        if b_label[i]==1:
            cost[0, i] = cost[0,i-1]+ M - a_1
        else:
            cost[0, i] = cost[0,i-1]
        path[0,i,:,:] = np.array([[0, i-1],[0, i]])
    # Fill anti-diagonals: cell (j, i-j) consumes either a_label[j] or b_label[i-j].
    for i in range(2, M+1+N+1):
        for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]
            # tep_b: pairs gained by placing a_label[j] ahead of remaining b negatives.
            if i-j+1>N or a_label[j]==0:
                tep_b = 0
            else:
                tep_b = N - (i-j) - np.sum(b_label[i-j+1:])
            # tep_a: pairs gained by placing b_label[i-j] ahead of remaining a negatives.
            if j+1>M or b_label[i-j]==0:
                tep_a = 0
            else:
                tep_a = M - j -np.sum(a_label[j+1:])
            if cost[j-1, i-j] + tep_b > cost[j, i-j-1] + tep_a:
                cost[j, i-j] = cost[j-1, i-j] + tep_b
                path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])
            else:
                cost[j, i-j] = cost[j, i-j-1] + tep_a
                path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])
    return cost[M,N], path
@jit(nopython=True)
def xAUC_post(a_label, b_label, lamb):
    """Fairness-regularized variant of the maxAUC dynamic program.

    Alongside `cost` (concordant pair count) it tracks `cost_unfair`, a signed
    cross-group xAUC imbalance term scaled by `lamb`; transitions maximize
    cost minus the absolute accumulated imbalance.  Arrays appear 1-based
    with a sentinel at index 0 (sizes are len-1) -- TODO confirm.
    Returns the full (cost, path, cost_unfair) DP tables.
    """
    M = len(a_label)-1
    N = len(b_label)-1
    a_1 = np.sum(a_label)
    b_1 = np.sum(b_label)
    # Normalizers: counts of cross-group (positive, negative) pairs.
    a_1_b_0 = a_1*(N-b_1)
    b_1_a_0 = b_1*(M - a_1)
    path = np.zeros((M+1, N+1,2,2))
    cost_unfair = np.zeros((M+1, N+1))
    cost = np.zeros((M+1, N+1))
    # First column: only group-a items taken.
    for i in range(1,M+1):
        if a_label[i]==1:
            cost_unfair[i, 0] = (N-b_1)/a_1_b_0*lamb + cost_unfair[i-1,0]
            cost[i,0] = N-b_1 + cost[i-1, 0]
        else:
            cost_unfair[i, 0] = cost_unfair[i-1,0]
            cost[i,0] = cost[i-1,0]
        path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])
    # First row: only group-b items taken (imbalance contribution is negative).
    for i in range(1,N+1):
        if b_label[i]==1:
            cost_unfair[0,i] = -(M-a_1)/b_1_a_0*lamb + cost_unfair[0, i-1]
            cost[0, i] = cost[0,i-1] + M - a_1
        else:
            cost[0, i] = cost[0,i-1]
            cost_unfair[0, i] = cost_unfair[0,i-1]
        path[0,i,:,:] = np.array([[0, i-1],[0, i]])
    # Fill anti-diagonals; each cell picks the better of "take next a" / "take next b".
    for i in range(2, M+1+N+1):
        for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]
            if i-j+1>N or a_label[j]==0:
                tep_b = 0
                tep_unfair_b = 0
            else:
                tep_b = N - (i-j) - np.sum(b_label[i-j+1:])
                tep_unfair_b = tep_b/a_1_b_0*lamb
            if j+1>M or b_label[i-j]==0:
                tep_a = 0
                tep_unfair_a = 0
            else:
                tep_a = M - j -np.sum(a_label[j+1:])
                tep_unfair_a = -tep_a/b_1_a_0*lamb
            # Objective: pair count minus |accumulated imbalance|.
            if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):
                cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]
                cost[j, i-j] = cost[j-1, i-j] + tep_b
                path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])
            else:
                cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]
                cost[j, i-j] = cost[j, i-j-1] + tep_a
                path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])
    return cost, path, cost_unfair
@jit(nopython=True)
def xAUC_post_(a_label, b_label, lamb):
    """Variant of ``xAUC_post`` without the explicit end-of-range guards
    in the recurrence.

    Since an out-of-range slice sums to zero, the gains presumably
    coincide with ``xAUC_post`` for valid indices -- TODO confirm before
    treating the two as interchangeable.  See ``xAUC_post`` for the
    meaning of the returned ``(cost, path, cost_unfair)``.
    """
    M = len(a_label)-1
    N = len(b_label)-1
    # Positives per group and cross-group pair counts for normalization.
    a_1 = np.sum(a_label)
    b_1 = np.sum(b_label)
    a_1_b_0 = a_1*(N-b_1)
    b_1_a_0 = b_1*(M - a_1)
    path = np.zeros((M+1, N+1,2,2))
    cost_unfair = np.zeros((M+1, N+1))
    cost = np.zeros((M+1, N+1))
    # First column: emitting elements of a only.
    for i in range(1,M+1):
        if a_label[i]==1:
            cost_unfair[i, 0] = (N-b_1)/a_1_b_0 * lamb + cost_unfair[i-1,0]
            cost[i,0] = N-b_1 + cost[i-1, 0]
        else:
            cost_unfair[i, 0] = cost_unfair[i-1,0]
            cost[i,0] = cost[i-1,0]
        path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])
    # First row: emitting elements of b only.
    for i in range(1,N+1):
        if b_label[i]==1:
            cost_unfair[0,i] = -(M - a_1) / b_1_a_0 * lamb + cost_unfair[0, i-1]
            cost[0, i] = cost[0,i-1] + M - a_1
        else:
            cost[0, i] = cost[0,i-1]
            cost_unfair[0, i] = cost_unfair[0,i-1]
        path[0,i,:,:] = np.array([[0, i-1],[0, i]])
    # Anti-diagonal fill: i items emitted in total, j of them from a.
    for i in range(2, M+1+N+1):
        for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]
            if a_label[j]==0:
                tep_b = 0
                tep_unfair_b = 0
            else:
                # Gain of emitting a[j] over the remaining b negatives.
                tep_b = N - (i-j) - np.sum(b_label[i-j+1:])
                tep_unfair_b = tep_b/a_1_b_0*lamb
            if b_label[i-j]==0:
                tep_a = 0
                tep_unfair_a = 0
            else:
                # Gain of emitting b[i-j] over the remaining a negatives.
                tep_a = M - j -np.sum(a_label[j+1:])
                tep_unfair_a = -tep_a/b_1_a_0*lamb
            # Predecessor maximizing score gain minus |accumulated unfairness|.
            if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):
                cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]
                cost[j, i-j] = cost[j-1, i-j] + tep_b
                path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])
            else:
                cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]
                cost[j, i-j] = cost[j, i-j-1] + tep_a
                path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])
    return cost, path, cost_unfair
@jit(nopython=True)
def pairwise_post(a_label, b_label, lamb):
    """Dynamic program over the interleaving of two ranked groups, like
    ``xAUC_post`` but with the unfairness increments normalized over ALL
    remaining negatives from both groups (pairwise-fairness variant).

    ``a_label`` / ``b_label`` are read from index 1 on (index 0 is
    presumably a sentinel) and are assumed sorted in decreasing score
    order -- TODO confirm.  Returns ``(cost, path, cost_unfair)`` for
    backtracking.
    """
    M = len(a_label)-1
    N = len(b_label)-1
    a_1 = np.sum(a_label)
    b_1 = np.sum(b_label)
    # Normalizers: positives of one group times all negatives of both.
    a_1_0 = a_1*((N-b_1)+(M - a_1))
    b_1_0 = b_1*((M - a_1)+(N-b_1))
    path = np.zeros((M+1, N+1,2,2))
    cost_unfair = np.zeros((M+1, N+1))
    cost = np.zeros((M+1, N+1))
    # zeros_mat[j, k]: count of negatives (from both groups) still
    # unemitted after taking j items from a and k items from b.
    zeros_mat = np.zeros((M+1, N+1))
    zeros_mat[0,0] = ((N-b_1)+(M - a_1))
    for i in range(1,N+1):
        if b_label[i]==1:
            zeros_mat[0,i] = zeros_mat[0,i-1]
        else:
            zeros_mat[0,i] = zeros_mat[0,i-1]-1
    for i in range(1,M+1):
        if a_label[i]==0:
            zeros_mat[i,0] = zeros_mat[i-1,0]-1
        else:
            zeros_mat[i,0] = zeros_mat[i-1,0]
        for j in range(1,N+1):
            if b_label[j]==0:
                zeros_mat[i,j] = zeros_mat[i,j-1]-1
            else:
                zeros_mat[i,j] = zeros_mat[i,j-1]
    # Boundary column: emitting elements of a only.
    for i in range(1,M+1):
        if a_label[i]==1:
            cost_unfair[i, 0] = zeros_mat[i,0]/a_1_0*lamb + cost_unfair[i-1,0]
            cost[i,0] = N-b_1 + cost[i-1, 0]
        else:
            cost_unfair[i, 0] = cost_unfair[i-1,0]
            cost[i,0] = cost[i-1,0]
        path[i,0,:,:] = np.array([[i-1, 0], [ i, 0]])
    # Boundary row: emitting elements of b only.
    for i in range(1,N+1):
        if b_label[i]==1:
            cost_unfair[0,i] = -zeros_mat[0,i]/b_1_0*lamb + cost_unfair[0, i-1]
            cost[0, i] = cost[0,i-1] + M - a_1
        else:
            cost[0, i] = cost[0,i-1]
            cost_unfair[0, i] = cost_unfair[0, i-1]
        path[0,i,:,:] = np.array([[0, i-1],[0, i]])
    # Recurrence mirrors xAUC_post; only the unfairness increments differ
    # (they use the zeros_mat table above).
    for i in range(2, M+1+N+1):
        for j in range(max(1, i-N), min(i, M+1)): # j[1, i-1]
            if a_label[j]==0:
                tep_b = 0
                tep_unfair_b = 0
            else:
                # Gain of emitting a[j] over the remaining b negatives.
                tep_b = N - (i-j) - np.sum(b_label[i-j+1:])
                tep_unfair_b = zeros_mat[j,i-j]/a_1_0*lamb
            if b_label[i-j]==0:
                tep_a = 0
                tep_unfair_a = 0
            else:
                # Gain of emitting b[i-j] over the remaining a negatives.
                tep_a = M - j -np.sum(a_label[j+1:])
                tep_unfair_a = -zeros_mat[j,i-j]/b_1_0*lamb
            # Predecessor maximizing score gain minus |accumulated unfairness|.
            if cost[j-1, i-j] + tep_b - abs(tep_unfair_b + cost_unfair[j-1, i-j]) > cost[j, i-j-1] + tep_a - abs(tep_unfair_a + cost_unfair[j, i-j-1]):
                cost_unfair[j, i-j] = tep_unfair_b + cost_unfair[j-1, i-j]
                cost[j, i-j] = cost[j-1, i-j] + tep_b
                path[j, i-j,:,:] = np.array([[j-1, i-j], [j, i-j]])
            else:
                cost_unfair[j, i-j] = tep_unfair_a + cost_unfair[j, i-j-1]
                cost[j, i-j] = cost[j, i-j-1] + tep_a
                path[j, i-j,:,:] = np.array([[j, i-j-1], [j, i-j]])
    return cost, path, cost_unfair
def post_b_score(a_score, b_score, a_label, b_label, lamb = 0, _type="xauc"): ## score has to be decreasing.
    """Post-process group b's scores so the merged ranking realizes the
    DP-optimal interleaving of the two groups.

    _type selects the objective: "xauc" (``xAUC_post``), "AUC"
    (``maxAUC``) or "prf" (``pairwise_post``); any other value prints a
    message and exits the process.  Scores must already be sorted in
    decreasing order.

    Returns ``(new_b_scores, final_unfairness)``; the unfairness term is
    0 for the plain "AUC" objective.
    """
    M = len(a_score)
    N = len(b_score)
    if _type == "xauc":
        cost, path_ , cost_unfair = xAUC_post(a_label, b_label, lamb = lamb)
    elif _type=="AUC":
        cost, path_ = maxAUC(a_label, b_label)
    elif _type=="prf":
        cost, path_ , cost_unfair = pairwise_post(a_label, b_label, lamb = lamb)
    else:
        print("Unknown type")
        exit()
    @jit(nopython=True)
    def pathTrace(path):
        # Walk the predecessor links back from the terminal cell (M, N)
        # and return the visited cells in forward order.
        trace = []
        tep = path[M,N,:,:]
        trace.append(tep[-1,:])
        trace.append(tep[0,:])
        for i in range(M+N-1):
            tep = path[int(tep[0][0]), int(tep[0][1]), :,:]
            trace.append(tep[0,:])
        trace.reverse()
        return trace
    path = pathTrace(path_)
    # gap_a[k] collects the b positions that the optimal path inserts
    # while exactly k items of a have been emitted (a step that keeps the
    # row index unchanged means a b element was placed there).
    gap_a = [[] for i in range(M+1)]
    for i in range(1,len(path)):
        if int(path[i][0])==int(path[i-1][0]):
            gap_a[int(path[i][0])].append(int(path[i][1]))
    # Re-score each inserted b element by linear interpolation between
    # its neighboring a scores (between 1 and a_score[0] before the
    # first, and below a_score[-1] after the last).
    changed_b_score = []
    for bin_ in range(len(gap_a)):
        for item in range(len(gap_a[bin_])):
            num = (len(gap_a[bin_])+1)
            if bin_==0:
                changed_b_score.append((item+1)*a_score[bin_]/num+(num-item-1)/num)
            elif bin_==len(a_score):
                changed_b_score.append((num -item-1)*a_score[bin_-1]/num)
            else:
                changed_b_score.append((item+1)*a_score[bin_]/num + (num-item-1)*a_score[bin_-1]/num)
    if _type=="AUC":
        return np.array(changed_b_score), 0
    else:
        return np.array(changed_b_score), cost_unfair[-1, -1]
def pairwise(a_score, b_score, a_label, b_label):
    """Cross-group pairwise accuracies (xAUC components), brute force.

    Counts, pair by pair, how often a positive from one group outranks a
    negative from the other group, then folds in each group's own AUC so
    the result covers every (positive, pooled-negative) pair.

    Returns ``(xauc_ab, xauc_ba)``: the a-positive-vs-all-negative and
    b-positive-vs-all-negative accuracies.
    """
    n_a = len(a_label)
    n_b = len(b_label)
    a_pos = np.sum(a_label)
    a_neg = len(a_label) - a_pos
    b_pos = np.sum(b_label)
    b_neg = len(b_label) - b_pos
    # Within-group ranking quality, reused below to cover same-group pairs.
    auc_a = roc_auc_score(a_label, a_score)
    auc_b = roc_auc_score(b_label, b_score)
    wins_ab = 0
    wins_ba = 0
    # O(n_a * n_b) scan over all cross-group pairs.
    for ia in range(n_a):
        la = a_label[ia]
        sa = a_score[ia]
        for ib in range(n_b):
            lb = b_label[ib]
            sb = b_score[ib]
            if la == 1 and lb == 0:
                wins_ab += sa > sb
            elif la == 0 and lb == 1:
                wins_ba += sb > sa
    xauc_ab = (wins_ab + auc_a * a_neg * a_pos) / (a_pos * (b_neg + a_neg))
    xauc_ba = (wins_ba + auc_b * b_neg * b_pos) / (b_pos * (a_neg + b_neg))
    return xauc_ab, xauc_ba
def pairwise_fast(a_score, b_score, a_label, b_label):
    """Vectorized xAUC components via ``roc_auc_score`` on pooled groups.

    Builds, for each group, a binary problem of that group's positives
    against the pooled negatives of BOTH groups, and scores it with a
    single ``roc_auc_score`` call -- equivalent to ``pairwise`` but
    without the double loop.  Inputs must be numpy arrays (boolean-mask
    indexing is used).  Returns ``(xauc_ab, xauc_ba)``.
    """
    pos_a = np.sum(a_label)
    neg_a = len(a_label) - pos_a
    pos_b = np.sum(b_label)
    neg_b = len(b_label) - pos_b
    # Split each group's scores by label.
    a_pos_scores = a_score[a_label == 1]
    a_neg_scores = a_score[a_label == 0]
    b_pos_scores = b_score[b_label == 1]
    b_neg_scores = b_score[b_label == 0]
    # a-positives against all pooled negatives.
    y_ab = np.concatenate((np.ones(int(pos_a)), np.zeros(int(neg_b + neg_a))))
    s_ab = np.concatenate((a_pos_scores, a_neg_scores, b_neg_scores))
    xauc_ab = roc_auc_score(y_ab, s_ab)
    # b-positives against all pooled negatives.
    y_ba = np.concatenate((np.ones(int(pos_b)), np.zeros(int(neg_a + neg_b))))
    s_ba = np.concatenate((b_pos_scores, b_neg_scores, a_neg_scores))
    xauc_ba = roc_auc_score(y_ba, s_ba)
    return xauc_ab, xauc_ba
def zeros_mat(a, b):
    """Build the remaining-negatives table used by the fairness DP.

    ``a`` and ``b`` are 0/1 label lists; a sentinel 0 is prepended so
    indices 1..M / 1..N address the real entries.  ``table[j, k]`` holds
    how many negative labels (from both lists together) are still
    unconsumed after taking the first j items of ``a`` and the first k
    items of ``b``.  Returns the (M+1) x (N+1) float array.
    """
    padded_a = [0] + a
    padded_b = [0] + b
    M = len(padded_a) - 1
    N = len(padded_b) - 1
    pos_a = np.sum(a)
    pos_b = np.sum(b)
    table = np.zeros((M + 1, N + 1))
    # Nothing consumed yet: every negative from both lists remains.
    table[0, 0] = (N - pos_b) + (M - pos_a)
    # First row: consuming b only; each non-positive b entry decrements.
    for col in range(1, N + 1):
        table[0, col] = table[0, col - 1] - (0 if padded_b[col] == 1 else 1)
    for row in range(1, M + 1):
        # First column of this row: consume one more item of a.
        table[row, 0] = table[row - 1, 0] - (1 if padded_a[row] == 0 else 0)
        # Rest of the row: consume items of b one by one.
        for col in range(1, N + 1):
            table[row, col] = table[row, col - 1] - (1 if padded_b[col] == 0 else 0)
    return table
| [
"numpy.where",
"sklearn.metrics.roc_auc_score",
"numpy.argsort",
"numpy.sum",
"numpy.array",
"numba.jit",
"numpy.zeros",
"numpy.concatenate"
] | [((4364, 4382), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4367, 4382), False, 'from numba import jit\n'), ((5726, 5744), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5729, 5744), False, 'from numba import jit\n'), ((7858, 7876), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (7861, 7876), False, 'from numba import jit\n'), ((10000, 10018), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (10003, 10018), False, 'from numba import jit\n'), ((275, 291), 'numpy.where', 'np.where', (['(a == 0)'], {}), '(a == 0)\n', (283, 291), True, 'import numpy as np\n'), ((304, 320), 'numpy.where', 'np.where', (['(a == 1)'], {}), '(a == 1)\n', (312, 320), True, 'import numpy as np\n'), ((373, 393), 'numpy.argsort', 'np.argsort', (['(-b_score)'], {}), '(-b_score)\n', (383, 393), True, 'import numpy as np\n'), ((542, 562), 'numpy.argsort', 'np.argsort', (['(-a_score)'], {}), '(-a_score)\n', (552, 562), True, 'import numpy as np\n'), ((2024, 2039), 'numpy.sum', 'np.sum', (['a_label'], {}), '(a_label)\n', (2030, 2039), True, 'import numpy as np\n'), ((2088, 2103), 'numpy.sum', 'np.sum', (['b_label'], {}), '(b_label)\n', (2094, 2103), True, 'import numpy as np\n'), ((2579, 2594), 'numpy.sum', 'np.sum', (['a_label'], {}), '(a_label)\n', (2585, 2594), True, 'import numpy as np\n'), ((2643, 2658), 'numpy.sum', 'np.sum', (['b_label'], {}), '(b_label)\n', (2649, 2658), True, 'import numpy as np\n'), ((2923, 2959), 'numpy.concatenate', 'np.concatenate', (['(a_score1, b_score0)'], {}), '((a_score1, b_score0))\n', (2937, 2959), True, 'import numpy as np\n'), ((2973, 3006), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['ab_label', 'ab_score'], {}), '(ab_label, ab_score)\n', (2986, 3006), False, 'from sklearn.metrics import roc_auc_score\n'), ((3098, 3134), 'numpy.concatenate', 'np.concatenate', (['(b_score1, a_score0)'], {}), '((b_score1, a_score0))\n', (3112, 3134), True, 'import numpy 
as np\n'), ((3148, 3181), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['ba_label', 'ba_score'], {}), '(ba_label, ba_score)\n', (3161, 3181), False, 'from sklearn.metrics import roc_auc_score\n'), ((4335, 4360), 'numpy.array', 'np.array', (['changed_b_score'], {}), '(changed_b_score)\n', (4343, 4360), True, 'import numpy as np\n'), ((4470, 4485), 'numpy.sum', 'np.sum', (['a_label'], {}), '(a_label)\n', (4476, 4485), True, 'import numpy as np\n'), ((4496, 4511), 'numpy.sum', 'np.sum', (['b_label'], {}), '(b_label)\n', (4502, 4511), True, 'import numpy as np\n'), ((4523, 4553), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1, 2, 2)'], {}), '((M + 1, N + 1, 2, 2))\n', (4531, 4553), True, 'import numpy as np\n'), ((4560, 4584), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (4568, 4584), True, 'import numpy as np\n'), ((5840, 5855), 'numpy.sum', 'np.sum', (['a_label'], {}), '(a_label)\n', (5846, 5855), True, 'import numpy as np\n'), ((5866, 5881), 'numpy.sum', 'np.sum', (['b_label'], {}), '(b_label)\n', (5872, 5881), True, 'import numpy as np\n'), ((5949, 5979), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1, 2, 2)'], {}), '((M + 1, N + 1, 2, 2))\n', (5957, 5979), True, 'import numpy as np\n'), ((5992, 6016), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (6000, 6016), True, 'import numpy as np\n'), ((6024, 6048), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (6032, 6048), True, 'import numpy as np\n'), ((7973, 7988), 'numpy.sum', 'np.sum', (['a_label'], {}), '(a_label)\n', (7979, 7988), True, 'import numpy as np\n'), ((7999, 8014), 'numpy.sum', 'np.sum', (['b_label'], {}), '(b_label)\n', (8005, 8014), True, 'import numpy as np\n'), ((8082, 8112), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1, 2, 2)'], {}), '((M + 1, N + 1, 2, 2))\n', (8090, 8112), True, 'import numpy as np\n'), ((8125, 8149), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (8133, 8149), True, 
'import numpy as np\n'), ((8157, 8181), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (8165, 8181), True, 'import numpy as np\n'), ((10159, 10174), 'numpy.sum', 'np.sum', (['a_label'], {}), '(a_label)\n', (10165, 10174), True, 'import numpy as np\n'), ((10185, 10200), 'numpy.sum', 'np.sum', (['b_label'], {}), '(b_label)\n', (10191, 10200), True, 'import numpy as np\n'), ((10286, 10316), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1, 2, 2)'], {}), '((M + 1, N + 1, 2, 2))\n', (10294, 10316), True, 'import numpy as np\n'), ((10329, 10353), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (10337, 10353), True, 'import numpy as np\n'), ((10361, 10385), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (10369, 10385), True, 'import numpy as np\n'), ((10399, 10423), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (10407, 10423), True, 'import numpy as np\n'), ((13284, 13302), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (13287, 13302), False, 'from numba import jit\n'), ((14582, 14597), 'numpy.sum', 'np.sum', (['a_label'], {}), '(a_label)\n', (14588, 14597), True, 'import numpy as np\n'), ((14646, 14661), 'numpy.sum', 'np.sum', (['b_label'], {}), '(b_label)\n', (14652, 14661), True, 'import numpy as np\n'), ((14711, 14742), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['a_label', 'a_score'], {}), '(a_label, a_score)\n', (14724, 14742), False, 'from sklearn.metrics import roc_auc_score\n'), ((14756, 14787), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['b_label', 'b_score'], {}), '(b_label, b_score)\n', (14769, 14787), False, 'from sklearn.metrics import roc_auc_score\n'), ((15281, 15296), 'numpy.sum', 'np.sum', (['a_label'], {}), '(a_label)\n', (15287, 15296), True, 'import numpy as np\n'), ((15345, 15360), 'numpy.sum', 'np.sum', (['b_label'], {}), '(b_label)\n', (15351, 15360), True, 'import numpy as np\n'), ((15632, 15678), 
'numpy.concatenate', 'np.concatenate', (['(a_score1, a_score0, b_score0)'], {}), '((a_score1, a_score0, b_score0))\n', (15646, 15678), True, 'import numpy as np\n'), ((15691, 15724), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['ab_label', 'ab_score'], {}), '(ab_label, ab_score)\n', (15704, 15724), False, 'from sklearn.metrics import roc_auc_score\n'), ((15833, 15879), 'numpy.concatenate', 'np.concatenate', (['(b_score1, b_score0, a_score0)'], {}), '((b_score1, b_score0, a_score0))\n', (15847, 15879), True, 'import numpy as np\n'), ((15893, 15926), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['ba_label', 'ba_score'], {}), '(ba_label, ba_score)\n', (15906, 15926), False, 'from sklearn.metrics import roc_auc_score\n'), ((16089, 16098), 'numpy.sum', 'np.sum', (['a'], {}), '(a)\n', (16095, 16098), True, 'import numpy as np\n'), ((16109, 16118), 'numpy.sum', 'np.sum', (['b'], {}), '(b)\n', (16115, 16118), True, 'import numpy as np\n'), ((16135, 16159), 'numpy.zeros', 'np.zeros', (['(M + 1, N + 1)'], {}), '((M + 1, N + 1))\n', (16143, 16159), True, 'import numpy as np\n'), ((1141, 1157), 'numpy.where', 'np.where', (['(a == 0)'], {}), '(a == 0)\n', (1149, 1157), True, 'import numpy as np\n'), ((1159, 1175), 'numpy.where', 'np.where', (['(a == 1)'], {}), '(a == 1)\n', (1167, 1175), True, 'import numpy as np\n'), ((4753, 4783), 'numpy.array', 'np.array', (['[[i - 1, 0], [i, 0]]'], {}), '([[i - 1, 0], [i, 0]])\n', (4761, 4783), True, 'import numpy as np\n'), ((4958, 4988), 'numpy.array', 'np.array', (['[[0, i - 1], [0, i]]'], {}), '([[0, i - 1], [0, i]])\n', (4966, 4988), True, 'import numpy as np\n'), ((6343, 6373), 'numpy.array', 'np.array', (['[[i - 1, 0], [i, 0]]'], {}), '([[i - 1, 0], [i, 0]])\n', (6351, 6373), True, 'import numpy as np\n'), ((6675, 6705), 'numpy.array', 'np.array', (['[[0, i - 1], [0, i]]'], {}), '([[0, i - 1], [0, i]])\n', (6683, 6705), True, 'import numpy as np\n'), ((8478, 8508), 'numpy.array', 'np.array', (['[[i - 1, 0], [i, 0]]'], 
{}), '([[i - 1, 0], [i, 0]])\n', (8486, 8508), True, 'import numpy as np\n'), ((8816, 8846), 'numpy.array', 'np.array', (['[[0, i - 1], [0, i]]'], {}), '([[0, i - 1], [0, i]])\n', (8824, 8846), True, 'import numpy as np\n'), ((11271, 11301), 'numpy.array', 'np.array', (['[[i - 1, 0], [i, 0]]'], {}), '([[i - 1, 0], [i, 0]])\n', (11279, 11301), True, 'import numpy as np\n'), ((11610, 11640), 'numpy.array', 'np.array', (['[[0, i - 1], [0, i]]'], {}), '([[0, i - 1], [0, i]])\n', (11618, 11640), True, 'import numpy as np\n'), ((14338, 14363), 'numpy.array', 'np.array', (['changed_b_score'], {}), '(changed_b_score)\n', (14346, 14363), True, 'import numpy as np\n'), ((14392, 14417), 'numpy.array', 'np.array', (['changed_b_score'], {}), '(changed_b_score)\n', (14400, 14417), True, 'import numpy as np\n'), ((1844, 1857), 'numpy.sum', 'np.sum', (['label'], {}), '(label)\n', (1850, 1857), True, 'import numpy as np\n'), ((5522, 5560), 'numpy.array', 'np.array', (['[[j - 1, i - j], [j, i - j]]'], {}), '([[j - 1, i - j], [j, i - j]])\n', (5530, 5560), True, 'import numpy as np\n'), ((5663, 5701), 'numpy.array', 'np.array', (['[[j, i - j - 1], [j, i - j]]'], {}), '([[j, i - j - 1], [j, i - j]])\n', (5671, 5701), True, 'import numpy as np\n'), ((7570, 7608), 'numpy.array', 'np.array', (['[[j - 1, i - j], [j, i - j]]'], {}), '([[j - 1, i - j], [j, i - j]])\n', (7578, 7608), True, 'import numpy as np\n'), ((7787, 7825), 'numpy.array', 'np.array', (['[[j, i - j - 1], [j, i - j]]'], {}), '([[j, i - j - 1], [j, i - j]])\n', (7795, 7825), True, 'import numpy as np\n'), ((9711, 9749), 'numpy.array', 'np.array', (['[[j - 1, i - j], [j, i - j]]'], {}), '([[j - 1, i - j], [j, i - j]])\n', (9719, 9749), True, 'import numpy as np\n'), ((9928, 9966), 'numpy.array', 'np.array', (['[[j, i - j - 1], [j, i - j]]'], {}), '([[j, i - j - 1], [j, i - j]])\n', (9936, 9966), True, 'import numpy as np\n'), ((12508, 12546), 'numpy.array', 'np.array', (['[[j - 1, i - j], [j, i - j]]'], {}), '([[j - 1, i - 
j], [j, i - j]])\n', (12516, 12546), True, 'import numpy as np\n'), ((12725, 12763), 'numpy.array', 'np.array', (['[[j, i - j - 1], [j, i - j]]'], {}), '([[j, i - j - 1], [j, i - j]])\n', (12733, 12763), True, 'import numpy as np\n'), ((1863, 1876), 'numpy.sum', 'np.sum', (['label'], {}), '(label)\n', (1869, 1876), True, 'import numpy as np\n'), ((5205, 5232), 'numpy.sum', 'np.sum', (['b_label[i - j + 1:]'], {}), '(b_label[i - j + 1:])\n', (5211, 5232), True, 'import numpy as np\n'), ((5346, 5369), 'numpy.sum', 'np.sum', (['a_label[j + 1:]'], {}), '(a_label[j + 1:])\n', (5352, 5369), True, 'import numpy as np\n'), ((6954, 6981), 'numpy.sum', 'np.sum', (['b_label[i - j + 1:]'], {}), '(b_label[i - j + 1:])\n', (6960, 6981), True, 'import numpy as np\n'), ((7179, 7202), 'numpy.sum', 'np.sum', (['a_label[j + 1:]'], {}), '(a_label[j + 1:])\n', (7185, 7202), True, 'import numpy as np\n'), ((9104, 9131), 'numpy.sum', 'np.sum', (['b_label[i - j + 1:]'], {}), '(b_label[i - j + 1:])\n', (9110, 9131), True, 'import numpy as np\n'), ((9320, 9343), 'numpy.sum', 'np.sum', (['a_label[j + 1:]'], {}), '(a_label[j + 1:])\n', (9326, 9343), True, 'import numpy as np\n'), ((11879, 11906), 'numpy.sum', 'np.sum', (['b_label[i - j + 1:]'], {}), '(b_label[i - j + 1:])\n', (11885, 11906), True, 'import numpy as np\n'), ((12107, 12130), 'numpy.sum', 'np.sum', (['a_label[j + 1:]'], {}), '(a_label[j + 1:])\n', (12113, 12130), True, 'import numpy as np\n')] |
# import os
import numpy as np
import scipy
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, cm
import statsmodels.api as sm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
def plotBox(data, labelC=None, labelS=None, colorLst='rbkgcmy', title=None,
            figsize=(8, 6), sharey=True):
    """Draw one box-plot panel per entry of ``data`` in a shared figure.

    data: sequence of datasets, one sub-plot (column) per entry; each
        entry may itself contain several series, giving one box each.
    labelC: per-panel x-axis labels (falls back to the panel index).
    labelS: legend labels for the boxes, attached to the last panel.
    colorLst: face-color cycle applied to the boxes within a panel.

    Returns the created matplotlib figure.
    """
    nc = len(data)
    fig, axes = plt.subplots(ncols=nc, sharey=sharey,
                             figsize=figsize)
    for k in range(0, nc):
        # plt.subplots returns a bare Axes (not an array) when ncols == 1.
        ax = axes[k] if nc > 1 else axes
        bp = ax.boxplot(data[k], patch_artist=True,
                        notch=True, showfliers=False)
        for kk in range(0, len(bp['boxes'])):
            plt.setp(bp['boxes'][kk], facecolor=colorLst[kk])
        if labelC is not None:
            ax.set_xlabel(labelC[k])
        else:
            ax.set_xlabel(str(k))
        ax.set_xticks([])
        # ax.ticklabel_format(axis='y', style='sci')
    if labelS is not None:
        if nc == 1:
            ax.legend(bp['boxes'], labelS, loc='best')
        else:
            axes[-1].legend(bp['boxes'], labelS, loc='best')
    if title is not None:
        fig.suptitle(title)
    return fig
def plotVS(x, y, *, ax=None, title=None, xlabel=None, ylabel=None,
           titleCorr=True, plot121=True, doRank=False, figsize=(8, 6)):
    """Scatter plot of ``y`` vs. ``x`` with a least-squares fit line.

    Optionally rank-transforms both inputs first (Spearman-style view),
    appends the Pearson correlation to the title, and draws the y=x
    reference line via ``plot121Line``.

    Returns ``(fig, ax)``; ``fig`` is None when an existing axis was
    supplied by the caller.
    """
    if doRank is True:
        x = scipy.stats.rankdata(x)
        y = scipy.stats.rankdata(y)
    corr = scipy.stats.pearsonr(x, y)[0]
    # Degree-1 fit, evaluated only at the two x extremes for the line.
    pLr = np.polyfit(x, y, 1)
    xLr = np.array([np.min(x), np.max(x)])
    yLr = np.poly1d(pLr)(xLr)
    if ax is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.subplots()
    else:
        fig = None
    if title is not None:
        if titleCorr is True:
            title = title+' '+r'$\rho$={:.2f}'.format(corr)
        ax.set_title(title)
    else:
        if titleCorr is True:
            ax.set_title(r'$\rho$='+'{:.2f}'.format(corr))
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    # corr = np.corrcoef(x, y)[0, 1]
    ax.plot(x, y, 'b.')
    ax.plot(xLr, yLr, 'r-')
    if plot121 is True:
        plot121Line(ax)
    return fig, ax
def plot121Line(ax, spec='k-'):
    """Draw the y=x reference line spanning the axis's current limits.

    The line runs from the smaller of the two lower limits to the larger
    of the two upper limits, so it crosses the whole visible area.
    """
    x_lo, x_hi = ax.get_xlim()
    y_lo, y_hi = ax.get_ylim()
    lo = np.min([x_lo, y_lo])
    hi = np.max([x_hi, y_hi])
    ax.plot([lo, hi], [lo, hi], spec)
def plotMap(grid, *, crd, ax=None, lat=None, lon=None, title=None,
            cRange=None, shape=None):
    """Plot a 2-D grid on a cylindrical Basemap, optionally overlaying
    shapefile outlines in red.

    grid: 2-D array of values to render with pcolormesh.
    crd: (lat, lon) pair used when lat/lon are not given explicitly.
    cRange: explicit (vmin, vmax); defaults to the 5th/95th percentiles
        of the non-NaN grid values.
    shape: a shapefile record (or list of them) with ``points``/``parts``.

    Returns ``(fig, ax, mm)`` when the axis was created here, otherwise
    just the Basemap instance ``mm``.
    """
    if lat is None and lon is None:
        lat = crd[0]
        lon = crd[1]
    if cRange is not None:
        vmin = cRange[0]
        vmax = cRange[1]
    else:
        temp = flatData(grid)
        vmin = np.percentile(temp, 5)
        vmax = np.percentile(temp, 95)
    # BUG FIX: ``fig, ax = plt.figure(...)`` raised TypeError because
    # plt.figure returns a single Figure. Create the axis explicitly
    # (as plotVS/plotCDF do) and remember ownership via ``fig``.
    fig = None
    if ax is None:
        fig = plt.figure(figsize=(8, 4))
        ax = fig.subplots()
    mm = Basemap(llcrnrlat=lat[-1], urcrnrlat=lat[0],
                 llcrnrlon=lon[0], urcrnrlon=lon[-1],
                 projection='cyl', resolution='c', ax=ax)
    mm.drawcoastlines()
    # map.drawstates()
    # map.drawcountries()
    x, y = mm(lon, lat)
    xx, yy = np.meshgrid(x, y)
    cs = mm.pcolormesh(xx, yy, grid, cmap=plt.cm.viridis, vmin=vmin, vmax=vmax)
    if shape is not None:
        if type(shape) is not list:
            shape = [shape]
        for shp in shape:
            crd = np.array(shp.points)
            par = list(shp.parts)
            par.append(len(crd))
            if len(par) > 1:
                # Draw each part of the shape as its own polyline.
                for k in range(0, len(par)-1):
                    xSeg = crd[par[k]:par[k+1], 0]
                    ySeg = crd[par[k]:par[k+1], 1]
                    mm.plot(xSeg, ySeg, color='r', linewidth=2)
            else:
                mm.plot(crd[:, 0], crd[:, 1], color='r', linewidth=2)
    mm.colorbar(cs, location='bottom', pad='5%')
    if title is not None:
        ax.set_title(title)
    # BUG FIX: the old ``if ax is None`` check here was always False
    # because ax had been reassigned above, so (fig, ax, mm) was never
    # returned. Use figure ownership instead.
    if fig is not None:
        return fig, ax, mm
    return mm
def plotCDF(xLst, *, ax=None, title=None, legendLst=None, figsize=(8, 6),
            ref='121', cLst=None, xlabel=None, ylabel=None, showDiff='RMSE'):
    """Plot empirical CDFs of the flattened arrays in ``xLst`` against a
    reference curve.

    ref: '121' compares each CDF to the y=x line; 'norm' compares to the
        standard-normal CDF evaluated at the sorted values.
    showDiff: 'RMSE' or 'KS' -- which distance to append to each legend
        label (only when ``legendLst`` is given).

    Returns ``(fig, ax, out)`` where ``out`` holds the sorted values and
    the per-series RMSE / Kolmogorov-Smirnov distances; ``fig`` is None
    when an existing axis was supplied.
    """
    if ax is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.subplots()
    else:
        fig = None
    if cLst is None:
        cmap = plt.cm.jet
        cLst = cmap(np.linspace(0, 1, len(xLst)))
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    xSortLst = list()
    rmseLst = list()
    ksdLst = list()
    for k in range(0, len(xLst)):
        xSort = flatData(xLst[k])
        yRank = np.arange(len(xSort)) / float(len(xSort) - 1)
        xSortLst.append(xSort)
        legStr = None if legendLst is None else legendLst[k]
        # BUG FIX: these comparisons used ``is`` on string literals, which
        # only works by accident of CPython interning; use ``==``.
        if ref == '121':
            yRef = yRank
        elif ref == 'norm':
            yRef = scipy.stats.norm.cdf(xSort, 0, 1)
        rmse = np.sqrt(((xSort - yRef) ** 2).mean())
        ksd = np.max(np.abs(xSort - yRef))
        rmseLst.append(rmse)
        ksdLst.append(ksd)
        # BUG FIX: appending the statistic to a None label raised
        # TypeError when legendLst was omitted (the default call path);
        # only annotate real labels.
        if legStr is not None:
            if showDiff == 'RMSE':
                legStr = legStr + ' RMSE=' + '%.3f' % rmse
            elif showDiff == 'KS':
                legStr = legStr + ' KS=' + '%.3f' % ksd
        ax.plot(xSort, yRank, color=cLst[k], label=legStr)
    if ref == '121':
        ax.plot([0, 1], [0, 1], 'k', label='y=x')
    if ref == 'norm':
        xNorm = np.linspace(-5, 5, 1000)
        normCdf = scipy.stats.norm.cdf(xNorm, 0, 1)
        ax.plot(xNorm, normCdf, 'k', label='Gaussian')
    if legendLst is not None:
        ax.legend(loc='best')
    out = {'xSortLst': xSortLst, 'rmseLst': rmseLst, 'ksdLst': ksdLst}
    return fig, ax, out
def distCDF(xLst):
    """Distance of each array's empirical CDF from the y=x reference.

    For every array, the values are flattened/sorted via ``flatData`` and
    compared against the uniform ranks 0..1; the RMSE and the
    Kolmogorov-Smirnov (max absolute) deviation are collected.

    Returns ``(rmse_list, ks_list)``, one entry per input array.
    """
    rmse_values = []
    ks_values = []
    for arr in xLst:
        sorted_vals = flatData(arr)
        n = len(sorted_vals)
        uniform_ref = np.arange(n) / float(n - 1)
        diff = sorted_vals - uniform_ref
        rmse_values.append(np.sqrt((diff ** 2).mean()))
        ks_values.append(np.max(np.abs(diff)))
    return rmse_values, ks_values
def flatData(x):
    """Flatten an array, drop NaNs, and return the values sorted ascending."""
    flat = x.flatten()
    valid = flat[~np.isnan(flat)]
    return np.sort(valid)
def scaleSigma(s, u, y):
    """Fit a Gaussian to the standardized residuals ``(y - u) / s`` and
    return the fitted standard deviation.

    NaNs are dropped (and values sorted) by ``flatData`` before fitting;
    presumably used as a calibration factor for the predictive sigma
    ``s`` against observations ``y`` with mean prediction ``u``.
    """
    yNorm = (y-u)/s
    _, sF = scipy.stats.norm.fit(flatData(yNorm))
    return sF
def reCalSigma(s, u, y):
    """Standardize residuals and map them to Gaussian confidence levels.

    For each observation, ``erf(|y - u| / s / sqrt(2))`` is the
    probability mass a standard normal places within that many sigmas
    (two-sided).  Returns ``(confidence, standardized_residuals)``.
    """
    z = (y - u) / s
    confidence = scipy.special.erf(np.abs(y - u) / s / np.sqrt(2))
    return confidence, z
def regLinear(y, x):
    """Ordinary least squares of ``y`` on the regressors in ``x``.

    x: sequence of 1-D regressor arrays, all the same length as ``y``.
    Returns the fitted statsmodels regression results object.

    NOTE(review): ``sm.add_constant`` is applied at every stacking step
    while a ones column is also stacked in manually, so the intercept
    handling looks redundant -- confirm the intended design matrix.
    """
    ones = np.ones(len(x[0]))
    X = sm.add_constant(np.column_stack((x[0], ones)))
    for ele in x[1:]:
        X = sm.add_constant(np.column_stack((ele, X)))
    out = sm.OLS(y, X).fit()
    return out
def plotTwinBox(ax, xdata, ydata, xerror, yerror, facecolor='r',
                edgecolor='None', alpha=0.5):
    """Draw rectangular error boxes plus error bars for each (x, y) point.

    xerror, yerror: 2 x n arrays of lower/upper extents per point
        (iterated column-wise via ``.T``); each box spans
        [x - xe_lo, x + xe_hi] by [y - ye_lo, y + ye_hi].
    Returns the errorbar artists created on ``ax``.
    """
    # Create list for all the error patches
    errorboxes = []
    # Loop over data points; create box from errors at each point
    for x, y, xe, ye in zip(xdata, ydata, xerror.T, yerror.T):
        rect = Rectangle(
            (x - xe[0], y - ye[0]), xe.sum(), ye.sum())
        errorboxes.append(rect)
    # Create patch collection with specified colour/alpha
    pc = PatchCollection(
        errorboxes, facecolor=facecolor, alpha=alpha, edgecolor=edgecolor)
    # Add collection to axes
    ax.add_collection(pc)
    # Plot errorbars
    artists = ax.errorbar(xdata, ydata, xerr=xerror, yerr=yerror,
                          fmt='None', ecolor='k')
    return artists
| [
"numpy.sqrt",
"numpy.polyfit",
"numpy.column_stack",
"numpy.array",
"scipy.stats.pearsonr",
"numpy.poly1d",
"statsmodels.api.OLS",
"scipy.stats.norm.cdf",
"numpy.sort",
"numpy.max",
"numpy.linspace",
"numpy.min",
"numpy.meshgrid",
"numpy.abs",
"numpy.isnan",
"matplotlib.pyplot.setp",
... | [((397, 451), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'nc', 'sharey': 'sharey', 'figsize': 'figsize'}), '(ncols=nc, sharey=sharey, figsize=figsize)\n', (409, 451), True, 'import matplotlib.pyplot as plt\n'), ((1493, 1512), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (1503, 1512), True, 'import numpy as np\n'), ((2315, 2341), 'numpy.min', 'np.min', (['[xlim[0], ylim[0]]'], {}), '([xlim[0], ylim[0]])\n', (2321, 2341), True, 'import numpy as np\n'), ((2353, 2379), 'numpy.max', 'np.max', (['[xlim[1], ylim[1]]'], {}), '([xlim[1], ylim[1]])\n', (2359, 2379), True, 'import numpy as np\n'), ((2879, 3006), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlat': 'lat[-1]', 'urcrnrlat': 'lat[0]', 'llcrnrlon': 'lon[0]', 'urcrnrlon': 'lon[-1]', 'projection': '"""cyl"""', 'resolution': '"""c"""', 'ax': 'ax'}), "(llcrnrlat=lat[-1], urcrnrlat=lat[0], llcrnrlon=lon[0], urcrnrlon=\n lon[-1], projection='cyl', resolution='c', ax=ax)\n", (2886, 3006), False, 'from mpl_toolkits.basemap import Basemap, cm\n'), ((3146, 3163), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (3157, 3163), True, 'import numpy as np\n'), ((6255, 6270), 'numpy.sort', 'np.sort', (['xArray'], {}), '(xArray)\n', (6262, 6270), True, 'import numpy as np\n'), ((7244, 7331), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['errorboxes'], {'facecolor': 'facecolor', 'alpha': 'alpha', 'edgecolor': 'edgecolor'}), '(errorboxes, facecolor=facecolor, alpha=alpha, edgecolor=\n edgecolor)\n', (7259, 7331), False, 'from matplotlib.collections import PatchCollection\n'), ((1382, 1405), 'scipy.stats.rankdata', 'scipy.stats.rankdata', (['x'], {}), '(x)\n', (1402, 1405), False, 'import scipy\n'), ((1418, 1441), 'scipy.stats.rankdata', 'scipy.stats.rankdata', (['y'], {}), '(y)\n', (1438, 1441), False, 'import scipy\n'), ((1453, 1479), 'scipy.stats.pearsonr', 'scipy.stats.pearsonr', (['x', 'y'], {}), '(x, y)\n', (1473, 1479), False, 'import 
scipy\n'), ((1566, 1580), 'numpy.poly1d', 'np.poly1d', (['pLr'], {}), '(pLr)\n', (1575, 1580), True, 'import numpy as np\n'), ((1620, 1647), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1630, 1647), True, 'import matplotlib.pyplot as plt\n'), ((2743, 2765), 'numpy.percentile', 'np.percentile', (['temp', '(5)'], {}), '(temp, 5)\n', (2756, 2765), True, 'import numpy as np\n'), ((2781, 2804), 'numpy.percentile', 'np.percentile', (['temp', '(95)'], {}), '(temp, 95)\n', (2794, 2804), True, 'import numpy as np\n'), ((2843, 2869), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (2853, 2869), True, 'import matplotlib.pyplot as plt\n'), ((4215, 4242), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4225, 4242), True, 'import matplotlib.pyplot as plt\n'), ((5518, 5542), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(1000)'], {}), '(-5, 5, 1000)\n', (5529, 5542), True, 'import numpy as np\n'), ((5561, 5594), 'scipy.stats.norm.cdf', 'scipy.stats.norm.cdf', (['xNorm', '(0)', '(1)'], {}), '(xNorm, 0, 1)\n', (5581, 5594), False, 'import scipy\n'), ((6602, 6631), 'numpy.column_stack', 'np.column_stack', (['(x[0], ones)'], {}), '((x[0], ones))\n', (6617, 6631), True, 'import numpy as np\n'), ((714, 763), 'matplotlib.pyplot.setp', 'plt.setp', (["bp['boxes'][kk]"], {'facecolor': 'colorLst[kk]'}), "(bp['boxes'][kk], facecolor=colorLst[kk])\n", (722, 763), True, 'import matplotlib.pyplot as plt\n'), ((1533, 1542), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1539, 1542), True, 'import numpy as np\n'), ((1544, 1553), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1550, 1553), True, 'import numpy as np\n'), ((3378, 3398), 'numpy.array', 'np.array', (['shp.points'], {}), '(shp.points)\n', (3386, 3398), True, 'import numpy as np\n'), ((5112, 5132), 'numpy.abs', 'np.abs', (['(xSort - yRef)'], {}), '(xSort - yRef)\n', (5118, 5132), True, 'import 
numpy as np\n'), ((6045, 6065), 'numpy.abs', 'np.abs', (['(xSort - yRef)'], {}), '(xSort - yRef)\n', (6051, 6065), True, 'import numpy as np\n'), ((6221, 6241), 'numpy.isnan', 'np.isnan', (['xArrayTemp'], {}), '(xArrayTemp)\n', (6229, 6241), True, 'import numpy as np\n'), ((6470, 6480), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6477, 6480), True, 'import numpy as np\n'), ((6683, 6708), 'numpy.column_stack', 'np.column_stack', (['(ele, X)'], {}), '((ele, X))\n', (6698, 6708), True, 'import numpy as np\n'), ((6720, 6732), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (6726, 6732), True, 'import statsmodels.api as sm\n'), ((5004, 5037), 'scipy.stats.norm.cdf', 'scipy.stats.norm.cdf', (['xSort', '(0)', '(1)'], {}), '(xSort, 0, 1)\n', (5024, 5037), False, 'import scipy\n'), ((6456, 6469), 'numpy.abs', 'np.abs', (['(y - u)'], {}), '(y - u)\n', (6462, 6469), True, 'import numpy as np\n')] |
import numpy as np
class History():
    """Ring buffer recording evaluations of the TRUE model only.

    Each stored row has the form ``[coords_0, ..., coords_(dim-1), value]``.
    A fixed amount of space is kept and the oldest rows are overwritten
    once the buffer wraps; ``_write_index`` marks the next slot to fill,
    so use the accessor methods to obtain correctly ordered data.

    A second, smaller ring buffer (``use_size`` entries) tracks whether
    the true model (vs. the surrogate) was used for each decision, which
    feeds the dynamic relevator threshold.

    Names starting with ``_`` are private.
    """

    def __init__(self, metamodel, kwargs):
        self.metamodel = metamodel
        # Capacities of the evaluation and usage ring buffers.
        self.size = kwargs.get("size", 500)
        self.use_size = kwargs.get("use_size", 200)
        # One column per coordinate plus one for the evaluated value.
        self._dimension = metamodel.model.get_dimension() + 1
        # Backing storage for both ring buffers.
        self._data = np.zeros((self.size, self._dimension))
        self._use_data = np.zeros(self.use_size)
        # Next slot to overwrite in each buffer.
        self._write_index = 0
        self._use_write_index = 0
        # Whether each buffer has wrapped around at least once (i.e. no
        # empty entries need to be trimmed when reading it back).
        self._is_full = False
        self._use_is_full = False

    def update(self, coords, prediction, relevance, is_relevant):
        """Record one decision and, if the true model ran, its evaluation.

        The usage buffer is appended on every call; the evaluation buffer
        only grows when ``is_relevant`` is true.
        """
        self._use_data[self._use_write_index] = float(is_relevant)
        self._use_write_index = (self._use_write_index + 1) % self.use_size
        if self._use_write_index == 0 and not self._use_is_full:
            self._use_is_full = True
        if is_relevant:
            row = self._write_index
            self._data[row, 0:len(coords)] = np.array(coords)
            self._data[row, -1] = prediction
            self._write_index = (self._write_index + 1) % self.size
            if self._write_index == 0 and not self._is_full:
                self._is_full = True

    def _ordered_data(self, data, write_index):
        """Rotate a full ring buffer so its rows come out oldest-first."""
        return np.roll(data, self.size - write_index, axis=0)

    def get_model_evaluations(self):
        """Coordinates and values of true-model evaluations, oldest first."""
        if not self._is_full:
            # Buffer never wrapped: the filled prefix is already in order.
            return self._data[:self._write_index, :]
        return self._ordered_data(self._data, self._write_index)

    def get_model_usage_rate(self):
        """Fraction of recent decisions that used the true model."""
        if self._use_is_full:
            window = self._use_data
        else:
            window = self._use_data[:self._use_write_index]
        return np.mean(window)
| [
"numpy.array",
"numpy.zeros",
"numpy.mean",
"numpy.roll"
] | [((1083, 1121), 'numpy.zeros', 'np.zeros', (['(self.size, self._dimension)'], {}), '((self.size, self._dimension))\n', (1091, 1121), True, 'import numpy as np\n'), ((1147, 1170), 'numpy.zeros', 'np.zeros', (['self.use_size'], {}), '(self.use_size)\n', (1155, 1170), True, 'import numpy as np\n'), ((2598, 2625), 'numpy.roll', 'np.roll', (['data', 'roll'], {'axis': '(0)'}), '(data, roll, axis=0)\n', (2605, 2625), True, 'import numpy as np\n'), ((2114, 2130), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (2122, 2130), True, 'import numpy as np\n'), ((3272, 3295), 'numpy.mean', 'np.mean', (['self._use_data'], {}), '(self._use_data)\n', (3279, 3295), True, 'import numpy as np\n'), ((3329, 3376), 'numpy.mean', 'np.mean', (['self._use_data[:self._use_write_index]'], {}), '(self._use_data[:self._use_write_index])\n', (3336, 3376), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import networkx as nx
import numpy as np
import sys
input_lines = [list(y) for y in [x.strip() for x in open(sys.argv[1], 'r').readlines()]]
maze_arr = np.array(input_lines, dtype=int)
# Need to turn the maze into a graph
maze = nx.DiGraph()
for idx, x in np.ndenumerate(maze_arr):
me = "{0:03d}_{1:03d}".format(idx[0], idx[1])
# Calculate costs to enter each of the neighbors. Note that we
# don't need to add edges up or to the left, as we would have
# already added them from previous passes through the array.
#
# NOTE: The above comment is WRONG, since the edge costs are
# not symmetric! I'd be much better at this if I could read
if idx[1] > 0:
up = "{0:03d}_{1:03d}".format(idx[0], idx[1]-1)
maze.add_edge(me, up, weight=maze_arr[idx[0], idx[1]-1])
if idx[1] < (maze_arr.shape[1]-1):
right = "{0:03d}_{1:03d}".format(idx[0], idx[1]+1)
maze.add_edge(me, right, weight=maze_arr[idx[0], idx[1]+1])
if idx[0] < (maze_arr.shape[0]-1):
down = "{0:03d}_{1:03d}".format(idx[0]+1, idx[1])
maze.add_edge(me, down, weight=maze_arr[idx[0]+1, idx[1]])
if idx[0] > 0:
left = "{0:03d}_{1:03d}".format(idx[0]-1, idx[1])
maze.add_edge(me, left, weight=maze_arr[idx[0]-1, idx[1]])
start = "{0:03d}_{1:03d}".format(0, 0)
end = "{0:03d}_{1:03d}".format(*[x-1 for x in [*maze_arr.shape]])
# Cheating, since I'm so far behind. This is Dijkstra's, by default,
# and I know I can look the algorithm up if I need to.
shortest_path = nx.shortest_path(maze, source=start, target=end, weight='weight')
indices = [(int(x),int(y)) for x,y in [n.split("_") for n in shortest_path]]
cost = 0
for pos in indices[1:]:
cost += maze_arr[ pos[0], pos[1] ]
print(cost) | [
"numpy.array",
"networkx.DiGraph",
"networkx.shortest_path",
"numpy.ndenumerate"
] | [((178, 210), 'numpy.array', 'np.array', (['input_lines'], {'dtype': 'int'}), '(input_lines, dtype=int)\n', (186, 210), True, 'import numpy as np\n'), ((256, 268), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (266, 268), True, 'import networkx as nx\n'), ((283, 307), 'numpy.ndenumerate', 'np.ndenumerate', (['maze_arr'], {}), '(maze_arr)\n', (297, 307), True, 'import numpy as np\n'), ((1560, 1625), 'networkx.shortest_path', 'nx.shortest_path', (['maze'], {'source': 'start', 'target': 'end', 'weight': '"""weight"""'}), "(maze, source=start, target=end, weight='weight')\n", (1576, 1625), True, 'import networkx as nx\n')] |
import sys, getopt
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
import ICA_support_lib as sup
import ICA_coupling_pattern as cp
import ICA_ising as ising
class astro_pp_ising_creator:
def __init__(self):
self.main_Path = os.getcwd()
self.ising_model_Path = self.main_Path + '/Ising_models/'
self.data_Path = self.main_Path + '/dataFiles/'
sup.check_create_save_dir(self.ising_model_Path)
def create_ising_model(self, ising_num, size_exp,shift,shrink,stretch,scaler,clusters = 500,diam_mean_var = (6, 1),amp_mean_var = (.1, 1)):
'''
:param ising_num: ISING MODEL NUMBER TO GENERATE
:param size_exp: CONTROLS SIZE OF ISING LATTICE
:param shift: SURFACE GENERATION PARAMETER, CONSTANT AT 0
:param shrink: SURFACE GENERATION PARAMETER, CONSTANT AT 0
:param stretch: SURFACE GENERATION PARAMETER, CONSTANT AT 0
:param scaler: SURFACE GENERATION PARAMETER, CONSTANT AT 0
:param clusters: SURFACE GENERATION PARAMETER, CONSTANT AT 500
:param diam_mean_var: MEAN AND VARIANCE FOR THE DIAMETER OF EACH RADIAL BASIS FUNCTION FORMING THE SURFACE
:param amp_mean_var: MEAN AND VARIANCE FOR THE AMPLITUDE OF EACH RADIAL BASIS FUNCTION FORMING THE SURFACE
:return: SAVES ISING MODEL DATA
'''
np.random.seed(222)
# CREATE MODEL DIRECTORY
dataPath_model = self.ising_model_Path + '/Model_' + str(ising_num)
sup.check_create_save_dir(dataPath_model)
# SET SHAPE OF ISING 2D LATTICE
shp = (2 ** size_exp, 2 ** size_exp) # syn_space_size
# INITIALIZED NEED CLASSES
isi = ising.astro_pp_model_ising(synaptic_matrix_size=shp,shift=shift,shrink=shrink,stretch=stretch,scaler=scaler)
pat = cp.astro_pp_pattern_generator(space_dims=shp)
# CREATE LOG FOR MODEL PARAMETERS AND STATS
log_filename = 'Log_for_Ising_Model_' + str(ising_num)
log_fn = os.path.abspath(os.path.join(dataPath_model, log_filename))
with open(log_fn, 'w') as f:
f.write('LOG___ISING_MODEL_' + str(ising_num)+ '\n\n')
f.write('DATA PATH: ' + str(dataPath_model) + '\n\n\n')
f.write('INPUT PARAMETERS:\n\n')
f.write(' size_exp = ' + str(size_exp) + '\n')
f.write(' shape = ' + str(shp) + '\n\n')
f.write(' clusters = ' + str(clusters) + '\n')
f.write(' diam_mean_var = ' + str(diam_mean_var) + '\n')
f.write(' amp_mean_var = ' + str(amp_mean_var) + '\n')
f.write(' shift = ' + str(shift) + '\n')
f.write(' shrink = ' + str(shrink) + '\n')
f.write(' stretch = ' + str(stretch) + '\n')
f.write(' scaler = ' + str(scaler) + '\n')
# GENERATE 3D LANDSCAPE USING RADIAL BASIS FUNCTIONS
params = pat.generate_pattern_landscape_parameters_normal_dist(num_of_clusters=clusters,
diam_min_max=diam_mean_var,
amp_min_max=amp_mean_var)
out = pat.space_func_2d(pat.X, pat.Y, params[0], params[1], params[2], params[3])
f.write('Initial Out Landscape <M>, Min, Max : ' + str(
len(np.where(out >= 0)[0]) / np.size(out)) + ' , ' + str(np.amin(out)) + ' , ' + str(
np.amax(out)) + '\n')
# RESCALING SURFACE
out_rescaled = np.multiply(out, np.divide(1.0, np.maximum(np.absolute(np.amin(out)),
np.absolute(np.amax(out)))))
f.write('Initial Out_rescaled Landscape <M>, Min, Max : ' + str(
len(np.where(out_rescaled >= 0)[0]) / np.size(out_rescaled)) + ' , ' + str(
np.amin(out_rescaled)) + ' , '
+ str(np.amax(out_rescaled)) + '\n\n')
# BINARIZE LANDSCAPE WITH THRESHOLD AT 0
spins_matrix_rescaled = np.add(1, np.multiply(2, np.floor(np.clip(out_rescaled, -1, 0))))
f.write(
'spin Initialization <M> = ' + str(np.average(np.clip(spins_matrix_rescaled, 0, 1))) + '\n\n')
# INITIALIZE VARIABLES FOR ISING MODEL GENERATION USING SURFACE AND SPINS DATA FROM ABOVE
ind1, ind2, main_spins, param_T, J, spin_dist, feed_temp_scalar, spin_feeder = isi.initialize_vars(
initial_spins=spins_matrix_rescaled, initial_spin_dist=out_rescaled)
tf.set_random_seed(1234)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# GENERATE ISING COUPLINGS
sess.run(isi.set_custom_coupling_v4(landscape=spin_dist, J=J, power_scaling=10))
# COLLECT STATS ON COUPLINGS
J_out = sess.run(J)
f.write('Ising Coupling Metrics:\n')
f.write(' Average: ' + str(np.average(J_out)) + '\n')
f.write(' Maximum: ' + str(np.amax(J_out)) + '\n')
f.write(' Minimum: ' + str(np.amin(J_out)) + '\n\n')
#### SAVING ISING MODEL ####
data_name = 'ISING_Model_' + str(ising_num)
sup.save_non_tf_data( names = ['ind1','ind2','main_spins','param_T','J','spin_dist','out_rescaled']
, data = [isi.list1, isi.list2, sess.run(main_spins), sess.run(param_T), sess.run(J), sess.run(spin_dist), out_rescaled]
, filename = data_name
, savePath = dataPath_model
)
print('Ising Model number ' + str(ising_num) + ' saved.')
sess.close()
def main(argv):
try:
opts, args = getopt.getopt(argv, "", ["model_num="])
except getopt.GetoptError:
print('Incorrect arguments')
sys.exit(2)
for opt, arg in opts:
if opt == '--model_num':
m = int(arg)
else:
print('Error, exiting')
sys.exit()
aic = astro_pp_ising_creator()
aic.create_ising_model(m,8,shift=0.0,shrink=0.35,stretch=4.0,scaler=1.0,clusters=500,diam_mean_var=(6,1))
if __name__ == '__main__':
main(sys.argv[1:])
| [
"numpy.clip",
"ICA_coupling_pattern.astro_pp_pattern_generator",
"getopt.getopt",
"ICA_ising.astro_pp_model_ising",
"tensorflow.compat.v1.disable_v2_behavior",
"numpy.amin",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.average",
"numpy.where",
"numpy.size",
"os.path.join",
"os.g... | [((72, 96), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (94, 96), True, 'import tensorflow.compat.v1 as tf\n'), ((279, 290), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (288, 290), False, 'import os\n'), ((422, 470), 'ICA_support_lib.check_create_save_dir', 'sup.check_create_save_dir', (['self.ising_model_Path'], {}), '(self.ising_model_Path)\n', (447, 470), True, 'import ICA_support_lib as sup\n'), ((1392, 1411), 'numpy.random.seed', 'np.random.seed', (['(222)'], {}), '(222)\n', (1406, 1411), True, 'import numpy as np\n'), ((1530, 1571), 'ICA_support_lib.check_create_save_dir', 'sup.check_create_save_dir', (['dataPath_model'], {}), '(dataPath_model)\n', (1555, 1571), True, 'import ICA_support_lib as sup\n'), ((1726, 1843), 'ICA_ising.astro_pp_model_ising', 'ising.astro_pp_model_ising', ([], {'synaptic_matrix_size': 'shp', 'shift': 'shift', 'shrink': 'shrink', 'stretch': 'stretch', 'scaler': 'scaler'}), '(synaptic_matrix_size=shp, shift=shift, shrink=\n shrink, stretch=stretch, scaler=scaler)\n', (1752, 1843), True, 'import ICA_ising as ising\n'), ((1849, 1894), 'ICA_coupling_pattern.astro_pp_pattern_generator', 'cp.astro_pp_pattern_generator', ([], {'space_dims': 'shp'}), '(space_dims=shp)\n', (1878, 1894), True, 'import ICA_coupling_pattern as cp\n'), ((5913, 5952), 'getopt.getopt', 'getopt.getopt', (['argv', '""""""', "['model_num=']"], {}), "(argv, '', ['model_num='])\n", (5926, 5952), False, 'import sys, getopt\n'), ((2044, 2086), 'os.path.join', 'os.path.join', (['dataPath_model', 'log_filename'], {}), '(dataPath_model, log_filename)\n', (2056, 2086), False, 'import os\n'), ((4680, 4704), 'tensorflow.compat.v1.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), '(1234)\n', (4698, 4704), True, 'import tensorflow.compat.v1 as tf\n'), ((4724, 4736), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (4734, 4736), True, 'import tensorflow.compat.v1 as tf\n'), ((6030, 6041), 'sys.exit', 
'sys.exit', (['(2)'], {}), '(2)\n', (6038, 6041), False, 'import sys, getopt\n'), ((6190, 6200), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6198, 6200), False, 'import sys, getopt\n'), ((4758, 4791), 'tensorflow.compat.v1.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4789, 4791), True, 'import tensorflow.compat.v1 as tf\n'), ((4202, 4230), 'numpy.clip', 'np.clip', (['out_rescaled', '(-1)', '(0)'], {}), '(out_rescaled, -1, 0)\n', (4209, 4230), True, 'import numpy as np\n'), ((3551, 3563), 'numpy.amax', 'np.amax', (['out'], {}), '(out)\n', (3558, 3563), True, 'import numpy as np\n'), ((3688, 3700), 'numpy.amin', 'np.amin', (['out'], {}), '(out)\n', (3695, 3700), True, 'import numpy as np\n'), ((3785, 3797), 'numpy.amax', 'np.amax', (['out'], {}), '(out)\n', (3792, 3797), True, 'import numpy as np\n'), ((4045, 4066), 'numpy.amax', 'np.amax', (['out_rescaled'], {}), '(out_rescaled)\n', (4052, 4066), True, 'import numpy as np\n'), ((5091, 5108), 'numpy.average', 'np.average', (['J_out'], {}), '(J_out)\n', (5101, 5108), True, 'import numpy as np\n'), ((5159, 5173), 'numpy.amax', 'np.amax', (['J_out'], {}), '(J_out)\n', (5166, 5173), True, 'import numpy as np\n'), ((5224, 5238), 'numpy.amin', 'np.amin', (['J_out'], {}), '(J_out)\n', (5231, 5238), True, 'import numpy as np\n'), ((4318, 4354), 'numpy.clip', 'np.clip', (['spins_matrix_rescaled', '(0)', '(1)'], {}), '(spins_matrix_rescaled, 0, 1)\n', (4325, 4354), True, 'import numpy as np\n'), ((3506, 3518), 'numpy.amin', 'np.amin', (['out'], {}), '(out)\n', (3513, 3518), True, 'import numpy as np\n'), ((3988, 4009), 'numpy.amin', 'np.amin', (['out_rescaled'], {}), '(out_rescaled)\n', (3995, 4009), True, 'import numpy as np\n'), ((3478, 3490), 'numpy.size', 'np.size', (['out'], {}), '(out)\n', (3485, 3490), True, 'import numpy as np\n'), ((3934, 3955), 'numpy.size', 'np.size', (['out_rescaled'], {}), '(out_rescaled)\n', (3941, 3955), True, 'import numpy as np\n'), ((3453, 3471), 
'numpy.where', 'np.where', (['(out >= 0)'], {}), '(out >= 0)\n', (3461, 3471), True, 'import numpy as np\n'), ((3900, 3927), 'numpy.where', 'np.where', (['(out_rescaled >= 0)'], {}), '(out_rescaled >= 0)\n', (3908, 3927), True, 'import numpy as np\n')] |
import pickle
import math
import numpy as np
import copy
class AdaBoostClassifier:
'''A simple AdaBoost Classifier.'''
__base_classifier__ = None
__classifiers__ = None
__max_base__ = 0
__n_base__ = 0
__alpha__ = None
def __init__(self, weak_classifier, n_weakers_limit):
self.__base_classifier__ = weak_classifier
self.__max_base__ = n_weakers_limit
def fit(self,X,y):
self.__alpha__ = []
self.__classifiers__ = []
W = np.zeros([self.__max_base__, X.shape[0]])
W[0, :] = 1 / X.shape[0]
for m in range(0, self.__max_base__):
#train m-th classifier
print ("train the " + str(m + 1) + " base classifier")
base = copy.deepcopy(self.__base_classifier__)
base = base.fit(X, y, W[m])
self.__classifiers__.append(base)
#predict through the m-th classifier
y_predict = base.predict(X)
#calculate error
h = np.zeros(y_predict.shape)
for i in range(0, y.shape[0]):
if y_predict[i] != y[i]:
h[i] = 1
else:
h[i] = 0
h[i] = h[i] * W[m, i]
epsilon = np.sum(h)
#calculate alpha value
self.__alpha__.append(0.5 * math.log(1 / epsilon - 1))
#reach max number of classifiers or good enough
if m >= self.__max_base__ - 1 or epsilon < 0.1:
self.__n_base__ = m + 1
break
#calculate weights for the next classifier
w = np.zeros([X.shape[0]])
for i in range(0, X.shape[0]):
w[i] = W[m, i] * math.exp(-self.__alpha__[m] * y[i] * y_predict[i])
z = np.sum(w)
for i in range(0, X.shape[0]):
W[m+1, i] = w[i] / z
def predict(self, X, threshold=0):
#sum prediction of all classifiers by their alpha
alpha = np.array(self.__alpha__)
h = np.zeros([self.__n_base__, X.shape[0]])
for m in range(0, self.__n_base__):
h[m] = alpha[m] * self.__classifiers__[m].predict(X)
return np.sum(h, axis=0)
| [
"math.log",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"copy.deepcopy",
"math.exp"
] | [((495, 536), 'numpy.zeros', 'np.zeros', (['[self.__max_base__, X.shape[0]]'], {}), '([self.__max_base__, X.shape[0]])\n', (503, 536), True, 'import numpy as np\n'), ((1986, 2010), 'numpy.array', 'np.array', (['self.__alpha__'], {}), '(self.__alpha__)\n', (1994, 2010), True, 'import numpy as np\n'), ((2023, 2062), 'numpy.zeros', 'np.zeros', (['[self.__n_base__, X.shape[0]]'], {}), '([self.__n_base__, X.shape[0]])\n', (2031, 2062), True, 'import numpy as np\n'), ((2187, 2204), 'numpy.sum', 'np.sum', (['h'], {'axis': '(0)'}), '(h, axis=0)\n', (2193, 2204), True, 'import numpy as np\n'), ((737, 776), 'copy.deepcopy', 'copy.deepcopy', (['self.__base_classifier__'], {}), '(self.__base_classifier__)\n', (750, 776), False, 'import copy\n'), ((998, 1023), 'numpy.zeros', 'np.zeros', (['y_predict.shape'], {}), '(y_predict.shape)\n', (1006, 1023), True, 'import numpy as np\n'), ((1248, 1257), 'numpy.sum', 'np.sum', (['h'], {}), '(h)\n', (1254, 1257), True, 'import numpy as np\n'), ((1616, 1638), 'numpy.zeros', 'np.zeros', (['[X.shape[0]]'], {}), '([X.shape[0]])\n', (1624, 1638), True, 'import numpy as np\n'), ((1782, 1791), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (1788, 1791), True, 'import numpy as np\n'), ((1334, 1359), 'math.log', 'math.log', (['(1 / epsilon - 1)'], {}), '(1 / epsilon - 1)\n', (1342, 1359), False, 'import math\n'), ((1715, 1765), 'math.exp', 'math.exp', (['(-self.__alpha__[m] * y[i] * y_predict[i])'], {}), '(-self.__alpha__[m] * y[i] * y_predict[i])\n', (1723, 1765), False, 'import math\n')] |
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
import model_compression_toolkit as cmo
keras = tf.keras
layers = keras.layers
hw_model = cmo.hardware_representation
class UniformRangeSelectionActivationTest(BaseKerasFeatureNetworkTest):
def __init__(self, unit_test, activation_threshold_method):
super().__init__(unit_test)
self.activation_threshold_method = activation_threshold_method
def generate_inputs(self):
return [np.random.uniform(low=-7, high=7, size=in_shape) for in_shape in self.get_input_shapes()]
def get_quantization_config(self):
return cmo.QuantizationConfig(activation_error_method=self.activation_threshold_method,
activation_n_bits=8)
def get_fw_hw_model(self):
qco = hw_model.QuantizationConfigOptions([hw_model.OpQuantizationConfig(activation_quantization_method=hw_model.QuantizationMethod.UNIFORM,
weights_quantization_method=hw_model.QuantizationMethod.POWER_OF_TWO,
activation_n_bits=8,
weights_n_bits=8,
weights_per_channel_threshold=True,
enable_weights_quantization=True,
enable_activation_quantization=True)])
return hw_model.FrameworkHardwareModel(hw_model.HardwareModel(qco))
def create_networks(self):
inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
x = layers.ReLU()(inputs)
outputs = tf.add(x, -1) # to get negative values in activation to test signed symmetric quantization
return keras.Model(inputs=inputs, outputs=outputs)
def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
# verify quantization range contains zero
fake_layer_input_args = quantized_model.layers[1].inbound_nodes[0].call_kwargs
fake_layer_add_args = quantized_model.layers[5].inbound_nodes[0].call_kwargs
input_layer_min, input_layer_max = fake_layer_input_args['min'], fake_layer_input_args['max']
add_layer_min, add_layer_max = fake_layer_add_args['min'], fake_layer_add_args['max']
self.unit_test.assertTrue(input_layer_min <= 0.0 <= input_layer_max,
msg=f"0.0 is not within the quantization range ({input_layer_min}, {input_layer_max}) "
f"for Input layer.")
self.unit_test.assertTrue(add_layer_min <= 0.0 <= add_layer_max,
msg=f"0.0 is not within the quantization range ({add_layer_min}, {add_layer_max}) "
f"for Relu layer.")
| [
"model_compression_toolkit.QuantizationConfig",
"tensorflow.add",
"numpy.random.uniform"
] | [((1406, 1512), 'model_compression_toolkit.QuantizationConfig', 'cmo.QuantizationConfig', ([], {'activation_error_method': 'self.activation_threshold_method', 'activation_n_bits': '(8)'}), '(activation_error_method=self.\n activation_threshold_method, activation_n_bits=8)\n', (1428, 1512), True, 'import model_compression_toolkit as cmo\n'), ((2592, 2605), 'tensorflow.add', 'tf.add', (['x', '(-1)'], {}), '(x, -1)\n', (2598, 2605), True, 'import tensorflow as tf\n'), ((1261, 1309), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-7)', 'high': '(7)', 'size': 'in_shape'}), '(low=-7, high=7, size=in_shape)\n', (1278, 1309), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import numpy.testing as npt
from textworld.generator import make_world, make_small_map, make_world_with
from textworld.logic import Variable, Proposition
def test_make_world_no_rng():
world = make_world(1)
assert world is not None
def test_make_small_map_too_big():
# small maps have max size
npt.assert_raises(ValueError, make_small_map, n_rooms=6)
def test_make_small_map():
world = make_small_map(n_rooms=4)
assert world is not None
def test_make_world_with():
r1 = Variable("r_0", "r")
P = Variable('P')
world = make_world_with(rooms=[r1])
assert Proposition('at', [P, r1]) in world.facts
| [
"textworld.generator.make_world",
"numpy.testing.assert_raises",
"textworld.generator.make_small_map",
"textworld.generator.make_world_with",
"textworld.logic.Variable",
"textworld.logic.Proposition"
] | [((296, 309), 'textworld.generator.make_world', 'make_world', (['(1)'], {}), '(1)\n', (306, 309), False, 'from textworld.generator import make_world, make_small_map, make_world_with\n'), ((411, 467), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError', 'make_small_map'], {'n_rooms': '(6)'}), '(ValueError, make_small_map, n_rooms=6)\n', (428, 467), True, 'import numpy.testing as npt\n'), ((509, 534), 'textworld.generator.make_small_map', 'make_small_map', ([], {'n_rooms': '(4)'}), '(n_rooms=4)\n', (523, 534), False, 'from textworld.generator import make_world, make_small_map, make_world_with\n'), ((603, 623), 'textworld.logic.Variable', 'Variable', (['"""r_0"""', '"""r"""'], {}), "('r_0', 'r')\n", (611, 623), False, 'from textworld.logic import Variable, Proposition\n'), ((632, 645), 'textworld.logic.Variable', 'Variable', (['"""P"""'], {}), "('P')\n", (640, 645), False, 'from textworld.logic import Variable, Proposition\n'), ((658, 685), 'textworld.generator.make_world_with', 'make_world_with', ([], {'rooms': '[r1]'}), '(rooms=[r1])\n', (673, 685), False, 'from textworld.generator import make_world, make_small_map, make_world_with\n'), ((697, 723), 'textworld.logic.Proposition', 'Proposition', (['"""at"""', '[P, r1]'], {}), "('at', [P, r1])\n", (708, 723), False, 'from textworld.logic import Variable, Proposition\n')] |
from __future__ import absolute_import, division, print_function
import numpy as np
__all__ = ['scale_mag_as_flux', 'flux_to_mag', 'mag_to_flux',]
def scale_mag_as_flux(mag, flux_scale=1.0):
"""
Identical to flux_to_mag(mag_to_flux(mag)*flux_scale)
"""
return mag - 2.5*np.log10(flux_scale)
def flux_to_mag(flux, zeropoint_mag=0.0, from_unit=None, to_unit=None):
if from_unit=='nMgy':
zeropoint_mag=22.5
return zeropoint_mag-2.5*np.log10(flux)
def mag_to_flux(mag, zeropoint_mag=0.0, from_unit=None, to_unit=None):
if to_unit=='nMgy':
zeropoint_mag=22.5
return np.power(10.0, -0.4*(mag - zeropoint_mag)) | [
"numpy.log10",
"numpy.power"
] | [((614, 658), 'numpy.power', 'np.power', (['(10.0)', '(-0.4 * (mag - zeropoint_mag))'], {}), '(10.0, -0.4 * (mag - zeropoint_mag))\n', (622, 658), True, 'import numpy as np\n'), ((289, 309), 'numpy.log10', 'np.log10', (['flux_scale'], {}), '(flux_scale)\n', (297, 309), True, 'import numpy as np\n'), ((465, 479), 'numpy.log10', 'np.log10', (['flux'], {}), '(flux)\n', (473, 479), True, 'import numpy as np\n')] |
import os
import pickle
import numpy as np
import pandas as pd
from utils import data
def preprocess(df: pd.DataFrame, shuffle: bool = True, scale_label: bool = True):
"""Apply preprocessing steps on the pandas dataframe.
Arguments:
df {pd.DataFrame} -- Catalog dataframe to preprocess
Returns:
pd.DataFrame -- Preprocessed dataframe
"""
df = df.replace('nan',np.NaN)
if scale_label:
df = normalize_ghi(df)
if shuffle:
df = shuffle_df(df)
return df
def normalize_ghi(df: pd.DataFrame):
"""Standardize the GHI values using the mean and standard deviation
of the observed GHI values.
Arguments:
df {pd.DataFrame} -- Catalog dataframe to standardize.
Returns:
pd.DataFrame -- Dataframe with standardized GHI values
"""
df_ghi = df.filter(regex=("_GHI")) # Select all GHI columns
normalized_df=(df_ghi-data.GHI_MEAN)/data.GHI_STD
pd.options.mode.chained_assignment = None # Disable chained_assignment warning for the update operation
df.update(normalized_df) # Replace normalized columns in the original dataframe
pd.options.mode.chained_assignment = 'warn' # Turn warning back on
return df
def unnormalize_ghi(ghis: np.ndarray):
"""Unstandardize the GHI values using the mean and standard deviation
of the observed GHI values.
Arguments:
ghis {np.ndarray} -- Array of GHI values to unstandardize.
Returns:
np.ndarray -- Array of GHI values with unstandardized GHI values
"""
return ghis * data.GHI_STD + data.GHI_MEAN
def shuffle_df(df: pd.DataFrame):
"""Shuffle the dataframe while keeping days together
Arguments:
df {pd.DataFrame} -- Catalog dataframe to standardize.
Returns:
pd.DataFrame -- Shuffled dataframe
"""
df['just_date'] = df.index.date
groups = [df for _, df in df.groupby('just_date')]
np.random.shuffle(groups)
df = pd.concat(groups).reset_index(drop=False)
df = df.drop('just_date', axis=1).set_index('iso-datetime')
return df
| [
"pandas.concat",
"numpy.random.shuffle"
] | [((1969, 1994), 'numpy.random.shuffle', 'np.random.shuffle', (['groups'], {}), '(groups)\n', (1986, 1994), True, 'import numpy as np\n'), ((2004, 2021), 'pandas.concat', 'pd.concat', (['groups'], {}), '(groups)\n', (2013, 2021), True, 'import pandas as pd\n')] |
import torch
import numpy as np
from loss_functions.ND_Crossentropy import CrossentropyND
from loss_functions.topk_loss import TopKLoss
from torch import nn
def softmax_helper(x):
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
def get_tp_fp_fn(net_output, gt, axes=None, mask=None):
"""
net_output must be (b, c, x, y(, z)))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z)))
:param net_output:
:param gt:
:param axes:
:param mask:
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
return tp, fp, fn
def sum_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
def mean_tensor(inp, axes, keepdim=False):
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.mean(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.mean(int(ax))
return inp
class SoftDiceLoss(nn.Module):
def __init__(self, smooth=1., apply_nonlin=None, batch_dice=False, do_bg=True, smooth_in_nom=True, background_weight=1, rebalance_weights=None):
"""
hahaa no documentation for you today
:param smooth:
:param apply_nonlin:
:param batch_dice:
:param do_bg:
:param smooth_in_nom:
:param background_weight:
:param rebalance_weights:
"""
super(SoftDiceLoss, self).__init__()
if not do_bg:
assert background_weight == 1, "if there is no bg, then set background weight to 1 you dummy"
self.rebalance_weights = rebalance_weights
self.background_weight = background_weight
if smooth_in_nom:
self.smooth_in_nom = smooth
else:
self.smooth_in_nom = 0
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.y_onehot = None
def forward(self, x, y):
with torch.no_grad():
y = y.long()
shp_x = x.shape
shp_y = y.shape
print("shp_x :",shp_x,"shp_y:",shp_y )
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
if len(shp_x) != len(shp_y):
y = y.view((shp_y[0], 1, *shp_y[1:]))
# now x and y should have shape (B, C, X, Y(, Z))) and (B, 1, X, Y(, Z))), respectively
y_onehot = torch.zeros(shp_x)
if x.device.type == "cuda":
y_onehot = y_onehot.cuda(x.device.index)
y_onehot.scatter_(1, y, 1)
if not self.do_bg:
x = x[:, 1:]
y_onehot = y_onehot[:, 1:]
if not self.batch_dice:
if self.background_weight != 1 or (self.rebalance_weights is not None):
raise NotImplementedError("nah son")
l = soft_dice(x, y_onehot, self.smooth, self.smooth_in_nom)
else:
l = soft_dice_per_batch_2(x, y_onehot, self.smooth, self.smooth_in_nom,
background_weight=self.background_weight,
rebalance_weights=self.rebalance_weights)
return l
def soft_dice_per_batch(net_output, gt, smooth=1., smooth_in_nom=1., background_weight=1):
axes = tuple([0] + list(range(2, len(net_output.size()))))
intersect = sum_tensor(net_output * gt, axes, keepdim=False)
denom = sum_tensor(net_output + gt, axes, keepdim=False)
weights = torch.ones(intersect.shape)
weights[0] = background_weight
if net_output.device.type == "cuda":
weights = weights.cuda(net_output.device.index)
result = (- ((2 * intersect + smooth_in_nom) / (denom + smooth)) * weights).mean()
return result
def soft_dice_per_batch_2(net_output, gt, smooth=1., smooth_in_nom=1., background_weight=1, rebalance_weights=None):
if rebalance_weights is not None and len(rebalance_weights) != gt.shape[1]:
rebalance_weights = rebalance_weights[1:] # this is the case when use_bg=False
axes = tuple([0] + list(range(2, len(net_output.size()))))
tp = sum_tensor(net_output * gt, axes, keepdim=False)
fn = sum_tensor((1 - net_output) * gt, axes, keepdim=False)
fp = sum_tensor(net_output * (1 - gt), axes, keepdim=False)
weights = torch.ones(tp.shape)
weights[0] = background_weight
if net_output.device.type == "cuda":
weights = weights.cuda(net_output.device.index)
if rebalance_weights is not None:
rebalance_weights = torch.from_numpy(rebalance_weights).float()
if net_output.device.type == "cuda":
rebalance_weights = rebalance_weights.cuda(net_output.device.index)
tp = tp * rebalance_weights
fn = fn * rebalance_weights
result = (- ((2 * tp + smooth_in_nom) / (2 * tp + fp + fn + smooth)) * weights).mean()
return result
def soft_dice_per_batch_3(net_output, gt, smooth=1., smooth_in_nom=1., background_weight=1, rebalance_weights=None):
if rebalance_weights is not None and len(rebalance_weights) != gt.shape[1]:
rebalance_weights = rebalance_weights[1:] # this is the case when use_bg=False
axes = tuple([0] + list(range(2, len(net_output.size()))))
tp = sum_tensor(net_output * gt, axes, keepdim=False)
fn = sum_tensor((1 - net_output) * gt, axes, keepdim=False)
fp = sum_tensor(net_output * (1 - gt), axes, keepdim=False)
weights = torch.ones(tp.shape)
weights[0] = background_weight
if net_output.device.type == "cuda":
weights = weights.cuda(net_output.device.index)
if rebalance_weights is not None:
rebalance_weights = torch.from_numpy(rebalance_weights).float()
if net_output.device.type == "cuda":
rebalance_weights = rebalance_weights.cuda(net_output.device.index)
tp = tp * rebalance_weights
fn = fn * rebalance_weights
result = (-((2 * tp + smooth_in_nom) / (2 * tp + fp + fn + smooth)) * weights).mean()
return result
def soft_dice(net_output, gt, smooth=1., smooth_in_nom=1.):
axes = tuple(range(2, len(net_output.size())))
intersect = sum_tensor(net_output * gt, axes, keepdim=False)
denom = sum_tensor(net_output + gt, axes, keepdim=False)
result = (- ((2 * intersect + smooth_in_nom) / (denom + smooth))).mean()
return result
class MultipleOutputLoss(nn.Module):
def __init__(self, loss, weight_factors=None):
"""
use this if you have several outputs that should predict the same y
:param loss:
:param weight_factors:
"""
super(MultipleOutputLoss, self).__init__()
self.weight_factors = weight_factors
self.loss = loss
def forward(self, x, y):
assert isinstance(x, (tuple, list)), "x must be either tuple or list"
if self.weight_factors is None:
weights = [1] * len(x)
else:
weights = self.weight_factors
l = weights[0] * self.loss(x[0], y)
for i in range(1, len(x)):
l += weights[i] * self.loss(x[i], y)
return l
class DC_and_CE_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum"):
super(DC_and_CE_loss, self).__init__()
self.aggregate = aggregate
self.ce = CrossentropyND(**ce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class DC_and_topk_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum"):
super(DC_and_topk_loss, self).__init__()
self.aggregate = aggregate
self.ce = TopKLoss(**ce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later?)
return result
class CrossentropyWithLossMask(nn.CrossEntropyLoss):
def __init__(self, k=None):
"""
This implementation ignores weight, ignore_index (use loss mask!) and reduction!
:param k:
"""
super(CrossentropyWithLossMask, self).__init__(weight=None, ignore_index=-100, reduction='none')
self.k = k
def forward(self, inp, target, loss_mask=None):
target = target.long()
inp = inp.float()
if loss_mask is not None:
loss_mask = loss_mask.float()
num_classes = inp.size()[1]
i0 = 1
i1 = 2
while i1 < len(inp.shape): # this is ugly but torch only allows to transpose two axes at once
inp = inp.transpose(i0, i1)
i0 += 1
i1 += 1
if not inp.is_contiguous():
inp = inp.contiguous()
inp = inp.view(target.shape[0], -1, num_classes)
target = target.view(target.shape[0], -1)
if loss_mask is not None:
loss_mask = loss_mask.view(target.shape[0], -1)
if self.k is not None:
if loss_mask is not None:
num_sel = torch.stack(tuple([i.sum() / self.k for i in torch.unbind(loss_mask, 0)]), 0).long()
loss = torch.stack(tuple([
torch.topk(super(CrossentropyWithLossMask, self).forward(inp[i], target[i])[loss_mask[i].byte()],
num_sel[i], sorted=False)[0].mean()
for i in range(target.shape[0])
])
)
else:
num_sel = [np.prod(inp.shape[2:]) / self.k] * inp.shape[0]
loss = torch.stack(tuple([
torch.topk(super(CrossentropyWithLossMask, self).forward(inp[i], target[i]),
num_sel[i], sorted=False)[0].mean()
for i in range(target.shape[0])
])
)
else:
if loss_mask is not None:
loss = torch.stack(tuple([
super(CrossentropyWithLossMask, self).forward(inp[i], target[i])[loss_mask[i].byte()].mean()
for i in range(target.shape[0])
])
)
else:
loss = torch.stack(tuple([
super(CrossentropyWithLossMask, self).forward(inp[i], target[i]).mean()
for i in range(target.shape[0])
])
)
loss = loss.mean()
return loss
| [
"numpy.prod",
"numpy.unique",
"torch.exp",
"torch.from_numpy",
"torch.unbind",
"loss_functions.topk_loss.TopKLoss",
"loss_functions.ND_Crossentropy.CrossentropyND",
"torch.no_grad",
"torch.zeros",
"torch.ones"
] | [((310, 330), 'torch.exp', 'torch.exp', (['(x - x_max)'], {}), '(x - x_max)\n', (319, 330), False, 'import torch\n'), ((5061, 5088), 'torch.ones', 'torch.ones', (['intersect.shape'], {}), '(intersect.shape)\n', (5071, 5088), False, 'import torch\n'), ((5875, 5895), 'torch.ones', 'torch.ones', (['tp.shape'], {}), '(tp.shape)\n', (5885, 5895), False, 'import torch\n'), ((6992, 7012), 'torch.ones', 'torch.ones', (['tp.shape'], {}), '(tp.shape)\n', (7002, 7012), False, 'import torch\n'), ((901, 916), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (914, 916), False, 'import torch\n'), ((4015, 4033), 'torch.zeros', 'torch.zeros', (['shp_x'], {}), '(shp_x)\n', (4026, 4033), False, 'import torch\n'), ((8846, 8873), 'loss_functions.ND_Crossentropy.CrossentropyND', 'CrossentropyND', ([], {}), '(**ce_kwargs)\n', (8860, 8873), False, 'from loss_functions.ND_Crossentropy import CrossentropyND\n'), ((9494, 9515), 'loss_functions.topk_loss.TopKLoss', 'TopKLoss', ([], {}), '(**ce_kwargs)\n', (9502, 9515), False, 'from loss_functions.topk_loss import TopKLoss\n'), ((1249, 1267), 'torch.zeros', 'torch.zeros', (['shp_x'], {}), '(shp_x)\n', (1260, 1267), False, 'import torch\n'), ((2045, 2060), 'numpy.unique', 'np.unique', (['axes'], {}), '(axes)\n', (2054, 2060), True, 'import numpy as np\n'), ((2324, 2339), 'numpy.unique', 'np.unique', (['axes'], {}), '(axes)\n', (2333, 2339), True, 'import numpy as np\n'), ((3595, 3610), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3608, 3610), False, 'import torch\n'), ((6094, 6129), 'torch.from_numpy', 'torch.from_numpy', (['rebalance_weights'], {}), '(rebalance_weights)\n', (6110, 6129), False, 'import torch\n'), ((7211, 7246), 'torch.from_numpy', 'torch.from_numpy', (['rebalance_weights'], {}), '(rebalance_weights)\n', (7227, 7246), False, 'import torch\n'), ((1614, 1637), 'torch.unbind', 'torch.unbind', (['tp'], {'dim': '(1)'}), '(tp, dim=1)\n', (1626, 1637), False, 'import torch\n'), ((1706, 1729), 'torch.unbind', 
'torch.unbind', (['fp'], {'dim': '(1)'}), '(fp, dim=1)\n', (1718, 1729), False, 'import torch\n'), ((1798, 1821), 'torch.unbind', 'torch.unbind', (['fn'], {'dim': '(1)'}), '(fn, dim=1)\n', (1810, 1821), False, 'import torch\n'), ((11524, 11546), 'numpy.prod', 'np.prod', (['inp.shape[2:]'], {}), '(inp.shape[2:])\n', (11531, 11546), True, 'import numpy as np\n'), ((11122, 11148), 'torch.unbind', 'torch.unbind', (['loss_mask', '(0)'], {}), '(loss_mask, 0)\n', (11134, 11148), False, 'import torch\n')] |
from typing import Deque
from random import sample
from matplotlib import pyplot as plt
import numpy as np
from torch import nn
class ReplayBuffer:
def __init__(self, capacity: int) -> None:
self.buffer = Deque([], maxlen=capacity)
def save(self, obs):
self.buffer.append(obs)
def get_batch(self, dim=256):
return sample(self.buffer, dim)
def __len__(self):
return len(self.buffer)
class DQN(nn.Module):
def __init__(self):
super(DQN, self).__init__()
k = 256
self.linear_relu_stack = nn.Sequential(
nn.Linear(2, k),
nn.BatchNorm1d(k),
nn.ReLU(),
nn.Linear(k, k),
nn.BatchNorm1d(k),
nn.ReLU(),
nn.Linear(k, 2),
)
def forward(self, x):
x[:, 0] = (x[:, 0] - (7.5 / 2)) / 7.5
x[:, 1] = (x[:, 1] - (50)) / 100
logits = self.linear_relu_stack(x)
return logits
def eps(ep):
eps_start = 0.4
eps_end = 0.001
n_episodes = 250000
eps_decay = int(np.ceil(n_episodes / 3)) # /3
decay_ep = n_episodes - n_episodes / 5
no_eps = False
return eps_end + max(
(eps_start - eps_end) * (1 - np.exp((ep - decay_ep) / eps_decay)),
0,
)
data = [eps(i) for i in range(0, 250000)]
plt.title("eps")
plt.xlabel("ep")
plt.ylabel("eps")
plt.plot([i for i in range(0, len(data))], data)
plt.show()
| [
"typing.Deque",
"random.sample",
"numpy.ceil",
"torch.nn.ReLU",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1322, 1338), 'matplotlib.pyplot.title', 'plt.title', (['"""eps"""'], {}), "('eps')\n", (1331, 1338), True, 'from matplotlib import pyplot as plt\n'), ((1339, 1355), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ep"""'], {}), "('ep')\n", (1349, 1355), True, 'from matplotlib import pyplot as plt\n'), ((1356, 1373), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""eps"""'], {}), "('eps')\n", (1366, 1373), True, 'from matplotlib import pyplot as plt\n'), ((1423, 1433), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1431, 1433), True, 'from matplotlib import pyplot as plt\n'), ((219, 245), 'typing.Deque', 'Deque', (['[]'], {'maxlen': 'capacity'}), '([], maxlen=capacity)\n', (224, 245), False, 'from typing import Deque\n'), ((354, 378), 'random.sample', 'sample', (['self.buffer', 'dim'], {}), '(self.buffer, dim)\n', (360, 378), False, 'from random import sample\n'), ((1066, 1089), 'numpy.ceil', 'np.ceil', (['(n_episodes / 3)'], {}), '(n_episodes / 3)\n', (1073, 1089), True, 'import numpy as np\n'), ((595, 610), 'torch.nn.Linear', 'nn.Linear', (['(2)', 'k'], {}), '(2, k)\n', (604, 610), False, 'from torch import nn\n'), ((624, 641), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['k'], {}), '(k)\n', (638, 641), False, 'from torch import nn\n'), ((655, 664), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (662, 664), False, 'from torch import nn\n'), ((678, 693), 'torch.nn.Linear', 'nn.Linear', (['k', 'k'], {}), '(k, k)\n', (687, 693), False, 'from torch import nn\n'), ((707, 724), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['k'], {}), '(k)\n', (721, 724), False, 'from torch import nn\n'), ((738, 747), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (745, 747), False, 'from torch import nn\n'), ((761, 776), 'torch.nn.Linear', 'nn.Linear', (['k', '(2)'], {}), '(k, 2)\n', (770, 776), False, 'from torch import nn\n'), ((1222, 1257), 'numpy.exp', 'np.exp', (['((ep - decay_ep) / eps_decay)'], {}), '((ep - decay_ep) / eps_decay)\n', (1228, 1257), True, 'import numpy as 
np\n')] |
import suspect
import numpy
def test_null_transform():
fid = numpy.ones(128, 'complex')
data = suspect.MRSData(fid, 1.0 / 128, 123)
transformed_data = suspect.processing.frequency_correction.transform_fid(data, 0, 0)
assert type(transformed_data) == suspect.MRSData
def test_water_peak_alignment_misshape():
spectrum = numpy.zeros(128, 'complex')
spectrum[0] = 1
fids = suspect.MRSData(numpy.zeros((16, 128), 'complex'), 1.0 / 128, 123)
for i in range(fids.shape[0]):
rolled_spectrum = numpy.roll(spectrum, i)
fids[i] = numpy.fft.ifft(rolled_spectrum)
current_fid = numpy.reshape(fids[i], (1, 128))
frequency_shift = suspect.processing.frequency_correction.residual_water_alignment(current_fid)
numpy.testing.assert_almost_equal(frequency_shift, i)
def test_water_peak_alignment():
spectrum = numpy.zeros(128, 'complex')
spectrum[0] = 1
fids = suspect.MRSData(numpy.zeros((16, 128), 'complex'), 1.0 / 128, 123)
for i in range(fids.shape[0]):
rolled_spectrum = numpy.roll(spectrum, i)
fids[i] = numpy.fft.ifft(rolled_spectrum)
frequency_shift = suspect.processing.frequency_correction.residual_water_alignment(fids[i])
numpy.testing.assert_almost_equal(frequency_shift, i)
def test_spectral_registration():
time_axis = numpy.arange(0, 0.512, 5e-4)
target_fid = suspect.MRSData(suspect.basis.gaussian(time_axis, 0, 0, 50.0), 5e-4, 123)
for i in range(1, 15):
input_fid = suspect.MRSData(suspect.basis.gaussian(time_axis, i, 0, 50.0), 5e-4, 123)
frequency_shift, phase_shift = suspect.processing.frequency_correction.spectral_registration(input_fid, target_fid)
numpy.testing.assert_allclose(frequency_shift, i)
def test_compare_frequency_correction():
test_data = suspect.io.load_twix("tests/test_data/siemens/twix_vb.dat")
test_data = test_data.inherit(numpy.average(test_data, axis=1, weights=suspect.processing.channel_combination.svd_weighting(numpy.average(test_data, axis=0))))
sr_target = test_data[0]
for i in range(test_data.shape[0]):
current_fid = test_data[i]
wpa_fs = suspect.processing.frequency_correction.residual_water_alignment(current_fid)
sr_fs = suspect.processing.frequency_correction.spectral_registration(current_fid, sr_target)[0]
numpy.testing.assert_allclose(wpa_fs, sr_fs, atol=current_fid.df)
def test_frequency_transform():
spectrum = numpy.zeros(128, 'complex')
spectrum[0] = 1
for i in range(16):
rolled_spectrum = numpy.roll(spectrum, i)
fid = suspect.MRSData(numpy.fft.ifft(rolled_spectrum), 1.0 / 128, 123)
transformed_fid = suspect.processing.frequency_correction.transform_fid(fid, -i, 0)
transformed_spectrum = numpy.fft.fft(transformed_fid)
numpy.testing.assert_almost_equal(transformed_spectrum, spectrum)
def test_apodize():
data = suspect.MRSData(numpy.ones(1024), 5e-4, 123.456)
raw_spectrum = numpy.fft.fft(data)
apodized_data = suspect.processing.apodize(data, suspect.processing.gaussian_window, {"line_broadening": data.df * 8})
spectrum = numpy.fft.fft(apodized_data)
numpy.testing.assert_allclose(spectrum[4].real, 0.5 * numpy.amax(spectrum), rtol=0.01)
numpy.testing.assert_allclose(numpy.sum(spectrum), numpy.sum(raw_spectrum))
def test_gaussian_denoising():
# constant signal denoised should be the same as original
data = numpy.ones(128)
denoised_data = suspect.processing.denoising.sliding_gaussian(data, 11)
numpy.testing.assert_almost_equal(data, denoised_data)
def test_water_suppression():
data = suspect.io.load_twix("tests/test_data/siemens/twix_vb.dat")
channel_combined_data = data.inherit(numpy.average(data, axis=1))
components = suspect.processing.water_suppression.hsvd(channel_combined_data[10], 4, int(data.np / 2))
fid = suspect.processing.water_suppression.construct_fid(components, data.time_axis())
assert len(components) == 4
| [
"suspect.processing.frequency_correction.transform_fid",
"numpy.arange",
"numpy.reshape",
"suspect.basis.gaussian",
"numpy.testing.assert_allclose",
"numpy.fft.fft",
"suspect.MRSData",
"numpy.testing.assert_almost_equal",
"suspect.processing.apodize",
"suspect.processing.frequency_correction.spect... | [((68, 94), 'numpy.ones', 'numpy.ones', (['(128)', '"""complex"""'], {}), "(128, 'complex')\n", (78, 94), False, 'import numpy\n'), ((106, 142), 'suspect.MRSData', 'suspect.MRSData', (['fid', '(1.0 / 128)', '(123)'], {}), '(fid, 1.0 / 128, 123)\n', (121, 142), False, 'import suspect\n'), ((166, 231), 'suspect.processing.frequency_correction.transform_fid', 'suspect.processing.frequency_correction.transform_fid', (['data', '(0)', '(0)'], {}), '(data, 0, 0)\n', (219, 231), False, 'import suspect\n'), ((344, 371), 'numpy.zeros', 'numpy.zeros', (['(128)', '"""complex"""'], {}), "(128, 'complex')\n", (355, 371), False, 'import numpy\n'), ((876, 903), 'numpy.zeros', 'numpy.zeros', (['(128)', '"""complex"""'], {}), "(128, 'complex')\n", (887, 903), False, 'import numpy\n'), ((1351, 1381), 'numpy.arange', 'numpy.arange', (['(0)', '(0.512)', '(0.0005)'], {}), '(0, 0.512, 0.0005)\n', (1363, 1381), False, 'import numpy\n'), ((1833, 1892), 'suspect.io.load_twix', 'suspect.io.load_twix', (['"""tests/test_data/siemens/twix_vb.dat"""'], {}), "('tests/test_data/siemens/twix_vb.dat')\n", (1853, 1892), False, 'import suspect\n'), ((2484, 2511), 'numpy.zeros', 'numpy.zeros', (['(128)', '"""complex"""'], {}), "(128, 'complex')\n", (2495, 2511), False, 'import numpy\n'), ((3015, 3034), 'numpy.fft.fft', 'numpy.fft.fft', (['data'], {}), '(data)\n', (3028, 3034), False, 'import numpy\n'), ((3055, 3162), 'suspect.processing.apodize', 'suspect.processing.apodize', (['data', 'suspect.processing.gaussian_window', "{'line_broadening': data.df * 8}"], {}), "(data, suspect.processing.gaussian_window, {\n 'line_broadening': data.df * 8})\n", (3081, 3162), False, 'import suspect\n'), ((3173, 3201), 'numpy.fft.fft', 'numpy.fft.fft', (['apodized_data'], {}), '(apodized_data)\n', (3186, 3201), False, 'import numpy\n'), ((3479, 3494), 'numpy.ones', 'numpy.ones', (['(128)'], {}), '(128)\n', (3489, 3494), False, 'import numpy\n'), ((3515, 3570), 
'suspect.processing.denoising.sliding_gaussian', 'suspect.processing.denoising.sliding_gaussian', (['data', '(11)'], {}), '(data, 11)\n', (3560, 3570), False, 'import suspect\n'), ((3575, 3629), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['data', 'denoised_data'], {}), '(data, denoised_data)\n', (3608, 3629), False, 'import numpy\n'), ((3673, 3732), 'suspect.io.load_twix', 'suspect.io.load_twix', (['"""tests/test_data/siemens/twix_vb.dat"""'], {}), "('tests/test_data/siemens/twix_vb.dat')\n", (3693, 3732), False, 'import suspect\n'), ((419, 452), 'numpy.zeros', 'numpy.zeros', (['(16, 128)', '"""complex"""'], {}), "((16, 128), 'complex')\n", (430, 452), False, 'import numpy\n'), ((531, 554), 'numpy.roll', 'numpy.roll', (['spectrum', 'i'], {}), '(spectrum, i)\n', (541, 554), False, 'import numpy\n'), ((573, 604), 'numpy.fft.ifft', 'numpy.fft.ifft', (['rolled_spectrum'], {}), '(rolled_spectrum)\n', (587, 604), False, 'import numpy\n'), ((627, 659), 'numpy.reshape', 'numpy.reshape', (['fids[i]', '(1, 128)'], {}), '(fids[i], (1, 128))\n', (640, 659), False, 'import numpy\n'), ((686, 763), 'suspect.processing.frequency_correction.residual_water_alignment', 'suspect.processing.frequency_correction.residual_water_alignment', (['current_fid'], {}), '(current_fid)\n', (750, 763), False, 'import suspect\n'), ((772, 825), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['frequency_shift', 'i'], {}), '(frequency_shift, i)\n', (805, 825), False, 'import numpy\n'), ((951, 984), 'numpy.zeros', 'numpy.zeros', (['(16, 128)', '"""complex"""'], {}), "((16, 128), 'complex')\n", (962, 984), False, 'import numpy\n'), ((1063, 1086), 'numpy.roll', 'numpy.roll', (['spectrum', 'i'], {}), '(spectrum, i)\n', (1073, 1086), False, 'import numpy\n'), ((1105, 1136), 'numpy.fft.ifft', 'numpy.fft.ifft', (['rolled_spectrum'], {}), '(rolled_spectrum)\n', (1119, 1136), False, 'import numpy\n'), ((1163, 1236), 
'suspect.processing.frequency_correction.residual_water_alignment', 'suspect.processing.frequency_correction.residual_water_alignment', (['fids[i]'], {}), '(fids[i])\n', (1227, 1236), False, 'import suspect\n'), ((1245, 1298), 'numpy.testing.assert_almost_equal', 'numpy.testing.assert_almost_equal', (['frequency_shift', 'i'], {}), '(frequency_shift, i)\n', (1278, 1298), False, 'import numpy\n'), ((1413, 1458), 'suspect.basis.gaussian', 'suspect.basis.gaussian', (['time_axis', '(0)', '(0)', '(50.0)'], {}), '(time_axis, 0, 0, 50.0)\n', (1435, 1458), False, 'import suspect\n'), ((1631, 1719), 'suspect.processing.frequency_correction.spectral_registration', 'suspect.processing.frequency_correction.spectral_registration', (['input_fid', 'target_fid'], {}), '(input_fid,\n target_fid)\n', (1692, 1719), False, 'import suspect\n'), ((1724, 1773), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['frequency_shift', 'i'], {}), '(frequency_shift, i)\n', (1753, 1773), False, 'import numpy\n'), ((2178, 2255), 'suspect.processing.frequency_correction.residual_water_alignment', 'suspect.processing.frequency_correction.residual_water_alignment', (['current_fid'], {}), '(current_fid)\n', (2242, 2255), False, 'import suspect\n'), ((2369, 2434), 'numpy.testing.assert_allclose', 'numpy.testing.assert_allclose', (['wpa_fs', 'sr_fs'], {'atol': 'current_fid.df'}), '(wpa_fs, sr_fs, atol=current_fid.df)\n', (2398, 2434), False, 'import numpy\n'), ((2583, 2606), 'numpy.roll', 'numpy.roll', (['spectrum', 'i'], {}), '(spectrum, i)\n', (2593, 2606), False, 'import numpy\n'), ((2712, 2777), 'suspect.processing.frequency_correction.transform_fid', 'suspect.processing.frequency_correction.transform_fid', (['fid', '(-i)', '(0)'], {}), '(fid, -i, 0)\n', (2765, 2777), False, 'import suspect\n'), ((2809, 2839), 'numpy.fft.fft', 'numpy.fft.fft', (['transformed_fid'], {}), '(transformed_fid)\n', (2822, 2839), False, 'import numpy\n'), ((2848, 2913), 'numpy.testing.assert_almost_equal', 
'numpy.testing.assert_almost_equal', (['transformed_spectrum', 'spectrum'], {}), '(transformed_spectrum, spectrum)\n', (2881, 2913), False, 'import numpy\n'), ((2963, 2979), 'numpy.ones', 'numpy.ones', (['(1024)'], {}), '(1024)\n', (2973, 2979), False, 'import numpy\n'), ((3327, 3346), 'numpy.sum', 'numpy.sum', (['spectrum'], {}), '(spectrum)\n', (3336, 3346), False, 'import numpy\n'), ((3348, 3371), 'numpy.sum', 'numpy.sum', (['raw_spectrum'], {}), '(raw_spectrum)\n', (3357, 3371), False, 'import numpy\n'), ((3774, 3801), 'numpy.average', 'numpy.average', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (3787, 3801), False, 'import numpy\n'), ((1534, 1579), 'suspect.basis.gaussian', 'suspect.basis.gaussian', (['time_axis', 'i', '(0)', '(50.0)'], {}), '(time_axis, i, 0, 50.0)\n', (1556, 1579), False, 'import suspect\n'), ((2272, 2361), 'suspect.processing.frequency_correction.spectral_registration', 'suspect.processing.frequency_correction.spectral_registration', (['current_fid', 'sr_target'], {}), '(current_fid,\n sr_target)\n', (2333, 2361), False, 'import suspect\n'), ((2637, 2668), 'numpy.fft.ifft', 'numpy.fft.ifft', (['rolled_spectrum'], {}), '(rolled_spectrum)\n', (2651, 2668), False, 'import numpy\n'), ((3260, 3280), 'numpy.amax', 'numpy.amax', (['spectrum'], {}), '(spectrum)\n', (3270, 3280), False, 'import numpy\n'), ((2021, 2053), 'numpy.average', 'numpy.average', (['test_data'], {'axis': '(0)'}), '(test_data, axis=0)\n', (2034, 2053), False, 'import numpy\n')] |
# import the necessary libraries
import cv2
import imutils
import numpy as np
class AspectAwarePreprocessor:
def __init__(self, width: int, height: int, inter: int = cv2.INTER_AREA):
# store the target image width, height, and interpolation
# method used when resizing
self.width = width
self.height = height
self.inter = inter
def preprocess(self, image: np.ndarray) -> np.ndarray:
# grab the dimensions of the image and then initialize
# the deltas to use when caspropping
(h, w, depth) = image.shape
dW = 0
dH = 0
# if the width is smaller than the height, then resize
# along the width (i.e., the smaller dimension) and then
# update the deltas to crop the height to the desired
# dimension
if w < h:
if depth == 3:
image = imutils.resize(image, width=self.width, inter=self.inter)
dH = int((image.shape[0] - self.height) / 2.0)
elif depth == 4:
image_rgb = imutils.resize(image[:3], width=self.width, inter=self.inter)
image_nir = imutils.resize(image[3], width=self.width, inter=self.inter)
dH = int((image_rgb.shape[0] - self.height) / 2.0)
elif depth == 5:
image_rgb = imutils.resize(image[:3], width=self.width, inter=self.inter)
image_nir = imutils.resize(image[3], width=self.width, inter=self.inter)
image_re = imutils.resize(image[4], width=self.width, inter=self.inter)
dH = int((image_rgb.shape[0] - self.height) / 2.0)
# otherwise, the height is smaller than the width so
# resize along the height and then update the deltas
# crop along the width
else:
if depth == 3:
image = imutils.resize(image, height=self.height, inter=self.inter)
dW = int((image.shape[1] - self.width) / 2.0)
elif depth == 4:
image_rgb = imutils.resize(image[:, :, :3], height=self.height, inter=self.inter)
image_nir = imutils.resize(image[:, :, 3], height=self.height, inter=self.inter)
dW = int((image_rgb.shape[1] - self.width) / 2.0)
elif depth == 5:
image_rgb = imutils.resize(image[:, :, :3], height=self.height, inter=self.inter)
image_nir = imutils.resize(image[:, :, 3], height=self.height, inter=self.inter)
image_re = imutils.resize(image[:, :, :4], height=self.height, inter=self.inter)
dW = int((image_rgb.shape[1] - self.width) / 2.0)
# now that our images have been resized, we need to
# re-grab the width and height, followed by performing
# the crop
if depth == 3:
(h, w) = image.shape[:2]
image = image[dH:h - dH, dW:w - dW]
elif depth == 4:
(h, w) = image_rgb.shape[:2]
image_rgb = image_rgb[dH:h - dH, dW:w - dW]
image_nir = image_nir[dH:h - dH, dW:w - dW]
elif depth == 5:
(h, w) = image_rgb.shape[:2]
image_rgb = image_rgb[dH:h - dH, dW:w - dW]
image_nir = image_nir[dH:h - dH, dW:w - dW]
image_re = image_re[dH:h - dH, dW:w - dW]
# finally, resize the image to the provided spatial
# dimensions to ensure our output image is always a fixed
# size
if depth == 3:
image_final = cv2.resize(image, (self.width, self.height), interpolation=self.inter)
elif depth == 4:
image_rgb = cv2.resize(image_rgb, (self.width, self.height), interpolation=self.inter)
image_nir = np.expand_dims(
cv2.resize(image_nir, (self.width, self.height), interpolation=self.inter),
axis=-1,
)
image_final = np.concatenate([image_rgb, image_nir], axis=-1)
elif depth == 5:
image_rgb = cv2.resize(image_rgb, (self.width, self.height), interpolation=self.inter)
image_nir = np.expand_dims(
cv2.resize(image_nir, (self.width, self.height), interpolation=self.inter),
axis=-1,
)
image_re = np.expand_dims(
cv2.resize(image_re, (self.width, self.height), interpolation=self.inter),
axis=-1,
)
image_final = np.concatenate([image_rgb, image_nir, image_re], axis=-1)
return image_final
| [
"imutils.resize",
"cv2.resize",
"numpy.concatenate"
] | [((3515, 3585), 'cv2.resize', 'cv2.resize', (['image', '(self.width, self.height)'], {'interpolation': 'self.inter'}), '(image, (self.width, self.height), interpolation=self.inter)\n', (3525, 3585), False, 'import cv2\n'), ((888, 945), 'imutils.resize', 'imutils.resize', (['image'], {'width': 'self.width', 'inter': 'self.inter'}), '(image, width=self.width, inter=self.inter)\n', (902, 945), False, 'import imutils\n'), ((1865, 1924), 'imutils.resize', 'imutils.resize', (['image'], {'height': 'self.height', 'inter': 'self.inter'}), '(image, height=self.height, inter=self.inter)\n', (1879, 1924), False, 'import imutils\n'), ((3635, 3709), 'cv2.resize', 'cv2.resize', (['image_rgb', '(self.width, self.height)'], {'interpolation': 'self.inter'}), '(image_rgb, (self.width, self.height), interpolation=self.inter)\n', (3645, 3709), False, 'import cv2\n'), ((3907, 3954), 'numpy.concatenate', 'np.concatenate', (['[image_rgb, image_nir]'], {'axis': '(-1)'}), '([image_rgb, image_nir], axis=-1)\n', (3921, 3954), True, 'import numpy as np\n'), ((1066, 1127), 'imutils.resize', 'imutils.resize', (['image[:3]'], {'width': 'self.width', 'inter': 'self.inter'}), '(image[:3], width=self.width, inter=self.inter)\n', (1080, 1127), False, 'import imutils\n'), ((1156, 1216), 'imutils.resize', 'imutils.resize', (['image[3]'], {'width': 'self.width', 'inter': 'self.inter'}), '(image[3], width=self.width, inter=self.inter)\n', (1170, 1216), False, 'import imutils\n'), ((2044, 2113), 'imutils.resize', 'imutils.resize', (['image[:, :, :3]'], {'height': 'self.height', 'inter': 'self.inter'}), '(image[:, :, :3], height=self.height, inter=self.inter)\n', (2058, 2113), False, 'import imutils\n'), ((2142, 2210), 'imutils.resize', 'imutils.resize', (['image[:, :, 3]'], {'height': 'self.height', 'inter': 'self.inter'}), '(image[:, :, 3], height=self.height, inter=self.inter)\n', (2156, 2210), False, 'import imutils\n'), ((3766, 3840), 'cv2.resize', 'cv2.resize', (['image_nir', '(self.width, 
self.height)'], {'interpolation': 'self.inter'}), '(image_nir, (self.width, self.height), interpolation=self.inter)\n', (3776, 3840), False, 'import cv2\n'), ((4004, 4078), 'cv2.resize', 'cv2.resize', (['image_rgb', '(self.width, self.height)'], {'interpolation': 'self.inter'}), '(image_rgb, (self.width, self.height), interpolation=self.inter)\n', (4014, 4078), False, 'import cv2\n'), ((4445, 4502), 'numpy.concatenate', 'np.concatenate', (['[image_rgb, image_nir, image_re]'], {'axis': '(-1)'}), '([image_rgb, image_nir, image_re], axis=-1)\n', (4459, 4502), True, 'import numpy as np\n'), ((1341, 1402), 'imutils.resize', 'imutils.resize', (['image[:3]'], {'width': 'self.width', 'inter': 'self.inter'}), '(image[:3], width=self.width, inter=self.inter)\n', (1355, 1402), False, 'import imutils\n'), ((1431, 1491), 'imutils.resize', 'imutils.resize', (['image[3]'], {'width': 'self.width', 'inter': 'self.inter'}), '(image[3], width=self.width, inter=self.inter)\n', (1445, 1491), False, 'import imutils\n'), ((1519, 1579), 'imutils.resize', 'imutils.resize', (['image[4]'], {'width': 'self.width', 'inter': 'self.inter'}), '(image[4], width=self.width, inter=self.inter)\n', (1533, 1579), False, 'import imutils\n'), ((2334, 2403), 'imutils.resize', 'imutils.resize', (['image[:, :, :3]'], {'height': 'self.height', 'inter': 'self.inter'}), '(image[:, :, :3], height=self.height, inter=self.inter)\n', (2348, 2403), False, 'import imutils\n'), ((2432, 2500), 'imutils.resize', 'imutils.resize', (['image[:, :, 3]'], {'height': 'self.height', 'inter': 'self.inter'}), '(image[:, :, 3], height=self.height, inter=self.inter)\n', (2446, 2500), False, 'import imutils\n'), ((2528, 2597), 'imutils.resize', 'imutils.resize', (['image[:, :, :4]'], {'height': 'self.height', 'inter': 'self.inter'}), '(image[:, :, :4], height=self.height, inter=self.inter)\n', (2542, 2597), False, 'import imutils\n'), ((4135, 4209), 'cv2.resize', 'cv2.resize', (['image_nir', '(self.width, self.height)'], 
{'interpolation': 'self.inter'}), '(image_nir, (self.width, self.height), interpolation=self.inter)\n', (4145, 4209), False, 'import cv2\n'), ((4305, 4378), 'cv2.resize', 'cv2.resize', (['image_re', '(self.width, self.height)'], {'interpolation': 'self.inter'}), '(image_re, (self.width, self.height), interpolation=self.inter)\n', (4315, 4378), False, 'import cv2\n')] |
import pytest
from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer
from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer
from lasagne.layers import helper
import theano
import theano.tensor as T
import numpy as np
import lasagne
from mock import Mock
def test_recurrent_return_shape():
num_batch, seq_len, n_features1, n_features2 = 5, 3, 10, 11
num_units = 6
x = T.tensor4()
in_shp = (num_batch, seq_len, n_features1, n_features2)
l_inp = InputLayer(in_shp)
l_rec = RecurrentLayer(l_inp, num_units=num_units)
x_in = np.random.random(in_shp).astype('float32')
output = helper.get_output(l_rec, x)
output_val = output.eval({x: x_in})
assert helper.get_output_shape(l_rec, x_in.shape) == output_val.shape
assert output_val.shape == (num_batch, seq_len, num_units)
def test_recurrent_grad():
num_batch, seq_len, n_features = 5, 3, 10
num_units = 6
l_inp = InputLayer((num_batch, seq_len, n_features))
l_rec = RecurrentLayer(l_inp,
num_units=num_units)
output = helper.get_output(l_rec)
g = T.grad(T.mean(output), lasagne.layers.get_all_params(l_rec))
assert isinstance(g, (list, tuple))
def test_recurrent_nparams():
l_inp = InputLayer((2, 2, 3))
l_rec = RecurrentLayer(l_inp, 5, learn_init=False, nonlinearity=None)
# b, W_hid_to_hid and W_in_to_hid
assert len(lasagne.layers.get_all_params(l_rec, trainable=True)) == 3
# b + hid_init
assert len(lasagne.layers.get_all_params(l_rec, regularizable=False)) == 2
def test_recurrent_nparams_learn_init():
l_inp = InputLayer((2, 2, 3))
l_rec = RecurrentLayer(l_inp, 5, learn_init=True)
# b, W_hid_to_hid and W_in_to_hid + hid_init
assert len(lasagne.layers.get_all_params(l_rec, trainable=True)) == 4
# b + hid_init
assert len(lasagne.layers.get_all_params(l_rec, regularizable=False)) == 2
def test_recurrent_hid_init_layer():
# test that you can set hid_init to be a layer
l_inp = InputLayer((2, 2, 3))
l_inp_h = InputLayer((2, 5))
l_rec = RecurrentLayer(l_inp, 5, hid_init=l_inp_h)
x = T.tensor3()
h = T.matrix()
output = lasagne.layers.get_output(l_rec, {l_inp: x, l_inp_h: h})
def test_recurrent_nparams_hid_init_layer():
# test that you can see layers through hid_init
l_inp = InputLayer((2, 2, 3))
l_inp_h = InputLayer((2, 5))
l_inp_h_de = DenseLayer(l_inp_h, 7)
l_rec = RecurrentLayer(l_inp, 7, hid_init=l_inp_h_de)
# directly check the layers can be seen through hid_init
assert lasagne.layers.get_all_layers(l_rec) == [l_inp, l_inp_h, l_inp_h_de,
l_rec]
# b, W_hid_to_hid and W_in_to_hid + W + b
assert len(lasagne.layers.get_all_params(l_rec, trainable=True)) == 5
# b (recurrent) + b (dense)
assert len(lasagne.layers.get_all_params(l_rec, regularizable=False)) == 2
def test_recurrent_hid_init_mask():
    """A Layer hid_init must also work when a mask input is supplied."""
    # test that you can set hid_init to be a layer when a mask is provided
    l_inp = InputLayer((2, 2, 3))
    l_inp_h = InputLayer((2, 5))
    l_inp_msk = InputLayer((2, 2))
    l_rec = RecurrentLayer(l_inp, 5, hid_init=l_inp_h, mask_input=l_inp_msk)
    x = T.tensor3()
    h = T.matrix()
    msk = T.matrix()
    inputs = {l_inp: x, l_inp_h: h, l_inp_msk: msk}
    output = lasagne.layers.get_output(l_rec, inputs)
def test_recurrent_hid_init_layer_eval():
    """A Layer hid_init and an equivalent np.array hid_init must yield the
    same outputs once the networks share weights."""
    # Test `hid_init` as a `Layer` with some dummy input. Compare the output of
    # a network with a `Layer` as input to `hid_init` to a network with a
    # `np.array` as input to `hid_init`
    n_units = 7
    n_test_cases = 2
    in_shp = (n_test_cases, 2, 3)
    in_h_shp = (1, n_units)
    # dummy inputs
    X_test = np.ones(in_shp, dtype=theano.config.floatX)
    Xh_test = np.ones(in_h_shp, dtype=theano.config.floatX)
    # the Layer version needs one init row per batch element
    Xh_test_batch = np.tile(Xh_test, (n_test_cases, 1))
    # network with `Layer` initializer for hid_init
    l_inp = InputLayer(in_shp)
    l_inp_h = InputLayer(in_h_shp)
    l_rec_inp_layer = RecurrentLayer(l_inp, n_units, hid_init=l_inp_h,
                                     nonlinearity=None)
    # network with `np.array` initializer for hid_init
    l_rec_nparray = RecurrentLayer(l_inp, n_units, hid_init=Xh_test,
                                   nonlinearity=None)
    # copy network parameters from l_rec_inp_layer to l_rec_nparray
    # (matched by parameter name so both nets compute the same function)
    l_il_param = dict([(p.name, p) for p in l_rec_inp_layer.get_params()])
    l_rn_param = dict([(p.name, p) for p in l_rec_nparray.get_params()])
    for k, v in l_rn_param.items():
        if k in l_il_param:
            v.set_value(l_il_param[k].get_value())
    # build the theano functions
    X = T.tensor3()
    Xh = T.matrix()
    output_inp_layer = lasagne.layers.get_output(l_rec_inp_layer,
                                                 {l_inp: X, l_inp_h: Xh})
    output_nparray = lasagne.layers.get_output(l_rec_nparray, {l_inp: X})
    # test both nets with dummy input
    output_val_inp_layer = output_inp_layer.eval({X: X_test,
                                                  Xh: Xh_test_batch})
    output_val_nparray = output_nparray.eval({X: X_test})
    # check output given `Layer` is the same as with `np.array`
    assert np.allclose(output_val_inp_layer, output_val_nparray)
def test_recurrent_incoming_tuple():
    """A shape tuple (instead of a Layer) is accepted as the incoming arg."""
    input_shape = (2, 3, 4)
    l_rec = lasagne.layers.RecurrentLayer(input_shape, 5)
    assert l_rec.input_shapes[0] == input_shape
def test_recurrent_name():
    """The layer's name is propagated into its parameter names."""
    l_in = lasagne.layers.InputLayer((2, 3, 4))
    layer_name = 'l_rec'
    l_rec = lasagne.layers.RecurrentLayer(l_in, 4, name=layer_name)
    assert l_rec.b.name == layer_name + '.input_to_hidden.b'
    assert l_rec.W_in_to_hid.name == layer_name + '.input_to_hidden.W'
    assert l_rec.W_hid_to_hid.name == layer_name + '.hidden_to_hidden.W'
def test_custom_recurrent_arbitrary_shape():
    """CustomRecurrentLayer with conv sub-networks handles multi-dim
    features and reports/produces the expected 5D output shape."""
    # Check that the custom recurrent layer can handle more than 1 feature dim
    n_batch, n_steps, n_channels, width, height = (2, 3, 4, 5, 6)
    n_out_filters = 7
    filter_shape = (3, 3)
    l_in = lasagne.layers.InputLayer(
        (n_batch, n_steps, n_channels, width, height))
    l_in_to_hid = lasagne.layers.Conv2DLayer(
        lasagne.layers.InputLayer((None, n_channels, width, height)),
        n_out_filters, filter_shape, pad='same')
    l_hid_to_hid = lasagne.layers.Conv2DLayer(
        lasagne.layers.InputLayer((None, n_out_filters, width, height)),
        n_out_filters, filter_shape, pad='same')
    l_rec = lasagne.layers.CustomRecurrentLayer(
        l_in, l_in_to_hid, l_hid_to_hid)
    assert l_rec.output_shape == (n_batch, n_steps, n_out_filters, width,
                                  height)
    out = theano.function([l_in.input_var], lasagne.layers.get_output(l_rec))
    out_shape = out(np.zeros((n_batch, n_steps, n_channels, width, height),
                             dtype=theano.config.floatX)).shape
    assert out_shape == (n_batch, n_steps, n_out_filters, width, height)
def test_custom_recurrent_arbitrary_depth():
    """CustomRecurrentLayer accepts multi-layer (deep) input-to-hidden and
    hidden-to-hidden sub-networks."""
    # Check that the custom recurrent layer can handle a hidden-to-hidden
    # network with an arbitrary depth
    n_batch, n_steps, n_channels, width, height = (2, 3, 4, 5, 6)
    n_out_filters = 7
    n_in_hid_filters_0 = 11
    n_hid_hid_filters_0 = 13
    filter_shape = (3, 3)
    l_in = lasagne.layers.InputLayer(
        (n_batch, n_steps, n_channels, width, height))
    # Expect the output shape of `l_in` as input shape for input-to-hidden
    l_in_to_hid = lasagne.layers.InputLayer((None, n_channels, width, height))
    # Two conv layers; first to `n_hid_filters_0` channels
    l_in_to_hid = lasagne.layers.Conv2DLayer(
        l_in_to_hid, n_in_hid_filters_0, filter_shape, pad='same')
    # then to `n_out_filters` channels
    l_in_to_hid = lasagne.layers.Conv2DLayer(
        l_in_to_hid, n_out_filters, filter_shape, pad='same')
    # Expect the output shape of `l_in_to_hid` as input shape for
    # hidden-to-hidden
    l_hid_to_hid = lasagne.layers.InputLayer((None, n_out_filters,
                                              width, height))
    # Two conv layers; first to `n_hid_hid_filters_0` channels
    l_hid_to_hid = lasagne.layers.Conv2DLayer(
        l_hid_to_hid, n_hid_hid_filters_0, filter_shape, pad='same')
    # then to `n_out_filters` channels
    l_hid_to_hid = lasagne.layers.Conv2DLayer(
        l_hid_to_hid, n_out_filters, filter_shape, pad='same')
    l_rec = lasagne.layers.CustomRecurrentLayer(
        l_in, l_in_to_hid, l_hid_to_hid)
    assert l_rec.output_shape == (n_batch, n_steps, n_out_filters, width,
                                  height)
    out = theano.function([l_in.input_var], lasagne.layers.get_output(l_rec))
    out_shape = out(np.zeros((n_batch, n_steps, n_channels, width, height),
                             dtype=theano.config.floatX)).shape
    assert out_shape == (n_batch, n_steps, n_out_filters, width, height)
def test_custom_recurrent_non_unique_inputs():
    """Sub-networks with more than one InputLayer must be rejected with
    ValueError; a valid pair of sub-networks must still be accepted."""
    # Check that the custom recurrent layer constructor detects non-unique
    # input layers within the input-to-hidden and hidden-to-hidden graphs
    # and raises ValueError
    n_batch, n_steps, n_channels, width, height = (2, 3, 4, 5, 6)
    n_out_filters = 7
    n_in_hid_filters_0 = 11
    n_hid_hid_filters_0 = 13
    filter_shape = (3, 3)
    l_in = lasagne.layers.InputLayer(
        (n_batch, n_steps, n_channels, width, height))
    # Bad input-to-hidden graph with multiple input layers
    # Expect the output shape of `l_in` as input shape for input-to-hidden
    l_in_to_hid_bad_0 = lasagne.layers.InputLayer(
        (None, n_channels, width, height))
    l_in_to_hid_bad_1 = lasagne.layers.InputLayer(
        (None, n_channels, width, height))
    l_in_to_hid_bad = lasagne.layers.ConcatLayer(
        [l_in_to_hid_bad_0, l_in_to_hid_bad_1], axis=1)
    # Two conv layers; first to `n_hid_filters_0` channels
    l_in_to_hid_bad = lasagne.layers.Conv2DLayer(
        l_in_to_hid_bad, n_in_hid_filters_0, filter_shape, pad='same')
    # then to `n_out_filters` channels
    l_in_to_hid_bad = lasagne.layers.Conv2DLayer(
        l_in_to_hid_bad, n_out_filters, filter_shape, pad='same')
    # Expect the output shape of `l_in` as input shape for input-to-hidden
    l_in_to_hid = lasagne.layers.InputLayer((None, n_channels, width, height))
    # Two conv layers; first to `n_hid_filters_0` channels
    l_in_to_hid = lasagne.layers.Conv2DLayer(
        l_in_to_hid, n_in_hid_filters_0, filter_shape, pad='same')
    # then to `n_out_filters` channels
    l_in_to_hid = lasagne.layers.Conv2DLayer(
        l_in_to_hid, n_out_filters, filter_shape, pad='same')
    # Bad hidden-to-hidden graph with multiple input layers
    # Expect the output shape of `l_in_to_hid` as input shape for
    # hidden-to-hidden
    l_hid_to_hid_bad_0 = lasagne.layers.InputLayer(
        (None, n_out_filters, width, height))
    l_hid_to_hid_bad_1 = lasagne.layers.InputLayer(
        (None, n_out_filters, width, height))
    l_hid_to_hid_bad = lasagne.layers.ConcatLayer(
        [l_hid_to_hid_bad_0, l_hid_to_hid_bad_1], axis=1)
    # Two conv layers; first to `n_hid_hid_filters_0` channels
    l_hid_to_hid_bad = lasagne.layers.Conv2DLayer(
        l_hid_to_hid_bad, n_hid_hid_filters_0, filter_shape, pad='same')
    # then to `n_out_filters` channels
    l_hid_to_hid_bad = lasagne.layers.Conv2DLayer(
        l_hid_to_hid_bad, n_out_filters, filter_shape, pad='same')
    # Expect the output shape of `l_in_to_hid` as input shape for
    # hidden-to-hidden
    l_hid_to_hid = lasagne.layers.InputLayer((None, n_out_filters,
                                              width, height))
    # Two conv layers; first to `n_hid_hid_filters_0` channels
    l_hid_to_hid = lasagne.layers.Conv2DLayer(
        l_hid_to_hid, n_hid_hid_filters_0, filter_shape, pad='same')
    # then to `n_out_filters` channels
    l_hid_to_hid = lasagne.layers.Conv2DLayer(
        l_hid_to_hid, n_out_filters, filter_shape, pad='same')
    # Ensure that trying to use either 'bad' graph raises ValueError
    with pytest.raises(ValueError):
        l_rec = lasagne.layers.CustomRecurrentLayer(
            l_in, l_in_to_hid_bad, l_hid_to_hid)
    with pytest.raises(ValueError):
        l_rec = lasagne.layers.CustomRecurrentLayer(
            l_in, l_in_to_hid, l_hid_to_hid_bad)
    with pytest.raises(ValueError):
        l_rec = lasagne.layers.CustomRecurrentLayer(
            l_in, l_in_to_hid_bad, l_hid_to_hid_bad)
    # sanity check: the fully valid combination must construct cleanly
    l_rec = lasagne.layers.CustomRecurrentLayer(
        l_in, l_in_to_hid, l_hid_to_hid)
def test_custom_recurrent_init_shape_error():
    """Invalid shape combinations between the input-to-hidden and
    hidden-to-hidden sub-networks must raise ValueError at construction."""
    # Check that the custom recurrent layer throws errors for invalid shapes
    n_batch, n_steps, n_channels, width, height = (2, 3, 4, 5, 6)
    n_out_filters = 7
    filter_shape = (3, 3)
    l_in = lasagne.layers.InputLayer(
        (n_batch, n_steps, n_channels, width, height))
    l_hid_to_hid = lasagne.layers.Conv2DLayer(
        lasagne.layers.InputLayer((n_batch, n_out_filters, width, height)),
        n_out_filters, filter_shape, pad='same')
    # When precompute_input == True, input_to_hidden.shape[0] must be None
    # or n_batch*n_steps
    l_in_to_hid = lasagne.layers.Conv2DLayer(
        lasagne.layers.InputLayer((n_batch, n_channels, width, height)),
        n_out_filters, filter_shape, pad='same')
    with pytest.raises(ValueError):
        l_rec = lasagne.layers.CustomRecurrentLayer(
            l_in, l_in_to_hid, l_hid_to_hid, precompute_input=True)
    # When precompute_input = False, input_to_hidden.shape[1] must be None
    # or hidden_to_hidden.shape[1]
    l_in_to_hid = lasagne.layers.Conv2DLayer(
        lasagne.layers.InputLayer((n_batch + 1, n_channels, width, height)),
        n_out_filters, filter_shape, pad='same')
    with pytest.raises(ValueError):
        l_rec = lasagne.layers.CustomRecurrentLayer(
            l_in, l_in_to_hid, l_hid_to_hid, precompute_input=False)
    # In any case, input_to_hidden and hidden_to_hidden's output shapes after
    # the first dimension must match
    l_in_to_hid = lasagne.layers.Conv2DLayer(
        lasagne.layers.InputLayer((None, n_channels, width + 1, height)),
        n_out_filters, filter_shape, pad='same')
    with pytest.raises(ValueError):
        l_rec = lasagne.layers.CustomRecurrentLayer(
            l_in, l_in_to_hid, l_hid_to_hid)
    # And, the output shape of input_to_hidden must match the input shape
    # of hidden_to_hidden past the first dimension. By not using padding,
    # the output of l_in_to_hid will be cropped, which will make the
    # shape inappropriate.
    l_in_to_hid = lasagne.layers.Conv2DLayer(
        lasagne.layers.InputLayer((None, n_channels, width, height)),
        n_out_filters, filter_shape)
    l_hid_to_hid = lasagne.layers.Conv2DLayer(
        lasagne.layers.InputLayer((n_batch, n_out_filters, width, height)),
        n_out_filters, filter_shape)
    with pytest.raises(ValueError):
        l_rec = lasagne.layers.CustomRecurrentLayer(
            l_in, l_in_to_hid, l_hid_to_hid)
def test_recurrent_grad_clipping():
    """Setting grad_clipping on a RecurrentLayer must not break the
    output-graph construction."""
    hidden, batch, timesteps, feats = 5, 3, 2, 4
    shape = (batch, timesteps, feats)
    layer_in = InputLayer(shape)
    x_var = T.tensor3()
    layer_rec = RecurrentLayer(layer_in, hidden, grad_clipping=1.0)
    output = lasagne.layers.get_output(layer_rec, x_var)
def test_recurrent_bck():
    """A backwards RecurrentLayer with the same weights as a forward one
    must produce the time-reversed output on a time-symmetric (all-ones)
    input."""
    num_batch, seq_len, n_features1 = 2, 3, 4
    num_units = 2
    x = T.tensor3()
    in_shp = (num_batch, seq_len, n_features1)
    l_inp = InputLayer(in_shp)
    x_in = np.ones(in_shp).astype('float32')
    # need to set random seed so both layers get identical initial weights.
    lasagne.random.get_rng().seed(1234)
    l_rec_fwd = RecurrentLayer(l_inp, num_units=num_units, backwards=False)
    lasagne.random.get_rng().seed(1234)
    l_rec_bck = RecurrentLayer(l_inp, num_units=num_units, backwards=True)
    l_out_fwd = helper.get_output(l_rec_fwd, x)
    l_out_bck = helper.get_output(l_rec_bck, x)
    # BUG FIX: substitute the *input* variable x. The previous code used
    # l_out_fwd.eval({l_out_fwd: x_in}), which replaces the output variable
    # itself, so eval() returned x_in unchanged and the assertion passed
    # without ever running the layers (cf. test_lstm_bck, which is correct).
    output_fwd = l_out_fwd.eval({x: x_in})
    output_bck = l_out_bck.eval({x: x_in})
    # test that the backwards model reverses its final input
    np.testing.assert_almost_equal(output_fwd, output_bck[:, ::-1])
def test_recurrent_variable_input_size():
    """RecurrentLayer must evaluate with (None, None, feats) input shape,
    i.e. batch size and sequence length decided only at run time."""
    # check that seqlen and batchsize None works
    num_batch, n_features1 = 6, 5
    num_units = 13
    x = T.tensor3()
    in_shp = (None, None, n_features1)
    l_inp = InputLayer(in_shp)
    # two inputs with different batch sizes and sequence lengths
    x_in1 = np.ones((num_batch+1, 10, n_features1)).astype('float32')
    x_in2 = np.ones((num_batch, 15, n_features1)).astype('float32')
    l_rec = RecurrentLayer(l_inp, num_units=num_units, backwards=False)
    output = helper.get_output(l_rec, x)
    output_val1 = output.eval({x: x_in1})
    output_val2 = output.eval({x: x_in2})
def test_recurrent_unroll_scan_fwd():
    """Forward RecurrentLayer with unroll_scan=True must match the
    scan-based version given identical weights and a mask input."""
    num_batch, seq_len, n_features1 = 2, 3, 4
    num_units = 2
    in_shp = (num_batch, seq_len, n_features1)
    l_inp = InputLayer(in_shp)
    l_mask_inp = InputLayer(in_shp[:2])
    x_in = np.random.random(in_shp).astype('float32')
    mask_in = np.ones(in_shp[:2]).astype('float32')
    # need to set random seed so both layers get identical initial weights.
    lasagne.random.get_rng().seed(1234)
    l_rec_scan = RecurrentLayer(l_inp, num_units=num_units, backwards=False,
                                unroll_scan=False, mask_input=l_mask_inp)
    lasagne.random.get_rng().seed(1234)
    l_rec_unroll = RecurrentLayer(l_inp, num_units=num_units, backwards=False,
                                  unroll_scan=True, mask_input=l_mask_inp)
    output_scan = helper.get_output(l_rec_scan)
    output_unrolled = helper.get_output(l_rec_unroll)
    output_scan_val = output_scan.eval(
        {l_inp.input_var: x_in, l_mask_inp.input_var: mask_in})
    output_unrolled_val = output_unrolled.eval(
        {l_inp.input_var: x_in, l_mask_inp.input_var: mask_in})
    np.testing.assert_almost_equal(output_scan_val, output_unrolled_val)
def test_recurrent_unroll_scan_bck():
    """Backwards RecurrentLayer with unroll_scan=True must match the
    scan-based version given identical weights."""
    num_batch, seq_len, n_features1 = 2, 3, 4
    num_units = 2
    x = T.tensor3()
    in_shp = (num_batch, seq_len, n_features1)
    l_inp = InputLayer(in_shp)
    x_in = np.random.random(in_shp).astype('float32')
    # need to set random seed so both layers get identical initial weights.
    lasagne.random.get_rng().seed(1234)
    l_rec_scan = RecurrentLayer(l_inp, num_units=num_units, backwards=True,
                                unroll_scan=False)
    lasagne.random.get_rng().seed(1234)
    l_rec_unroll = RecurrentLayer(l_inp, num_units=num_units, backwards=True,
                                  unroll_scan=True)
    output_scan = helper.get_output(l_rec_scan, x)
    output_unrolled = helper.get_output(l_rec_unroll, x)
    output_scan_val = output_scan.eval({x: x_in})
    output_unrolled_val = output_unrolled.eval({x: x_in})
    np.testing.assert_almost_equal(output_scan_val, output_unrolled_val)
def test_recurrent_precompute():
    """precompute_input=True must produce the same output as
    precompute_input=False given identical weights and a mask."""
    num_batch, seq_len, n_features1 = 2, 3, 4
    num_units = 2
    in_shp = (num_batch, seq_len, n_features1)
    l_inp = InputLayer(in_shp)
    l_mask_inp = InputLayer(in_shp[:2])
    x_in = np.random.random(in_shp).astype('float32')
    mask_in = np.ones((num_batch, seq_len), dtype='float32')
    # need to set random seed so both layers get identical initial weights.
    lasagne.random.get_rng().seed(1234)
    l_rec_precompute = RecurrentLayer(l_inp, num_units=num_units,
                                      precompute_input=True,
                                      mask_input=l_mask_inp)
    lasagne.random.get_rng().seed(1234)
    l_rec_no_precompute = RecurrentLayer(l_inp, num_units=num_units,
                                         precompute_input=False,
                                         mask_input=l_mask_inp)
    output_precompute = helper.get_output(
        l_rec_precompute).eval({l_inp.input_var: x_in,
                                l_mask_inp.input_var: mask_in})
    output_no_precompute = helper.get_output(
        l_rec_no_precompute).eval({l_inp.input_var: x_in,
                                   l_mask_inp.input_var: mask_in})
    np.testing.assert_almost_equal(output_precompute, output_no_precompute)
def test_recurrent_return_final():
    """only_return_final=True must return exactly the last time step of the
    full-sequence output, with the matching 2D shape."""
    num_batch, seq_len, n_features = 2, 3, 4
    num_units = 2
    in_shp = (num_batch, seq_len, n_features)
    x_in = np.random.random(in_shp).astype('float32')
    l_inp = InputLayer(in_shp)
    # same seed twice so both layers get identical initial weights
    lasagne.random.get_rng().seed(1234)
    l_rec_final = RecurrentLayer(l_inp, num_units, only_return_final=True)
    lasagne.random.get_rng().seed(1234)
    l_rec_all = RecurrentLayer(l_inp, num_units, only_return_final=False)
    output_final = helper.get_output(l_rec_final).eval({l_inp.input_var: x_in})
    output_all = helper.get_output(l_rec_all).eval({l_inp.input_var: x_in})
    assert output_final.shape == (output_all.shape[0], output_all.shape[2])
    assert output_final.shape == lasagne.layers.get_output_shape(l_rec_final)
    assert np.allclose(output_final, output_all[:, -1])
def test_lstm_return_shape():
    """LSTMLayer output shape must be (batch, seq_len, num_units) and agree
    with helper.get_output_shape, even for >3D inputs."""
    num_batch, seq_len, n_features1, n_features2 = 5, 3, 10, 11
    num_units = 6
    x = T.tensor4()
    in_shp = (num_batch, seq_len, n_features1, n_features2)
    l_inp = InputLayer(in_shp)
    x_in = np.random.random(in_shp).astype('float32')
    l_lstm = LSTMLayer(l_inp, num_units=num_units)
    output = helper.get_output(l_lstm, x)
    output_val = output.eval({x: x_in})
    assert helper.get_output_shape(l_lstm, x_in.shape) == output_val.shape
    assert output_val.shape == (num_batch, seq_len, num_units)
def test_lstm_grad():
    """Gradients of the mean output w.r.t. every LSTMLayer parameter must
    be computable and come back as a list/tuple."""
    batch, timesteps, feats = 5, 3, 10
    hidden = 6
    layer_in = InputLayer((batch, timesteps, feats))
    layer_lstm = LSTMLayer(layer_in, num_units=hidden)
    out = helper.get_output(layer_lstm)
    grads = T.grad(T.mean(out), lasagne.layers.get_all_params(layer_lstm))
    assert isinstance(grads, (list, tuple))
def test_lstm_nparams_no_peepholes():
    """Parameter counts of an LSTMLayer without peepholes or learned init."""
    layer_in = InputLayer((2, 2, 3))
    layer_lstm = LSTMLayer(layer_in, 5, peepholes=False, learn_init=False)
    # 3 params per gate (hid_to_gate, in_to_gate, bias) times 4 gates = 12
    trainable = lasagne.layers.get_all_params(layer_lstm, trainable=True)
    assert len(trainable) == 12
    # bias params + init params
    non_reg = lasagne.layers.get_all_params(layer_lstm, regularizable=False)
    assert len(non_reg) == 6
def test_lstm_nparams_peepholes():
    """Peephole connections add three trainable weight vectors."""
    l_inp = InputLayer((2, 2, 3))
    l_lstm = LSTMLayer(l_inp, 5, peepholes=True, learn_init=False)
    # 3*n_gates + peepholes(3).
    # the 3 is because we have hid_to_gate, in_to_gate and bias for each gate
    assert len(lasagne.layers.get_all_params(l_lstm, trainable=True)) == 15
    # bias params(4) + init params(2)
    assert len(lasagne.layers.get_all_params(l_lstm, regularizable=False)) == 6
def test_lstm_nparams_learn_init():
    """learn_init=True makes hid_init and cell_init trainable."""
    l_inp = InputLayer((2, 2, 3))
    l_lstm = LSTMLayer(l_inp, 5, peepholes=False, learn_init=True)
    # 3*n_gates + inits(2).
    # the 3 is because we have hid_to_gate, in_to_gate and bias for each gate
    assert len(lasagne.layers.get_all_params(l_lstm, trainable=True)) == 14
    # bias params(4) + init params(2)
    assert len(lasagne.layers.get_all_params(l_lstm, regularizable=False)) == 6
def test_lstm_hid_init_layer():
    """hid_init and cell_init may both be Layers; building the output
    expression must succeed."""
    # test that you can set hid_init to be a layer
    l_inp = InputLayer((2, 2, 3))
    l_inp_h = InputLayer((2, 5))
    l_cell_h = InputLayer((2, 5))
    l_lstm = LSTMLayer(l_inp, 5, hid_init=l_inp_h, cell_init=l_cell_h)
    x = T.tensor3()
    h = T.matrix()
    output = lasagne.layers.get_output(l_lstm, {l_inp: x, l_inp_h: h})
def test_lstm_nparams_hid_init_layer():
    """Layers given as hid_init/cell_init become part of the LSTM's layer
    graph and contribute their parameters."""
    # test that you can see layers through hid_init
    l_inp = InputLayer((2, 2, 3))
    l_inp_h = InputLayer((2, 5))
    l_inp_h_de = DenseLayer(l_inp_h, 7)
    l_inp_cell = InputLayer((2, 5))
    l_inp_cell_de = DenseLayer(l_inp_cell, 7)
    l_lstm = LSTMLayer(l_inp, 7, hid_init=l_inp_h_de, cell_init=l_inp_cell_de)
    # directly check the layers can be seen through hid_init
    layers_to_find = [l_inp, l_inp_h, l_inp_h_de, l_inp_cell, l_inp_cell_de,
                      l_lstm]
    assert lasagne.layers.get_all_layers(l_lstm) == layers_to_find
    # 3*n_gates(12) + peepholes(3) + 4
    # the 3 is because we have hid_to_gate, in_to_gate and bias for each gate
    # 4 is for the W and b parameters in the two DenseLayer layers
    assert len(lasagne.layers.get_all_params(l_lstm, trainable=True)) == 19
    # LSTM bias params(4) + Dense bias params(1) * 2
    assert len(lasagne.layers.get_all_params(l_lstm, regularizable=False)) == 6
def test_lstm_hid_init_mask():
    """Layer hid_init/cell_init must also work together with a mask input."""
    # test that you can set hid_init to be a layer when a mask is provided
    l_inp = InputLayer((2, 2, 3))
    l_inp_h = InputLayer((2, 5))
    l_inp_msk = InputLayer((2, 2))
    l_cell_h = InputLayer((2, 5))
    l_lstm = LSTMLayer(l_inp, 5, hid_init=l_inp_h, mask_input=l_inp_msk,
                       cell_init=l_cell_h)
    x = T.tensor3()
    h = T.matrix()
    msk = T.matrix()
    inputs = {l_inp: x, l_inp_h: h, l_inp_msk: msk}
    output = lasagne.layers.get_output(l_lstm, inputs)
def test_lstm_hid_init_layer_eval():
    """Layer-valued and np.array-valued hid_init/cell_init must give the
    same outputs once the networks share weights."""
    # Test `hid_init` as a `Layer` with some dummy input. Compare the output of
    # a network with a `Layer` as input to `hid_init` to a network with a
    # `np.array` as input to `hid_init`
    n_units = 7
    n_test_cases = 2
    in_shp = (n_test_cases, 2, 3)
    in_h_shp = (1, n_units)
    in_cell_shp = (1, n_units)
    # dummy inputs
    X_test = np.ones(in_shp, dtype=theano.config.floatX)
    Xh_test = np.ones(in_h_shp, dtype=theano.config.floatX)
    Xc_test = np.ones(in_cell_shp, dtype=theano.config.floatX)
    # the Layer version needs one init row per batch element
    Xh_test_batch = np.tile(Xh_test, (n_test_cases, 1))
    Xc_test_batch = np.tile(Xc_test, (n_test_cases, 1))
    # network with `Layer` initializer for hid_init
    l_inp = InputLayer(in_shp)
    l_inp_h = InputLayer(in_h_shp)
    l_inp_cell = InputLayer(in_cell_shp)
    l_rec_inp_layer = LSTMLayer(l_inp, n_units, hid_init=l_inp_h,
                                cell_init=l_inp_cell, nonlinearity=None)
    # network with `np.array` initializer for hid_init
    l_rec_nparray = LSTMLayer(l_inp, n_units, hid_init=Xh_test,
                              cell_init=Xc_test, nonlinearity=None)
    # copy network parameters from l_rec_inp_layer to l_rec_nparray
    # (matched by parameter name so both nets compute the same function)
    l_il_param = dict([(p.name, p) for p in l_rec_inp_layer.get_params()])
    l_rn_param = dict([(p.name, p) for p in l_rec_nparray.get_params()])
    for k, v in l_rn_param.items():
        if k in l_il_param:
            v.set_value(l_il_param[k].get_value())
    # build the theano functions
    X = T.tensor3()
    Xh = T.matrix()
    Xc = T.matrix()
    output_inp_layer = lasagne.layers.get_output(l_rec_inp_layer,
                                                 {l_inp: X, l_inp_h:
                                                  Xh, l_inp_cell: Xc})
    output_nparray = lasagne.layers.get_output(l_rec_nparray, {l_inp: X})
    # test both nets with dummy input
    output_val_inp_layer = output_inp_layer.eval({X: X_test, Xh: Xh_test_batch,
                                                  Xc: Xc_test_batch})
    output_val_nparray = output_nparray.eval({X: X_test})
    # check output given `Layer` is the same as with `np.array`
    assert np.allclose(output_val_inp_layer, output_val_nparray)
def test_lstm_grad_clipping():
    """Setting grad_clipping on an LSTMLayer must not break the
    output-graph construction."""
    # test that you can set grad_clip variable
    x_var = T.tensor3()
    layer = LSTMLayer(InputLayer((2, 2, 3)), 5, grad_clipping=1)
    output = lasagne.layers.get_output(layer, x_var)
def test_lstm_bck():
    """A backwards LSTMLayer with the same weights as a forward one must
    produce the time-reversed output on a time-symmetric (all-ones) input."""
    num_batch, seq_len, n_features1 = 2, 3, 4
    num_units = 2
    x = T.tensor3()
    in_shp = (num_batch, seq_len, n_features1)
    l_inp = InputLayer(in_shp)
    x_in = np.ones(in_shp).astype('float32')
    # need to set random seed so both layers get identical initial weights.
    lasagne.random.get_rng().seed(1234)
    l_lstm_fwd = LSTMLayer(l_inp, num_units=num_units, backwards=False)
    lasagne.random.get_rng().seed(1234)
    l_lstm_bck = LSTMLayer(l_inp, num_units=num_units, backwards=True)
    output_fwd = helper.get_output(l_lstm_fwd, x)
    output_bck = helper.get_output(l_lstm_bck, x)
    output_fwd_val = output_fwd.eval({x: x_in})
    output_bck_val = output_bck.eval({x: x_in})
    # test that the backwards model reverses its final input
    np.testing.assert_almost_equal(output_fwd_val, output_bck_val[:, ::-1])
def test_lstm_precompute():
    """precompute_input=True must give the same LSTM output as
    precompute_input=False given identical weights and a mask."""
    num_batch, seq_len, n_features1 = 2, 3, 4
    num_units = 2
    in_shp = (num_batch, seq_len, n_features1)
    l_inp = InputLayer(in_shp)
    l_mask_inp = InputLayer(in_shp[:2])
    x_in = np.random.random(in_shp).astype('float32')
    mask_in = np.ones((num_batch, seq_len), dtype='float32')
    # need to set random seed so both layers get identical initial weights.
    lasagne.random.get_rng().seed(1234)
    l_lstm_precompute = LSTMLayer(
        l_inp, num_units=num_units, precompute_input=True,
        mask_input=l_mask_inp)
    lasagne.random.get_rng().seed(1234)
    l_lstm_no_precompute = LSTMLayer(
        l_inp, num_units=num_units, precompute_input=False,
        mask_input=l_mask_inp)
    output_precompute = helper.get_output(
        l_lstm_precompute).eval({l_inp.input_var: x_in,
                                 l_mask_inp.input_var: mask_in})
    output_no_precompute = helper.get_output(
        l_lstm_no_precompute).eval({l_inp.input_var: x_in,
                                    l_mask_inp.input_var: mask_in})
    # both computation strategies must agree
    np.testing.assert_almost_equal(output_precompute, output_no_precompute)
def test_lstm_variable_input_size():
    """LSTMLayer must evaluate with (None, None, feats) input shape, i.e.
    batch size and sequence length decided only at run time."""
    # that seqlen and batchsize None works
    num_batch, n_features1 = 6, 5
    num_units = 13
    x = T.tensor3()
    in_shp = (None, None, n_features1)
    l_inp = InputLayer(in_shp)
    # two inputs with different batch sizes and sequence lengths
    x_in1 = np.ones((num_batch+1, 3+1, n_features1)).astype('float32')
    x_in2 = np.ones((num_batch, 3, n_features1)).astype('float32')
    l_rec = LSTMLayer(l_inp, num_units=num_units, backwards=False)
    output = helper.get_output(l_rec, x)
    output_val1 = output.eval({x: x_in1})
    output_val2 = output.eval({x: x_in2})
def test_lstm_unroll_scan_fwd():
    """Forward LSTMLayer with unroll_scan=True must match the scan-based
    version given identical weights and a mask input."""
    num_batch, seq_len, n_features1 = 2, 3, 4
    num_units = 2
    in_shp = (num_batch, seq_len, n_features1)
    l_inp = InputLayer(in_shp)
    l_mask_inp = InputLayer(in_shp[:2])
    x_in = np.random.random(in_shp).astype('float32')
    mask_in = np.ones(in_shp[:2]).astype('float32')
    # need to set random seed so both layers get identical initial weights.
    lasagne.random.get_rng().seed(1234)
    l_lstm_scan = LSTMLayer(l_inp, num_units=num_units, backwards=False,
                            unroll_scan=False, mask_input=l_mask_inp)
    lasagne.random.get_rng().seed(1234)
    l_lstm_unrolled = LSTMLayer(l_inp, num_units=num_units, backwards=False,
                                unroll_scan=True, mask_input=l_mask_inp)
    output_scan = helper.get_output(l_lstm_scan)
    output_unrolled = helper.get_output(l_lstm_unrolled)
    output_scan_val = output_scan.eval({l_inp.input_var: x_in,
                                        l_mask_inp.input_var: mask_in})
    output_unrolled_val = output_unrolled.eval({l_inp.input_var: x_in,
                                                l_mask_inp.input_var: mask_in})
    np.testing.assert_almost_equal(output_scan_val, output_unrolled_val)
def test_lstm_unroll_scan_bck():
    """Backwards LSTMLayer with unroll_scan=True must match the scan-based
    version given identical weights."""
    num_batch, seq_len, n_features1 = 2, 3, 4
    num_units = 2
    x = T.tensor3()
    in_shp = (num_batch, seq_len, n_features1)
    l_inp = InputLayer(in_shp)
    x_in = np.random.random(in_shp).astype('float32')
    # need to set random seed so both layers get identical initial weights.
    lasagne.random.get_rng().seed(1234)
    l_lstm_scan = LSTMLayer(l_inp, num_units=num_units, backwards=True,
                            unroll_scan=False)
    lasagne.random.get_rng().seed(1234)
    l_lstm_unrolled = LSTMLayer(l_inp, num_units=num_units, backwards=True,
                                unroll_scan=True)
    output_scan = helper.get_output(l_lstm_scan, x)
    output_scan_unrolled = helper.get_output(l_lstm_unrolled, x)
    output_scan_val = output_scan.eval({x: x_in})
    output_unrolled_val = output_scan_unrolled.eval({x: x_in})
    np.testing.assert_almost_equal(output_scan_val, output_unrolled_val)
def test_lstm_passthrough():
    """With gates configured as identities (ingate/forgetgate off, cell
    input = identity on x, outgate open, linear nonlinearities) the LSTM
    must reproduce its input exactly."""
    # Tests that the LSTM can simply pass through its input
    l_in = InputLayer((4, 5, 6))
    zero = lasagne.init.Constant(0.)
    one = lasagne.init.Constant(1.)
    # gate that is always fully open (bias 1, no weights)
    pass_gate = Gate(zero, zero, zero, one, None)
    # gate that is always fully closed
    no_gate = Gate(zero, zero, zero, zero, None)
    # cell-input gate whose W_in is the identity matrix
    in_pass_gate = Gate(
        np.eye(6).astype(theano.config.floatX), zero, zero, zero, None)
    l_rec = LSTMLayer(
        l_in, 6, pass_gate, no_gate, in_pass_gate, pass_gate, None)
    out = lasagne.layers.get_output(l_rec)
    inp = np.arange(4*5*6).reshape(4, 5, 6).astype(theano.config.floatX)
    np.testing.assert_almost_equal(out.eval({l_in.input_var: inp}), inp)
def test_lstm_return_final():
    """only_return_final=True must return exactly the last time step of the
    full-sequence LSTM output, with the matching 2D shape."""
    num_batch, seq_len, n_features = 2, 3, 4
    num_units = 2
    in_shp = (num_batch, seq_len, n_features)
    x_in = np.random.random(in_shp).astype('float32')
    l_inp = InputLayer(in_shp)
    # same seed twice so both layers get identical initial weights
    lasagne.random.get_rng().seed(1234)
    l_rec_final = LSTMLayer(l_inp, num_units, only_return_final=True)
    lasagne.random.get_rng().seed(1234)
    l_rec_all = LSTMLayer(l_inp, num_units, only_return_final=False)
    output_final = helper.get_output(l_rec_final).eval({l_inp.input_var: x_in})
    output_all = helper.get_output(l_rec_all).eval({l_inp.input_var: x_in})
    assert output_final.shape == (output_all.shape[0], output_all.shape[2])
    assert output_final.shape == lasagne.layers.get_output_shape(l_rec_final)
    assert np.allclose(output_final, output_all[:, -1])
def test_gru_return_shape():
    """GRULayer output shape must be (batch, seq_len, num_units) and agree
    with helper.get_output_shape, even for >3D inputs."""
    num_batch, seq_len, n_features1, n_features2 = 5, 3, 10, 11
    num_units = 6
    x = T.tensor4()
    in_shp = (num_batch, seq_len, n_features1, n_features2)
    l_inp = InputLayer(in_shp)
    l_rec = GRULayer(l_inp, num_units=num_units)
    x_in = np.random.random(in_shp).astype('float32')
    output = helper.get_output(l_rec, x)
    output_val = output.eval({x: x_in})
    assert helper.get_output_shape(l_rec, x_in.shape) == output_val.shape
    assert output_val.shape == (num_batch, seq_len, num_units)
def test_gru_grad():
    """Gradients of the mean output w.r.t. every GRULayer parameter must
    be computable and come back as a list/tuple."""
    batch, timesteps, feats = 5, 3, 10
    hidden = 6
    layer_in = InputLayer((batch, timesteps, feats))
    layer_gru = GRULayer(layer_in, num_units=hidden)
    out = helper.get_output(layer_gru)
    grads = T.grad(T.mean(out), lasagne.layers.get_all_params(layer_gru))
    assert isinstance(grads, (list, tuple))
def test_gru_nparams_learn_init_false():
    """Parameter counts of a GRULayer without learned initial state."""
    l_inp = InputLayer((2, 2, 3))
    l_gru = GRULayer(l_inp, 5, learn_init=False)
    # 3*n_gates
    # the 3 is because we have hid_to_gate, in_to_gate and bias for each gate
    assert len(lasagne.layers.get_all_params(l_gru, trainable=True)) == 9
    # bias params(3) + hid_init
    assert len(lasagne.layers.get_all_params(l_gru, regularizable=False)) == 4
def test_gru_nparams_learn_init_true():
    """learn_init=True makes hid_init trainable."""
    l_inp = InputLayer((2, 2, 3))
    l_gru = GRULayer(l_inp, 5, learn_init=True)
    # 3*n_gates + hid_init
    # the 3 is because we have hid_to_gate, in_to_gate and bias for each gate
    assert len(lasagne.layers.get_all_params(l_gru, trainable=True)) == 10
    # bias params(3) + init params(1)
    assert len(lasagne.layers.get_all_params(l_gru, regularizable=False)) == 4
def test_gru_hid_init_layer():
    """hid_init may be another Layer; building the output must succeed."""
    # test that you can set hid_init to be a layer
    l_inp = InputLayer((2, 2, 3))
    l_inp_h = InputLayer((2, 5))
    l_gru = GRULayer(l_inp, 5, hid_init=l_inp_h)
    x = T.tensor3()
    h = T.matrix()
    output = lasagne.layers.get_output(l_gru, {l_inp: x, l_inp_h: h})
def test_gru_nparams_hid_init_layer():
    """A Layer passed as hid_init contributes its own layers and params to
    the GRU's graph."""
    # test that you can see layers through hid_init
    l_inp = InputLayer((2, 2, 3))
    l_inp_h = InputLayer((2, 5))
    l_inp_h_de = DenseLayer(l_inp_h, 7)
    l_gru = GRULayer(l_inp, 7, hid_init=l_inp_h_de)
    # directly check the layers can be seen through hid_init
    assert lasagne.layers.get_all_layers(l_gru) == [l_inp, l_inp_h, l_inp_h_de,
                                                    l_gru]
    # 3*n_gates + 2
    # the 3 is because we have hid_to_gate, in_to_gate and bias for each gate
    # 2 is for the W and b parameters in the DenseLayer
    assert len(lasagne.layers.get_all_params(l_gru, trainable=True)) == 11
    # GRU bias params(3) + Dense bias params(1)
    assert len(lasagne.layers.get_all_params(l_gru, regularizable=False)) == 4
def test_gru_hid_init_layer_eval():
    """Layer-valued and np.array-valued hid_init must give the same GRU
    outputs once the networks share weights."""
    # Test `hid_init` as a `Layer` with some dummy input. Compare the output of
    # a network with a `Layer` as input to `hid_init` to a network with a
    # `np.array` as input to `hid_init`
    n_units = 7
    n_test_cases = 2
    in_shp = (n_test_cases, 2, 3)
    in_h_shp = (1, n_units)
    # dummy inputs
    X_test = np.ones(in_shp, dtype=theano.config.floatX)
    Xh_test = np.ones(in_h_shp, dtype=theano.config.floatX)
    # the Layer version needs one init row per batch element
    Xh_test_batch = np.tile(Xh_test, (n_test_cases, 1))
    # network with `Layer` initializer for hid_init
    l_inp = InputLayer(in_shp)
    l_inp_h = InputLayer(in_h_shp)
    l_rec_inp_layer = GRULayer(l_inp, n_units, hid_init=l_inp_h)
    # network with `np.array` initializer for hid_init
    l_rec_nparray = GRULayer(l_inp, n_units, hid_init=Xh_test)
    # copy network parameters from l_rec_inp_layer to l_rec_nparray
    # (matched by parameter name so both nets compute the same function)
    l_il_param = dict([(p.name, p) for p in l_rec_inp_layer.get_params()])
    l_rn_param = dict([(p.name, p) for p in l_rec_nparray.get_params()])
    for k, v in l_rn_param.items():
        if k in l_il_param:
            v.set_value(l_il_param[k].get_value())
    # build the theano functions
    X = T.tensor3()
    Xh = T.matrix()
    output_inp_layer = lasagne.layers.get_output(l_rec_inp_layer,
                                                 {l_inp: X, l_inp_h: Xh})
    output_nparray = lasagne.layers.get_output(l_rec_nparray, {l_inp: X})
    # test both nets with dummy input
    output_val_inp_layer = output_inp_layer.eval({X: X_test,
                                                  Xh: Xh_test_batch})
    output_val_nparray = output_nparray.eval({X: X_test})
    # check output given `Layer` is the same as with `np.array`
    assert np.allclose(output_val_inp_layer, output_val_nparray)
def test_gru_hid_init_mask():
    """A Layer hid_init must also work when a mask input is supplied."""
    # test that you can set hid_init to be a layer when a mask is provided
    l_inp = InputLayer((2, 2, 3))
    l_inp_h = InputLayer((2, 5))
    l_inp_msk = InputLayer((2, 2))
    l_gru = GRULayer(l_inp, 5, hid_init=l_inp_h, mask_input=l_inp_msk)
    x = T.tensor3()
    h = T.matrix()
    msk = T.matrix()
    inputs = {l_inp: x, l_inp_h: h, l_inp_msk: msk}
    output = lasagne.layers.get_output(l_gru, inputs)
def test_gru_grad_clipping():
    """Setting grad_clipping on a GRULayer must not break the output-graph
    construction."""
    # test that you can set grad_clip variable
    x_var = T.tensor3()
    layer = GRULayer(InputLayer((2, 2, 3)), 5, grad_clipping=1)
    output = lasagne.layers.get_output(layer, x_var)
def test_gru_bck():
    """A backwards GRULayer with the same weights as a forward one must
    produce the time-reversed output on a time-symmetric (all-ones) input."""
    num_batch, seq_len, n_features1 = 2, 3, 4
    num_units = 2
    x = T.tensor3()
    in_shp = (num_batch, seq_len, n_features1)
    l_inp = InputLayer(in_shp)
    x_in = np.ones(in_shp).astype('float32')
    # need to set random seed so both layers get identical initial weights.
    lasagne.random.get_rng().seed(1234)
    l_gru_fwd = GRULayer(l_inp, num_units=num_units, backwards=False)
    lasagne.random.get_rng().seed(1234)
    l_gru_bck = GRULayer(l_inp, num_units=num_units, backwards=True)
    output_fwd = helper.get_output(l_gru_fwd, x)
    output_bck = helper.get_output(l_gru_bck, x)
    output_fwd_val = output_fwd.eval({x: x_in})
    output_bck_val = output_bck.eval({x: x_in})
    # test that the backwards model reverses its final input
    np.testing.assert_almost_equal(output_fwd_val, output_bck_val[:, ::-1])
def test_gru_variable_input_size():
    """GRU must accept None for both batch size and sequence length."""
    batch, feats = 6, 5
    n_units = 13
    sym_x = T.tensor3()
    layer_in = InputLayer((None, None, feats))
    # Two batches with different batch sizes AND different sequence lengths.
    batch_a = np.ones((batch + 1, 10, feats)).astype('float32')
    batch_b = np.ones((batch, 15, feats)).astype('float32')
    gru = GRULayer(layer_in, num_units=n_units, backwards=False)
    out = helper.get_output(gru, sym_x)
    out.eval({sym_x: batch_a})
    out.eval({sym_x: batch_b})
def test_gru_unroll_scan_fwd():
    """unroll_scan=True must reproduce the scan-based forward GRU exactly."""
    batch, steps, feats = 2, 3, 4
    n_units = 2
    shape = (batch, steps, feats)
    layer_in = InputLayer(shape)
    layer_mask = InputLayer(shape[:2])
    data = np.random.random(shape).astype('float32')
    mask = np.ones(shape[:2]).astype('float32')
    # Identical seeds give both layers identical parameters.
    lasagne.random.get_rng().seed(1234)
    gru_scan = GRULayer(layer_in, num_units=n_units, backwards=False,
                       unroll_scan=False, mask_input=layer_mask)
    lasagne.random.get_rng().seed(1234)
    gru_unroll = GRULayer(layer_in, num_units=n_units, backwards=False,
                         unroll_scan=True, mask_input=layer_mask)
    feed = {layer_in.input_var: data, layer_mask.input_var: mask}
    scan_val = helper.get_output(gru_scan).eval(feed)
    unroll_val = helper.get_output(gru_unroll).eval(feed)
    np.testing.assert_almost_equal(scan_val, unroll_val)
def test_gru_unroll_scan_bck():
    """unroll_scan=True must reproduce the scan-based backwards GRU exactly."""
    batch, steps, feats = 2, 5, 4
    n_units = 2
    shape = (batch, steps, feats)
    sym_x = T.tensor3()
    layer_in = InputLayer(shape)
    data = np.random.random(shape).astype('float32')
    # Identical seeds give both layers identical parameters.
    lasagne.random.get_rng().seed(1234)
    gru_scan = GRULayer(layer_in, num_units=n_units, backwards=True,
                       unroll_scan=False)
    lasagne.random.get_rng().seed(1234)
    gru_unroll = GRULayer(layer_in, num_units=n_units, backwards=True,
                         unroll_scan=True)
    scan_val = helper.get_output(gru_scan, sym_x).eval({sym_x: data})
    unroll_val = helper.get_output(gru_unroll, sym_x).eval({sym_x: data})
    np.testing.assert_almost_equal(scan_val, unroll_val)
def test_gru_precompute():
    """precompute_input=True must not change the GRU's outputs."""
    batch, steps, feats = 2, 3, 4
    n_units = 2
    shape = (batch, steps, feats)
    layer_in = InputLayer(shape)
    layer_mask = InputLayer(shape[:2])
    data = np.random.random(shape).astype('float32')
    mask = np.ones((batch, steps), dtype='float32')
    feed = {layer_in.input_var: data, layer_mask.input_var: mask}
    # Same seed -> same weights for both variants.
    lasagne.random.get_rng().seed(1234)
    gru_pre = GRULayer(layer_in, num_units=n_units,
                      precompute_input=True, mask_input=layer_mask)
    lasagne.random.get_rng().seed(1234)
    gru_plain = GRULayer(layer_in, num_units=n_units,
                        precompute_input=False,
                        mask_input=layer_mask)
    out_pre = helper.get_output(gru_pre).eval(feed)
    out_plain = helper.get_output(gru_plain).eval(feed)
    np.testing.assert_almost_equal(out_pre, out_plain)
def test_gru_passthrough():
    """A GRU gated to identity must pass its input straight through."""
    layer_in = InputLayer((4, 5, 6))
    zero = lasagne.init.Constant(0.)
    one = lasagne.init.Constant(1.)
    # Gate that is always fully open, and one that is always closed.
    pass_gate = Gate(zero, zero, None, one, None)
    no_gate = Gate(zero, zero, None, zero, None)
    # Hidden-update gate whose input-to-hidden weights are the identity matrix.
    in_pass_gate = Gate(
        np.eye(6).astype(theano.config.floatX), zero, None, zero, None)
    gru = GRULayer(layer_in, 6, no_gate, pass_gate, in_pass_gate)
    out = lasagne.layers.get_output(gru)
    data = np.arange(4*5*6).reshape(4, 5, 6).astype(theano.config.floatX)
    np.testing.assert_almost_equal(out.eval({layer_in.input_var: data}), data)
def test_gru_return_final():
    """only_return_final=True yields the last step of the full sequence output."""
    batch, steps, feats = 2, 3, 4
    n_units = 2
    shape = (batch, steps, feats)
    data = np.random.random(shape).astype('float32')
    layer_in = InputLayer(shape)
    # Same seed -> identical weights in both layers.
    lasagne.random.get_rng().seed(1234)
    gru_final = GRULayer(layer_in, n_units, only_return_final=True)
    lasagne.random.get_rng().seed(1234)
    gru_full = GRULayer(layer_in, n_units, only_return_final=False)
    final_val = helper.get_output(gru_final).eval({layer_in.input_var: data})
    full_val = helper.get_output(gru_full).eval({layer_in.input_var: data})
    # Final-only output drops the time axis and matches the last time step.
    assert final_val.shape == (full_val.shape[0], full_val.shape[2])
    assert final_val.shape == lasagne.layers.get_output_shape(gru_final)
    assert np.allclose(final_val, full_val[:, -1])
def test_gradient_steps_error():
    """gradient_steps other than -1 combined with unroll_scan=True must raise."""
    layer_in = InputLayer((2, 2, 3))
    # All three recurrent layer types share this constraint.
    for recurrent_cls in (RecurrentLayer, LSTMLayer, GRULayer):
        with pytest.raises(ValueError):
            recurrent_cls(layer_in, 5, gradient_steps=3, unroll_scan=True)
def test_unroll_none_input_error():
    """unroll_scan=True with an unspecified (None) sequence length must raise."""
    layer_in = InputLayer((2, None, 3))
    # All three recurrent layer types share this constraint.
    for recurrent_cls in (RecurrentLayer, LSTMLayer, GRULayer):
        with pytest.raises(ValueError):
            recurrent_cls(layer_in, 5, unroll_scan=True)
def test_CustomRecurrentLayer_child_kwargs():
    """Extra get_output kwargs must be forwarded to both child layers."""
    in_shape = (2, 3, 4)
    n_hid = 5
    flat = in_shape[0] * in_shape[1]
    # Mock the input-to-hidden child; the two mocked methods are called during
    # graph construction, so they need dummy return values.
    in_to_hid = Mock(
        Layer,
        output_shape=(flat, n_hid),
        input_shape=(flat, in_shape[2]),
        input_layer=InputLayer((flat, in_shape[2])),
        get_output_kwargs=['foo'])
    in_to_hid.get_output_for.return_value = T.matrix()
    in_to_hid.get_params.return_value = []
    # Mock the hidden-to-hidden child in the same way.
    hid_to_hid = Mock(
        Layer,
        output_shape=(in_shape[0], n_hid),
        input_shape=(in_shape[0], n_hid),
        input_layer=InputLayer((in_shape[0], n_hid)),
        get_output_kwargs=[])
    hid_to_hid.get_output_for.return_value = T.matrix()
    hid_to_hid.get_params.return_value = []
    l_rec = lasagne.layers.CustomRecurrentLayer(
        InputLayer(in_shape), in_to_hid, hid_to_hid)
    # The kwarg given here should reach both children's get_output_for.
    helper.get_output(l_rec, foo='bar')
    for child in (in_to_hid, hid_to_hid):
        args, kwargs = child.get_output_for.call_args
        # Exactly one positional argument (the Theano expression)...
        assert len(args) == 1
        # ...and our keyword argument, forwarded unchanged.
        assert kwargs == {'foo': 'bar'}
| [
"theano.tensor.mean",
"lasagne.layers.GRULayer",
"numpy.arange",
"lasagne.layers.get_all_params",
"lasagne.layers.RecurrentLayer",
"lasagne.layers.LSTMLayer",
"lasagne.layers.get_output_shape",
"numpy.random.random",
"numpy.testing.assert_almost_equal",
"lasagne.layers.Conv2DLayer",
"lasagne.ini... | [((420, 431), 'theano.tensor.tensor4', 'T.tensor4', ([], {}), '()\n', (429, 431), True, 'import theano.tensor as T\n'), ((504, 522), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (514, 522), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((535, 577), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units'}), '(l_inp, num_units=num_units)\n', (549, 577), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((646, 673), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec', 'x'], {}), '(l_rec, x)\n', (663, 673), False, 'from lasagne.layers import helper\n'), ((957, 1001), 'lasagne.layers.InputLayer', 'InputLayer', (['(num_batch, seq_len, n_features)'], {}), '((num_batch, seq_len, n_features))\n', (967, 1001), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((1014, 1056), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units'}), '(l_inp, num_units=num_units)\n', (1028, 1056), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((1097, 1121), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec'], {}), '(l_rec)\n', (1114, 1121), False, 'from lasagne.layers import helper\n'), ((1275, 1296), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (1285, 1296), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((1309, 1370), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', '(5)'], {'learn_init': '(False)', 'nonlinearity': 'None'}), '(l_inp, 5, learn_init=False, nonlinearity=None)\n', (1323, 1370), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((1638, 1659), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (1648, 1659), False, 'from 
lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((1672, 1713), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', '(5)'], {'learn_init': '(True)'}), '(l_inp, 5, learn_init=True)\n', (1686, 1713), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((2039, 2060), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (2049, 2060), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((2075, 2093), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (2085, 2093), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((2106, 2148), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', '(5)'], {'hid_init': 'l_inp_h'}), '(l_inp, 5, hid_init=l_inp_h)\n', (2120, 2148), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((2158, 2169), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (2167, 2169), True, 'import theano.tensor as T\n'), ((2178, 2188), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (2186, 2188), True, 'import theano.tensor as T\n'), ((2203, 2259), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec', '{l_inp: x, l_inp_h: h}'], {}), '(l_rec, {l_inp: x, l_inp_h: h})\n', (2228, 2259), False, 'import lasagne\n'), ((2371, 2392), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (2381, 2392), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((2407, 2425), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (2417, 2425), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((2443, 2465), 'lasagne.layers.DenseLayer', 'DenseLayer', (['l_inp_h', '(7)'], {}), '(l_inp_h, 7)\n', (2453, 2465), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((2478, 2523), 
'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', '(7)'], {'hid_init': 'l_inp_h_de'}), '(l_inp, 7, hid_init=l_inp_h_de)\n', (2492, 2523), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((3083, 3104), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (3093, 3104), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((3119, 3137), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (3129, 3137), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((3154, 3172), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2)'], {}), '((2, 2))\n', (3164, 3172), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((3185, 3249), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', '(5)'], {'hid_init': 'l_inp_h', 'mask_input': 'l_inp_msk'}), '(l_inp, 5, hid_init=l_inp_h, mask_input=l_inp_msk)\n', (3199, 3249), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((3259, 3270), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (3268, 3270), True, 'import theano.tensor as T\n'), ((3279, 3289), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (3287, 3289), True, 'import theano.tensor as T\n'), ((3300, 3310), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (3308, 3310), True, 'import theano.tensor as T\n'), ((3377, 3417), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec', 'inputs'], {}), '(l_rec, inputs)\n', (3402, 3417), False, 'import lasagne\n'), ((3788, 3831), 'numpy.ones', 'np.ones', (['in_shp'], {'dtype': 'theano.config.floatX'}), '(in_shp, dtype=theano.config.floatX)\n', (3795, 3831), True, 'import numpy as np\n'), ((3846, 3891), 'numpy.ones', 'np.ones', (['in_h_shp'], {'dtype': 'theano.config.floatX'}), '(in_h_shp, dtype=theano.config.floatX)\n', (3853, 3891), True, 'import numpy as np\n'), 
((3912, 3947), 'numpy.tile', 'np.tile', (['Xh_test', '(n_test_cases, 1)'], {}), '(Xh_test, (n_test_cases, 1))\n', (3919, 3947), True, 'import numpy as np\n'), ((4013, 4031), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (4023, 4031), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((4046, 4066), 'lasagne.layers.InputLayer', 'InputLayer', (['in_h_shp'], {}), '(in_h_shp)\n', (4056, 4066), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((4089, 4156), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', 'n_units'], {'hid_init': 'l_inp_h', 'nonlinearity': 'None'}), '(l_inp, n_units, hid_init=l_inp_h, nonlinearity=None)\n', (4103, 4156), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((4270, 4337), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', 'n_units'], {'hid_init': 'Xh_test', 'nonlinearity': 'None'}), '(l_inp, n_units, hid_init=Xh_test, nonlinearity=None)\n', (4284, 4337), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((4747, 4758), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (4756, 4758), True, 'import theano.tensor as T\n'), ((4768, 4778), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (4776, 4778), True, 'import theano.tensor as T\n'), ((4802, 4869), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec_inp_layer', '{l_inp: X, l_inp_h: Xh}'], {}), '(l_rec_inp_layer, {l_inp: X, l_inp_h: Xh})\n', (4827, 4869), False, 'import lasagne\n'), ((4940, 4992), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec_nparray', '{l_inp: X}'], {}), '(l_rec_nparray, {l_inp: X})\n', (4965, 4992), False, 'import lasagne\n'), ((5297, 5350), 'numpy.allclose', 'np.allclose', (['output_val_inp_layer', 'output_val_nparray'], {}), '(output_val_inp_layer, output_val_nparray)\n', (5308, 5350), True, 'import numpy as np\n'), ((5430, 
5475), 'lasagne.layers.RecurrentLayer', 'lasagne.layers.RecurrentLayer', (['input_shape', '(5)'], {}), '(input_shape, 5)\n', (5459, 5475), False, 'import lasagne\n'), ((5564, 5600), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(2, 3, 4)'], {}), '((2, 3, 4))\n', (5589, 5600), False, 'import lasagne\n'), ((5638, 5693), 'lasagne.layers.RecurrentLayer', 'lasagne.layers.RecurrentLayer', (['l_in', '(4)'], {'name': 'layer_name'}), '(l_in, 4, name=layer_name)\n', (5667, 5693), False, 'import lasagne\n'), ((6150, 6222), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(n_batch, n_steps, n_channels, width, height)'], {}), '((n_batch, n_steps, n_channels, width, height))\n', (6175, 6222), False, 'import lasagne\n'), ((6578, 6646), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid', 'l_hid_to_hid'], {}), '(l_in, l_in_to_hid, l_hid_to_hid)\n', (6613, 6646), False, 'import lasagne\n'), ((7404, 7476), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(n_batch, n_steps, n_channels, width, height)'], {}), '((n_batch, n_steps, n_channels, width, height))\n', (7429, 7476), False, 'import lasagne\n'), ((7580, 7640), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_channels, width, height)'], {}), '((None, n_channels, width, height))\n', (7605, 7640), False, 'import lasagne\n'), ((7718, 7807), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_in_to_hid', 'n_in_hid_filters_0', 'filter_shape'], {'pad': '"""same"""'}), "(l_in_to_hid, n_in_hid_filters_0, filter_shape,\n pad='same')\n", (7744, 7807), False, 'import lasagne\n'), ((7870, 7955), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_in_to_hid', 'n_out_filters', 'filter_shape'], {'pad': '"""same"""'}), "(l_in_to_hid, n_out_filters, filter_shape, pad='same'\n )\n", (7896, 7955), False, 'import lasagne\n'), ((8069, 8132), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, 
n_out_filters, width, height)'], {}), '((None, n_out_filters, width, height))\n', (8094, 8132), False, 'import lasagne\n'), ((8261, 8352), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_hid_to_hid', 'n_hid_hid_filters_0', 'filter_shape'], {'pad': '"""same"""'}), "(l_hid_to_hid, n_hid_hid_filters_0, filter_shape,\n pad='same')\n", (8287, 8352), False, 'import lasagne\n'), ((8416, 8502), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_hid_to_hid', 'n_out_filters', 'filter_shape'], {'pad': '"""same"""'}), "(l_hid_to_hid, n_out_filters, filter_shape, pad=\n 'same')\n", (8442, 8502), False, 'import lasagne\n'), ((8520, 8588), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid', 'l_hid_to_hid'], {}), '(l_in, l_in_to_hid, l_hid_to_hid)\n', (8555, 8588), False, 'import lasagne\n'), ((9415, 9487), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(n_batch, n_steps, n_channels, width, height)'], {}), '((n_batch, n_steps, n_channels, width, height))\n', (9440, 9487), False, 'import lasagne\n'), ((9656, 9716), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_channels, width, height)'], {}), '((None, n_channels, width, height))\n', (9681, 9716), False, 'import lasagne\n'), ((9750, 9810), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_channels, width, height)'], {}), '((None, n_channels, width, height))\n', (9775, 9810), False, 'import lasagne\n'), ((9842, 9916), 'lasagne.layers.ConcatLayer', 'lasagne.layers.ConcatLayer', (['[l_in_to_hid_bad_0, l_in_to_hid_bad_1]'], {'axis': '(1)'}), '([l_in_to_hid_bad_0, l_in_to_hid_bad_1], axis=1)\n', (9868, 9916), False, 'import lasagne\n'), ((10007, 10100), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_in_to_hid_bad', 'n_in_hid_filters_0', 'filter_shape'], {'pad': '"""same"""'}), "(l_in_to_hid_bad, n_in_hid_filters_0,\n filter_shape, pad='same')\n", (10033, 10100), False, 'import 
lasagne\n'), ((10167, 10255), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_in_to_hid_bad', 'n_out_filters', 'filter_shape'], {'pad': '"""same"""'}), "(l_in_to_hid_bad, n_out_filters, filter_shape,\n pad='same')\n", (10193, 10255), False, 'import lasagne\n'), ((10355, 10415), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_channels, width, height)'], {}), '((None, n_channels, width, height))\n', (10380, 10415), False, 'import lasagne\n'), ((10493, 10582), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_in_to_hid', 'n_in_hid_filters_0', 'filter_shape'], {'pad': '"""same"""'}), "(l_in_to_hid, n_in_hid_filters_0, filter_shape,\n pad='same')\n", (10519, 10582), False, 'import lasagne\n'), ((10645, 10730), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_in_to_hid', 'n_out_filters', 'filter_shape'], {'pad': '"""same"""'}), "(l_in_to_hid, n_out_filters, filter_shape, pad='same'\n )\n", (10671, 10730), False, 'import lasagne\n'), ((10910, 10973), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_out_filters, width, height)'], {}), '((None, n_out_filters, width, height))\n', (10935, 10973), False, 'import lasagne\n'), ((11008, 11071), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_out_filters, width, height)'], {}), '((None, n_out_filters, width, height))\n', (11033, 11071), False, 'import lasagne\n'), ((11104, 11180), 'lasagne.layers.ConcatLayer', 'lasagne.layers.ConcatLayer', (['[l_hid_to_hid_bad_0, l_hid_to_hid_bad_1]'], {'axis': '(1)'}), '([l_hid_to_hid_bad_0, l_hid_to_hid_bad_1], axis=1)\n', (11130, 11180), False, 'import lasagne\n'), ((11276, 11371), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_hid_to_hid_bad', 'n_hid_hid_filters_0', 'filter_shape'], {'pad': '"""same"""'}), "(l_hid_to_hid_bad, n_hid_hid_filters_0,\n filter_shape, pad='same')\n", (11302, 11371), False, 'import lasagne\n'), ((11439, 11528), 
'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_hid_to_hid_bad', 'n_out_filters', 'filter_shape'], {'pad': '"""same"""'}), "(l_hid_to_hid_bad, n_out_filters, filter_shape,\n pad='same')\n", (11465, 11528), False, 'import lasagne\n'), ((11643, 11706), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_out_filters, width, height)'], {}), '((None, n_out_filters, width, height))\n', (11668, 11706), False, 'import lasagne\n'), ((11835, 11926), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_hid_to_hid', 'n_hid_hid_filters_0', 'filter_shape'], {'pad': '"""same"""'}), "(l_hid_to_hid, n_hid_hid_filters_0, filter_shape,\n pad='same')\n", (11861, 11926), False, 'import lasagne\n'), ((11990, 12076), 'lasagne.layers.Conv2DLayer', 'lasagne.layers.Conv2DLayer', (['l_hid_to_hid', 'n_out_filters', 'filter_shape'], {'pad': '"""same"""'}), "(l_hid_to_hid, n_out_filters, filter_shape, pad=\n 'same')\n", (12016, 12076), False, 'import lasagne\n'), ((12584, 12652), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid', 'l_hid_to_hid'], {}), '(l_in, l_in_to_hid, l_hid_to_hid)\n', (12619, 12652), False, 'import lasagne\n'), ((12912, 12984), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(n_batch, n_steps, n_channels, width, height)'], {}), '((n_batch, n_steps, n_channels, width, height))\n', (12937, 12984), False, 'import lasagne\n'), ((15310, 15328), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (15320, 15328), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((15337, 15348), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (15346, 15348), True, 'import theano.tensor as T\n'), ((15361, 15412), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', 'num_units'], {'grad_clipping': '(1.0)'}), '(l_inp, num_units, grad_clipping=1.0)\n', (15375, 15412), False, 'from lasagne.layers import 
RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((15426, 15461), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec', 'x'], {}), '(l_rec, x)\n', (15451, 15461), False, 'import lasagne\n'), ((15562, 15573), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (15571, 15573), True, 'import theano.tensor as T\n'), ((15633, 15651), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (15643, 15651), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((15786, 15845), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)'}), '(l_inp, num_units=num_units, backwards=False)\n', (15800, 15845), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((15902, 15960), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(True)'}), '(l_inp, num_units=num_units, backwards=True)\n', (15916, 15960), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((15977, 16008), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_fwd', 'x'], {}), '(l_rec_fwd, x)\n', (15994, 16008), False, 'from lasagne.layers import helper\n'), ((16025, 16056), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_bck', 'x'], {}), '(l_rec_bck, x)\n', (16042, 16056), False, 'from lasagne.layers import helper\n'), ((16226, 16289), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_fwd', 'output_bck[:, ::-1]'], {}), '(output_fwd, output_bck[:, ::-1])\n', (16256, 16289), True, 'import numpy as np\n'), ((16444, 16455), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (16453, 16455), True, 'import theano.tensor as T\n'), ((16508, 16526), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (16518, 16526), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, 
Layer\n'), ((16677, 16736), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)'}), '(l_inp, num_units=num_units, backwards=False)\n', (16691, 16736), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((16750, 16777), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec', 'x'], {}), '(l_rec, x)\n', (16767, 16777), False, 'from lasagne.layers import helper\n'), ((17025, 17043), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (17035, 17043), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((17061, 17083), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp[:2]'], {}), '(in_shp[:2])\n', (17071, 17083), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((17280, 17386), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)', 'unroll_scan': '(False)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, backwards=False, unroll_scan=\n False, mask_input=l_mask_inp)\n', (17294, 17386), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((17473, 17578), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)', 'unroll_scan': '(True)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, backwards=False, unroll_scan=\n True, mask_input=l_mask_inp)\n', (17487, 17578), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((17626, 17655), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_scan'], {}), '(l_rec_scan)\n', (17643, 17655), False, 'from lasagne.layers import helper\n'), ((17678, 17709), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_unroll'], {}), '(l_rec_unroll)\n', (17695, 17709), False, 'from lasagne.layers import helper\n'), 
((17931, 17999), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_scan_val', 'output_unrolled_val'], {}), '(output_scan_val, output_unrolled_val)\n', (17961, 17999), True, 'import numpy as np\n'), ((18112, 18123), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (18121, 18123), True, 'import theano.tensor as T\n'), ((18183, 18201), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (18193, 18201), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((18345, 18422), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(True)', 'unroll_scan': '(False)'}), '(l_inp, num_units=num_units, backwards=True, unroll_scan=False)\n', (18359, 18422), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((18514, 18590), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(True)', 'unroll_scan': '(True)'}), '(l_inp, num_units=num_units, backwards=True, unroll_scan=True)\n', (18528, 18590), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((18643, 18675), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_scan', 'x'], {}), '(l_rec_scan, x)\n', (18660, 18675), False, 'from lasagne.layers import helper\n'), ((18698, 18732), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_unroll', 'x'], {}), '(l_rec_unroll, x)\n', (18715, 18732), False, 'from lasagne.layers import helper\n'), ((18846, 18914), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_scan_val', 'output_unrolled_val'], {}), '(output_scan_val, output_unrolled_val)\n', (18876, 18914), True, 'import numpy as np\n'), ((19073, 19091), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (19083, 19091), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, 
Layer\n'), ((19109, 19131), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp[:2]'], {}), '(in_shp[:2])\n', (19119, 19131), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((19201, 19247), 'numpy.ones', 'np.ones', (['(num_batch, seq_len)'], {'dtype': '"""float32"""'}), "((num_batch, seq_len), dtype='float32')\n", (19208, 19247), True, 'import numpy as np\n'), ((19343, 19435), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units', 'precompute_input': '(True)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, precompute_input=True,\n mask_input=l_mask_inp)\n', (19357, 19435), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((19574, 19667), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp'], {'num_units': 'num_units', 'precompute_input': '(False)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, precompute_input=False,\n mask_input=l_mask_inp)\n', (19588, 19667), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((20084, 20155), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_precompute', 'output_no_precompute'], {}), '(output_precompute, output_no_precompute)\n', (20114, 20155), True, 'import numpy as np\n'), ((20369, 20387), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (20379, 20387), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((20446, 20502), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', 'num_units'], {'only_return_final': '(True)'}), '(l_inp, num_units, only_return_final=True)\n', (20460, 20502), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((20559, 20616), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_inp', 'num_units'], {'only_return_final': '(False)'}), '(l_inp, num_units, only_return_final=False)\n', 
(20573, 20616), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((20940, 20984), 'numpy.allclose', 'np.allclose', (['output_final', 'output_all[:, -1]'], {}), '(output_final, output_all[:, -1])\n', (20951, 20984), True, 'import numpy as np\n'), ((21107, 21118), 'theano.tensor.tensor4', 'T.tensor4', ([], {}), '()\n', (21116, 21118), True, 'import theano.tensor as T\n'), ((21191, 21209), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (21201, 21209), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((21279, 21316), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units'}), '(l_inp, num_units=num_units)\n', (21288, 21316), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((21330, 21358), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm', 'x'], {}), '(l_lstm, x)\n', (21347, 21358), False, 'from lasagne.layers import helper\n'), ((21637, 21681), 'lasagne.layers.InputLayer', 'InputLayer', (['(num_batch, seq_len, n_features)'], {}), '((num_batch, seq_len, n_features))\n', (21647, 21681), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((21695, 21732), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units'}), '(l_inp, num_units=num_units)\n', (21704, 21732), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((21746, 21771), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm'], {}), '(l_lstm)\n', (21763, 21771), False, 'from lasagne.layers import helper\n'), ((21934, 21955), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (21944, 21955), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((21969, 22023), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', '(5)'], {'peepholes': '(False)', 'learn_init': 
'(False)'}), '(l_inp, 5, peepholes=False, learn_init=False)\n', (21978, 22023), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((22358, 22379), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (22368, 22379), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((22393, 22446), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', '(5)'], {'peepholes': '(True)', 'learn_init': '(False)'}), '(l_inp, 5, peepholes=True, learn_init=False)\n', (22402, 22446), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((22804, 22825), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (22814, 22825), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((22839, 22892), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', '(5)'], {'peepholes': '(False)', 'learn_init': '(True)'}), '(l_inp, 5, peepholes=False, learn_init=True)\n', (22848, 22892), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((23293, 23314), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (23303, 23314), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((23329, 23347), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (23339, 23347), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((23363, 23381), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (23373, 23381), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((23395, 23452), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', '(5)'], {'hid_init': 'l_inp_h', 'cell_init': 'l_cell_h'}), '(l_inp, 5, hid_init=l_inp_h, cell_init=l_cell_h)\n', (23404, 23452), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, 
CustomRecurrentLayer\n'), ((23462, 23473), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (23471, 23473), True, 'import theano.tensor as T\n'), ((23482, 23492), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (23490, 23492), True, 'import theano.tensor as T\n'), ((23507, 23564), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_lstm', '{l_inp: x, l_inp_h: h}'], {}), '(l_lstm, {l_inp: x, l_inp_h: h})\n', (23532, 23564), False, 'import lasagne\n'), ((23671, 23692), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (23681, 23692), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((23707, 23725), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (23717, 23725), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((23743, 23765), 'lasagne.layers.DenseLayer', 'DenseLayer', (['l_inp_h', '(7)'], {}), '(l_inp_h, 7)\n', (23753, 23765), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((23783, 23801), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (23793, 23801), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((23822, 23847), 'lasagne.layers.DenseLayer', 'DenseLayer', (['l_inp_cell', '(7)'], {}), '(l_inp_cell, 7)\n', (23832, 23847), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((23861, 23926), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', '(7)'], {'hid_init': 'l_inp_h_de', 'cell_init': 'l_inp_cell_de'}), '(l_inp, 7, hid_init=l_inp_h_de, cell_init=l_inp_cell_de)\n', (23870, 23926), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((24659, 24680), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (24669, 24680), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((24695, 
24713), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (24705, 24713), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((24730, 24748), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2)'], {}), '((2, 2))\n', (24740, 24748), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((24764, 24782), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (24774, 24782), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((24796, 24875), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', '(5)'], {'hid_init': 'l_inp_h', 'mask_input': 'l_inp_msk', 'cell_init': 'l_cell_h'}), '(l_inp, 5, hid_init=l_inp_h, mask_input=l_inp_msk, cell_init=l_cell_h)\n', (24805, 24875), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((24908, 24919), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (24917, 24919), True, 'import theano.tensor as T\n'), ((24928, 24938), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (24936, 24938), True, 'import theano.tensor as T\n'), ((24949, 24959), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (24957, 24959), True, 'import theano.tensor as T\n'), ((25026, 25067), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_lstm', 'inputs'], {}), '(l_lstm, inputs)\n', (25051, 25067), False, 'import lasagne\n'), ((25464, 25507), 'numpy.ones', 'np.ones', (['in_shp'], {'dtype': 'theano.config.floatX'}), '(in_shp, dtype=theano.config.floatX)\n', (25471, 25507), True, 'import numpy as np\n'), ((25522, 25567), 'numpy.ones', 'np.ones', (['in_h_shp'], {'dtype': 'theano.config.floatX'}), '(in_h_shp, dtype=theano.config.floatX)\n', (25529, 25567), True, 'import numpy as np\n'), ((25582, 25630), 'numpy.ones', 'np.ones', (['in_cell_shp'], {'dtype': 'theano.config.floatX'}), '(in_cell_shp, dtype=theano.config.floatX)\n', (25589, 25630), True, 'import numpy as 
np\n'), ((25651, 25686), 'numpy.tile', 'np.tile', (['Xh_test', '(n_test_cases, 1)'], {}), '(Xh_test, (n_test_cases, 1))\n', (25658, 25686), True, 'import numpy as np\n'), ((25707, 25742), 'numpy.tile', 'np.tile', (['Xc_test', '(n_test_cases, 1)'], {}), '(Xc_test, (n_test_cases, 1))\n', (25714, 25742), True, 'import numpy as np\n'), ((25808, 25826), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (25818, 25826), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((25841, 25861), 'lasagne.layers.InputLayer', 'InputLayer', (['in_h_shp'], {}), '(in_h_shp)\n', (25851, 25861), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((25879, 25902), 'lasagne.layers.InputLayer', 'InputLayer', (['in_cell_shp'], {}), '(in_cell_shp)\n', (25889, 25902), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((25925, 26013), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', 'n_units'], {'hid_init': 'l_inp_h', 'cell_init': 'l_inp_cell', 'nonlinearity': 'None'}), '(l_inp, n_units, hid_init=l_inp_h, cell_init=l_inp_cell,\n nonlinearity=None)\n', (25934, 26013), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((26118, 26204), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', 'n_units'], {'hid_init': 'Xh_test', 'cell_init': 'Xc_test', 'nonlinearity': 'None'}), '(l_inp, n_units, hid_init=Xh_test, cell_init=Xc_test, nonlinearity\n =None)\n', (26127, 26204), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((26604, 26615), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (26613, 26615), True, 'import theano.tensor as T\n'), ((26625, 26635), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (26633, 26635), True, 'import theano.tensor as T\n'), ((26645, 26655), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (26653, 26655), True, 'import theano.tensor as T\n'), 
((26679, 26766), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec_inp_layer', '{l_inp: X, l_inp_h: Xh, l_inp_cell: Xc}'], {}), '(l_rec_inp_layer, {l_inp: X, l_inp_h: Xh,\n l_inp_cell: Xc})\n', (26704, 26766), False, 'import lasagne\n'), ((26883, 26935), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec_nparray', '{l_inp: X}'], {}), '(l_rec_nparray, {l_inp: X})\n', (26908, 26935), False, 'import lasagne\n'), ((27259, 27312), 'numpy.allclose', 'np.allclose', (['output_val_inp_layer', 'output_val_nparray'], {}), '(output_val_inp_layer, output_val_nparray)\n', (27270, 27312), True, 'import numpy as np\n'), ((27401, 27412), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (27410, 27412), True, 'import theano.tensor as T\n'), ((27491, 27526), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec', 'x'], {}), '(l_rec, x)\n', (27516, 27526), False, 'import lasagne\n'), ((27622, 27633), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (27631, 27633), True, 'import theano.tensor as T\n'), ((27693, 27711), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (27703, 27711), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((27847, 27901), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)'}), '(l_inp, num_units=num_units, backwards=False)\n', (27856, 27901), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((27959, 28012), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(True)'}), '(l_inp, num_units=num_units, backwards=True)\n', (27968, 28012), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((28030, 28062), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm_fwd', 'x'], {}), '(l_lstm_fwd, x)\n', (28047, 28062), False, 'from lasagne.layers import helper\n'), ((28080, 
28112), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm_bck', 'x'], {}), '(l_lstm_bck, x)\n', (28097, 28112), False, 'from lasagne.layers import helper\n'), ((28276, 28347), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_fwd_val', 'output_bck_val[:, ::-1]'], {}), '(output_fwd_val, output_bck_val[:, ::-1])\n', (28306, 28347), True, 'import numpy as np\n'), ((28501, 28519), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (28511, 28519), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((28537, 28559), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp[:2]'], {}), '(in_shp[:2])\n', (28547, 28559), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((28629, 28675), 'numpy.ones', 'np.ones', (['(num_batch, seq_len)'], {'dtype': '"""float32"""'}), "((num_batch, seq_len), dtype='float32')\n", (28636, 28675), True, 'import numpy as np\n'), ((28772, 28860), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units', 'precompute_input': '(True)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, precompute_input=True, mask_input=\n l_mask_inp)\n', (28781, 28860), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((28940, 29029), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units', 'precompute_input': '(False)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, precompute_input=False, mask_input=\n l_mask_inp)\n', (28949, 29029), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((29445, 29516), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_precompute', 'output_no_precompute'], {}), '(output_precompute, output_no_precompute)\n', (29475, 29516), True, 'import numpy as np\n'), ((29660, 29671), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', 
(29669, 29671), True, 'import theano.tensor as T\n'), ((29724, 29742), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (29734, 29742), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((29893, 29947), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)'}), '(l_inp, num_units=num_units, backwards=False)\n', (29902, 29947), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((29961, 29988), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec', 'x'], {}), '(l_rec, x)\n', (29978, 29988), False, 'from lasagne.layers import helper\n'), ((30231, 30249), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (30241, 30249), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((30267, 30289), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp[:2]'], {}), '(in_shp[:2])\n', (30277, 30289), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((30487, 30587), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)', 'unroll_scan': '(False)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, backwards=False, unroll_scan=False,\n mask_input=l_mask_inp)\n', (30496, 30587), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((30674, 30773), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)', 'unroll_scan': '(True)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, backwards=False, unroll_scan=True,\n mask_input=l_mask_inp)\n', (30683, 30773), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((30820, 30850), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm_scan'], {}), '(l_lstm_scan)\n', (30837, 30850), False, 'from 
lasagne.layers import helper\n'), ((30873, 30907), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm_unrolled'], {}), '(l_lstm_unrolled)\n', (30890, 30907), False, 'from lasagne.layers import helper\n'), ((31200, 31268), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_scan_val', 'output_unrolled_val'], {}), '(output_scan_val, output_unrolled_val)\n', (31230, 31268), True, 'import numpy as np\n'), ((31376, 31387), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (31385, 31387), True, 'import theano.tensor as T\n'), ((31447, 31465), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (31457, 31465), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((31611, 31683), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(True)', 'unroll_scan': '(False)'}), '(l_inp, num_units=num_units, backwards=True, unroll_scan=False)\n', (31620, 31683), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((31774, 31845), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(True)', 'unroll_scan': '(True)'}), '(l_inp, num_units=num_units, backwards=True, unroll_scan=True)\n', (31783, 31845), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((31896, 31929), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm_scan', 'x'], {}), '(l_lstm_scan, x)\n', (31913, 31929), False, 'from lasagne.layers import helper\n'), ((31957, 31994), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm_unrolled', 'x'], {}), '(l_lstm_unrolled, x)\n', (31974, 31994), False, 'from lasagne.layers import helper\n'), ((32114, 32182), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_scan_val', 'output_unrolled_val'], {}), '(output_scan_val, output_unrolled_val)\n', (32144, 32182), True, 
'import numpy as np\n'), ((32285, 32306), 'lasagne.layers.InputLayer', 'InputLayer', (['(4, 5, 6)'], {}), '((4, 5, 6))\n', (32295, 32306), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((32318, 32344), 'lasagne.init.Constant', 'lasagne.init.Constant', (['(0.0)'], {}), '(0.0)\n', (32339, 32344), False, 'import lasagne\n'), ((32354, 32380), 'lasagne.init.Constant', 'lasagne.init.Constant', (['(1.0)'], {}), '(1.0)\n', (32375, 32380), False, 'import lasagne\n'), ((32396, 32429), 'lasagne.layers.Gate', 'Gate', (['zero', 'zero', 'zero', 'one', 'None'], {}), '(zero, zero, zero, one, None)\n', (32400, 32429), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((32444, 32478), 'lasagne.layers.Gate', 'Gate', (['zero', 'zero', 'zero', 'zero', 'None'], {}), '(zero, zero, zero, zero, None)\n', (32448, 32478), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((32588, 32657), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_in', '(6)', 'pass_gate', 'no_gate', 'in_pass_gate', 'pass_gate', 'None'], {}), '(l_in, 6, pass_gate, no_gate, in_pass_gate, pass_gate, None)\n', (32597, 32657), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((32677, 32709), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec'], {}), '(l_rec)\n', (32702, 32709), False, 'import lasagne\n'), ((33064, 33082), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (33074, 33082), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((33141, 33192), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', 'num_units'], {'only_return_final': '(True)'}), '(l_inp, num_units, only_return_final=True)\n', (33150, 33192), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((33249, 33301), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_inp', 'num_units'], {'only_return_final': 
'(False)'}), '(l_inp, num_units, only_return_final=False)\n', (33258, 33301), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((33625, 33669), 'numpy.allclose', 'np.allclose', (['output_final', 'output_all[:, -1]'], {}), '(output_final, output_all[:, -1])\n', (33636, 33669), True, 'import numpy as np\n'), ((33791, 33802), 'theano.tensor.tensor4', 'T.tensor4', ([], {}), '()\n', (33800, 33802), True, 'import theano.tensor as T\n'), ((33875, 33893), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (33885, 33893), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((33906, 33942), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units'}), '(l_inp, num_units=num_units)\n', (33914, 33942), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((34011, 34038), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec', 'x'], {}), '(l_rec, x)\n', (34028, 34038), False, 'from lasagne.layers import helper\n'), ((34316, 34360), 'lasagne.layers.InputLayer', 'InputLayer', (['(num_batch, seq_len, n_features)'], {}), '((num_batch, seq_len, n_features))\n', (34326, 34360), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((34373, 34409), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units'}), '(l_inp, num_units=num_units)\n', (34381, 34409), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((34444, 34468), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_gru'], {}), '(l_gru)\n', (34461, 34468), False, 'from lasagne.layers import helper\n'), ((34633, 34654), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (34643, 34654), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((34667, 34703), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp', '(5)'], 
{'learn_init': '(False)'}), '(l_inp, 5, learn_init=False)\n', (34675, 34703), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((35040, 35061), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (35050, 35061), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((35074, 35109), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp', '(5)'], {'learn_init': '(True)'}), '(l_inp, 5, learn_init=True)\n', (35082, 35109), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((35506, 35527), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (35516, 35527), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((35542, 35560), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (35552, 35560), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((35573, 35609), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp', '(5)'], {'hid_init': 'l_inp_h'}), '(l_inp, 5, hid_init=l_inp_h)\n', (35581, 35609), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((35619, 35630), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (35628, 35630), True, 'import theano.tensor as T\n'), ((35639, 35649), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (35647, 35649), True, 'import theano.tensor as T\n'), ((35664, 35720), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_gru', '{l_inp: x, l_inp_h: h}'], {}), '(l_gru, {l_inp: x, l_inp_h: h})\n', (35689, 35720), False, 'import lasagne\n'), ((35826, 35847), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (35836, 35847), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((35862, 35880), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (35872, 35880), False, 
'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((35898, 35920), 'lasagne.layers.DenseLayer', 'DenseLayer', (['l_inp_h', '(7)'], {}), '(l_inp_h, 7)\n', (35908, 35920), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((35933, 35972), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp', '(7)'], {'hid_init': 'l_inp_h_de'}), '(l_inp, 7, hid_init=l_inp_h_de)\n', (35941, 35972), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((36897, 36940), 'numpy.ones', 'np.ones', (['in_shp'], {'dtype': 'theano.config.floatX'}), '(in_shp, dtype=theano.config.floatX)\n', (36904, 36940), True, 'import numpy as np\n'), ((36955, 37000), 'numpy.ones', 'np.ones', (['in_h_shp'], {'dtype': 'theano.config.floatX'}), '(in_h_shp, dtype=theano.config.floatX)\n', (36962, 37000), True, 'import numpy as np\n'), ((37021, 37056), 'numpy.tile', 'np.tile', (['Xh_test', '(n_test_cases, 1)'], {}), '(Xh_test, (n_test_cases, 1))\n', (37028, 37056), True, 'import numpy as np\n'), ((37122, 37140), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (37132, 37140), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((37155, 37175), 'lasagne.layers.InputLayer', 'InputLayer', (['in_h_shp'], {}), '(in_h_shp)\n', (37165, 37175), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((37198, 37240), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp', 'n_units'], {'hid_init': 'l_inp_h'}), '(l_inp, n_units, hid_init=l_inp_h)\n', (37206, 37240), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((37317, 37359), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp', 'n_units'], {'hid_init': 'Xh_test'}), '(l_inp, n_units, hid_init=Xh_test)\n', (37325, 37359), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((37734, 37745), 'theano.tensor.tensor3', 'T.tensor3', 
([], {}), '()\n', (37743, 37745), True, 'import theano.tensor as T\n'), ((37755, 37765), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (37763, 37765), True, 'import theano.tensor as T\n'), ((37789, 37856), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec_inp_layer', '{l_inp: X, l_inp_h: Xh}'], {}), '(l_rec_inp_layer, {l_inp: X, l_inp_h: Xh})\n', (37814, 37856), False, 'import lasagne\n'), ((37927, 37979), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec_nparray', '{l_inp: X}'], {}), '(l_rec_nparray, {l_inp: X})\n', (37952, 37979), False, 'import lasagne\n'), ((38284, 38337), 'numpy.allclose', 'np.allclose', (['output_val_inp_layer', 'output_val_nparray'], {}), '(output_val_inp_layer, output_val_nparray)\n', (38295, 38337), True, 'import numpy as np\n'), ((38457, 38478), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (38467, 38478), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((38493, 38511), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 5)'], {}), '((2, 5))\n', (38503, 38511), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((38528, 38546), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2)'], {}), '((2, 2))\n', (38538, 38546), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((38559, 38617), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp', '(5)'], {'hid_init': 'l_inp_h', 'mask_input': 'l_inp_msk'}), '(l_inp, 5, hid_init=l_inp_h, mask_input=l_inp_msk)\n', (38567, 38617), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((38627, 38638), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (38636, 38638), True, 'import theano.tensor as T\n'), ((38647, 38657), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (38655, 38657), True, 'import theano.tensor as T\n'), ((38668, 38678), 'theano.tensor.matrix', 'T.matrix', ([], {}), 
'()\n', (38676, 38678), True, 'import theano.tensor as T\n'), ((38745, 38785), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_gru', 'inputs'], {}), '(l_gru, inputs)\n', (38770, 38785), False, 'import lasagne\n'), ((38873, 38884), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (38882, 38884), True, 'import theano.tensor as T\n'), ((38962, 38997), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec', 'x'], {}), '(l_rec, x)\n', (38987, 38997), False, 'import lasagne\n'), ((39092, 39103), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (39101, 39103), True, 'import theano.tensor as T\n'), ((39163, 39181), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (39173, 39181), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((39316, 39369), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)'}), '(l_inp, num_units=num_units, backwards=False)\n', (39324, 39369), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((39426, 39478), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(True)'}), '(l_inp, num_units=num_units, backwards=True)\n', (39434, 39478), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((39496, 39527), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_gru_fwd', 'x'], {}), '(l_gru_fwd, x)\n', (39513, 39527), False, 'from lasagne.layers import helper\n'), ((39545, 39576), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_gru_bck', 'x'], {}), '(l_gru_bck, x)\n', (39562, 39576), False, 'from lasagne.layers import helper\n'), ((39740, 39811), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_fwd_val', 'output_bck_val[:, ::-1]'], {}), '(output_fwd_val, output_bck_val[:, ::-1])\n', (39770, 39811), True, 'import numpy as np\n'), ((39954, 39965), 
'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (39963, 39965), True, 'import theano.tensor as T\n'), ((40018, 40036), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (40028, 40036), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((40187, 40240), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)'}), '(l_inp, num_units=num_units, backwards=False)\n', (40195, 40240), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((40254, 40281), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec', 'x'], {}), '(l_rec, x)\n', (40271, 40281), False, 'from lasagne.layers import helper\n'), ((40496, 40514), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (40506, 40514), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((40532, 40554), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp[:2]'], {}), '(in_shp[:2])\n', (40542, 40554), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((40751, 40850), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)', 'unroll_scan': '(False)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, backwards=False, unroll_scan=False,\n mask_input=l_mask_inp)\n', (40759, 40850), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((40934, 41032), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(False)', 'unroll_scan': '(True)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, backwards=False, unroll_scan=True,\n mask_input=l_mask_inp)\n', (40942, 41032), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((41077, 41106), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_gru_scan'], {}), 
'(l_gru_scan)\n', (41094, 41106), False, 'from lasagne.layers import helper\n'), ((41129, 41162), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_gru_unrolled'], {}), '(l_gru_unrolled)\n', (41146, 41162), False, 'from lasagne.layers import helper\n'), ((41455, 41523), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_scan_val', 'output_unrolled_val'], {}), '(output_scan_val, output_unrolled_val)\n', (41485, 41523), True, 'import numpy as np\n'), ((41630, 41641), 'theano.tensor.tensor3', 'T.tensor3', ([], {}), '()\n', (41639, 41641), True, 'import theano.tensor as T\n'), ((41701, 41719), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (41711, 41719), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((41863, 41934), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(True)', 'unroll_scan': '(False)'}), '(l_inp, num_units=num_units, backwards=True, unroll_scan=False)\n', (41871, 41934), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((42022, 42092), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units', 'backwards': '(True)', 'unroll_scan': '(True)'}), '(l_inp, num_units=num_units, backwards=True, unroll_scan=True)\n', (42030, 42092), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((42141, 42173), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_gru_scan', 'x'], {}), '(l_gru_scan, x)\n', (42158, 42173), False, 'from lasagne.layers import helper\n'), ((42196, 42232), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_gru_unrolled', 'x'], {}), '(l_gru_unrolled, x)\n', (42213, 42232), False, 'from lasagne.layers import helper\n'), ((42347, 42415), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_scan_val', 'output_unrolled_val'], {}), '(output_scan_val, 
output_unrolled_val)\n', (42377, 42415), True, 'import numpy as np\n'), ((42568, 42586), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (42578, 42586), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((42604, 42626), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp[:2]'], {}), '(in_shp[:2])\n', (42614, 42626), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((42696, 42742), 'numpy.ones', 'np.ones', (['(num_batch, seq_len)'], {'dtype': '"""float32"""'}), "((num_batch, seq_len), dtype='float32')\n", (42703, 42742), True, 'import numpy as np\n'), ((42838, 42925), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units', 'precompute_input': '(True)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, precompute_input=True, mask_input=\n l_mask_inp)\n', (42846, 42925), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((43019, 43107), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp'], {'num_units': 'num_units', 'precompute_input': '(False)', 'mask_input': 'l_mask_inp'}), '(l_inp, num_units=num_units, precompute_input=False, mask_input=\n l_mask_inp)\n', (43027, 43107), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((43572, 43643), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['output_precompute', 'output_no_precompute'], {}), '(output_precompute, output_no_precompute)\n', (43602, 43643), True, 'import numpy as np\n'), ((43745, 43766), 'lasagne.layers.InputLayer', 'InputLayer', (['(4, 5, 6)'], {}), '((4, 5, 6))\n', (43755, 43766), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((43778, 43804), 'lasagne.init.Constant', 'lasagne.init.Constant', (['(0.0)'], {}), '(0.0)\n', (43799, 43804), False, 'import lasagne\n'), ((43814, 43840), 'lasagne.init.Constant', 'lasagne.init.Constant', (['(1.0)'], {}), 
'(1.0)\n', (43835, 43840), False, 'import lasagne\n'), ((43856, 43889), 'lasagne.layers.Gate', 'Gate', (['zero', 'zero', 'None', 'one', 'None'], {}), '(zero, zero, None, one, None)\n', (43860, 43889), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((43904, 43938), 'lasagne.layers.Gate', 'Gate', (['zero', 'zero', 'None', 'zero', 'None'], {}), '(zero, zero, None, zero, None)\n', (43908, 43938), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((44048, 44099), 'lasagne.layers.GRULayer', 'GRULayer', (['l_in', '(6)', 'no_gate', 'pass_gate', 'in_pass_gate'], {}), '(l_in, 6, no_gate, pass_gate, in_pass_gate)\n', (44056, 44099), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((44110, 44142), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec'], {}), '(l_rec)\n', (44135, 44142), False, 'import lasagne\n'), ((44496, 44514), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shp'], {}), '(in_shp)\n', (44506, 44514), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((44573, 44623), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp', 'num_units'], {'only_return_final': '(True)'}), '(l_inp, num_units, only_return_final=True)\n', (44581, 44623), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((44680, 44731), 'lasagne.layers.GRULayer', 'GRULayer', (['l_inp', 'num_units'], {'only_return_final': '(False)'}), '(l_inp, num_units, only_return_final=False)\n', (44688, 44731), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((45055, 45099), 'numpy.allclose', 'np.allclose', (['output_final', 'output_all[:, -1]'], {}), '(output_final, output_all[:, -1])\n', (45066, 45099), True, 'import numpy as np\n'), ((45237, 45258), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (45247, 45258), False, 'from lasagne.layers import 
InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((45731, 45755), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, None, 3)'], {}), '((2, None, 3))\n', (45741, 45755), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((46517, 46527), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (46525, 46527), True, 'import theano.tensor as T\n'), ((46866, 46876), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (46874, 46876), True, 'import theano.tensor as T\n'), ((47164, 47199), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec'], {'foo': '"""bar"""'}), "(l_rec, foo='bar')\n", (47181, 47199), False, 'from lasagne.layers import helper\n'), ((726, 768), 'lasagne.layers.helper.get_output_shape', 'helper.get_output_shape', (['l_rec', 'x_in.shape'], {}), '(l_rec, x_in.shape)\n', (749, 768), False, 'from lasagne.layers import helper\n'), ((1137, 1151), 'theano.tensor.mean', 'T.mean', (['output'], {}), '(output)\n', (1143, 1151), True, 'import theano.tensor as T\n'), ((1153, 1189), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_rec'], {}), '(l_rec)\n', (1182, 1189), False, 'import lasagne\n'), ((2597, 2633), 'lasagne.layers.get_all_layers', 'lasagne.layers.get_all_layers', (['l_rec'], {}), '(l_rec)\n', (2626, 2633), False, 'import lasagne\n'), ((6286, 6346), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_channels, width, height)'], {}), '((None, n_channels, width, height))\n', (6311, 6346), False, 'import lasagne\n'), ((6452, 6515), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_out_filters, width, height)'], {}), '((None, n_out_filters, width, height))\n', (6477, 6515), False, 'import lasagne\n'), ((6816, 6848), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec'], {}), '(l_rec)\n', (6841, 6848), False, 'import lasagne\n'), ((8760, 8792), 'lasagne.layers.get_output', 'lasagne.layers.get_output', (['l_rec'], {}), '(l_rec)\n', (8785, 
8792), False, 'import lasagne\n'), ((12160, 12185), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12173, 12185), False, 'import pytest\n'), ((12203, 12275), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid_bad', 'l_hid_to_hid'], {}), '(l_in, l_in_to_hid_bad, l_hid_to_hid)\n', (12238, 12275), False, 'import lasagne\n'), ((12299, 12324), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12312, 12324), False, 'import pytest\n'), ((12342, 12414), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid', 'l_hid_to_hid_bad'], {}), '(l_in, l_in_to_hid, l_hid_to_hid_bad)\n', (12377, 12414), False, 'import lasagne\n'), ((12438, 12463), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (12451, 12463), False, 'import pytest\n'), ((12481, 12557), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid_bad', 'l_hid_to_hid_bad'], {}), '(l_in, l_in_to_hid_bad, l_hid_to_hid_bad)\n', (12516, 12557), False, 'import lasagne\n'), ((13049, 13115), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(n_batch, n_out_filters, width, height)'], {}), '((n_batch, n_out_filters, width, height))\n', (13074, 13115), False, 'import lasagne\n'), ((13321, 13384), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(n_batch, n_channels, width, height)'], {}), '((n_batch, n_channels, width, height))\n', (13346, 13384), False, 'import lasagne\n'), ((13444, 13469), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13457, 13469), False, 'import pytest\n'), ((13487, 13582), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid', 'l_hid_to_hid'], {'precompute_input': '(True)'}), '(l_in, l_in_to_hid, l_hid_to_hid,\n precompute_input=True)\n', (13522, 13582), False, 'import lasagne\n'), ((13757, 
13824), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(n_batch + 1, n_channels, width, height)'], {}), '((n_batch + 1, n_channels, width, height))\n', (13782, 13824), False, 'import lasagne\n'), ((13884, 13909), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (13897, 13909), False, 'import pytest\n'), ((13927, 14023), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid', 'l_hid_to_hid'], {'precompute_input': '(False)'}), '(l_in, l_in_to_hid, l_hid_to_hid,\n precompute_input=False)\n', (13962, 14023), False, 'import lasagne\n'), ((14203, 14267), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_channels, width + 1, height)'], {}), '((None, n_channels, width + 1, height))\n', (14228, 14267), False, 'import lasagne\n'), ((14327, 14352), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (14340, 14352), False, 'import pytest\n'), ((14370, 14438), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid', 'l_hid_to_hid'], {}), '(l_in, l_in_to_hid, l_hid_to_hid)\n', (14405, 14438), False, 'import lasagne\n'), ((14752, 14812), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(None, n_channels, width, height)'], {}), '((None, n_channels, width, height))\n', (14777, 14812), False, 'import lasagne\n'), ((14906, 14972), 'lasagne.layers.InputLayer', 'lasagne.layers.InputLayer', (['(n_batch, n_out_filters, width, height)'], {}), '((n_batch, n_out_filters, width, height))\n', (14931, 14972), False, 'import lasagne\n'), ((15020, 15045), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (15033, 15045), False, 'import pytest\n'), ((15063, 15131), 'lasagne.layers.CustomRecurrentLayer', 'lasagne.layers.CustomRecurrentLayer', (['l_in', 'l_in_to_hid', 'l_hid_to_hid'], {}), '(l_in, l_in_to_hid, l_hid_to_hid)\n', (15098, 15131), False, 'import lasagne\n'), ((20884, 20928), 
'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_rec_final'], {}), '(l_rec_final)\n', (20915, 20928), False, 'import lasagne\n'), ((21410, 21453), 'lasagne.layers.helper.get_output_shape', 'helper.get_output_shape', (['l_lstm', 'x_in.shape'], {}), '(l_lstm, x_in.shape)\n', (21433, 21453), False, 'from lasagne.layers import helper\n'), ((21787, 21801), 'theano.tensor.mean', 'T.mean', (['output'], {}), '(output)\n', (21793, 21801), True, 'import theano.tensor as T\n'), ((21803, 21840), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_lstm'], {}), '(l_lstm)\n', (21832, 21840), False, 'import lasagne\n'), ((24107, 24144), 'lasagne.layers.get_all_layers', 'lasagne.layers.get_all_layers', (['l_lstm'], {}), '(l_lstm)\n', (24136, 24144), False, 'import lasagne\n'), ((27435, 27456), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (27445, 27456), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((33569, 33613), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_rec_final'], {}), '(l_rec_final)\n', (33600, 33613), False, 'import lasagne\n'), ((34091, 34133), 'lasagne.layers.helper.get_output_shape', 'helper.get_output_shape', (['l_rec', 'x_in.shape'], {}), '(l_rec, x_in.shape)\n', (34114, 34133), False, 'from lasagne.layers import helper\n'), ((34484, 34498), 'theano.tensor.mean', 'T.mean', (['output'], {}), '(output)\n', (34490, 34498), True, 'import theano.tensor as T\n'), ((34500, 34536), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_gru'], {}), '(l_gru)\n', (34529, 34536), False, 'import lasagne\n'), ((36046, 36082), 'lasagne.layers.get_all_layers', 'lasagne.layers.get_all_layers', (['l_gru'], {}), '(l_gru)\n', (36075, 36082), False, 'import lasagne\n'), ((38906, 38927), 'lasagne.layers.InputLayer', 'InputLayer', (['(2, 2, 3)'], {}), '((2, 2, 3))\n', (38916, 38927), False, 'from lasagne.layers import 
InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((44999, 45043), 'lasagne.layers.get_output_shape', 'lasagne.layers.get_output_shape', (['l_rec_final'], {}), '(l_rec_final)\n', (45030, 45043), False, 'import lasagne\n'), ((45268, 45293), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (45281, 45293), False, 'import pytest\n'), ((45303, 45362), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_in', '(5)'], {'gradient_steps': '(3)', 'unroll_scan': '(True)'}), '(l_in, 5, gradient_steps=3, unroll_scan=True)\n', (45317, 45362), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((45373, 45398), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (45386, 45398), False, 'import pytest\n'), ((45408, 45462), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_in', '(5)'], {'gradient_steps': '(3)', 'unroll_scan': '(True)'}), '(l_in, 5, gradient_steps=3, unroll_scan=True)\n', (45417, 45462), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((45473, 45498), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (45486, 45498), False, 'import pytest\n'), ((45508, 45561), 'lasagne.layers.GRULayer', 'GRULayer', (['l_in', '(5)'], {'gradient_steps': '(3)', 'unroll_scan': '(True)'}), '(l_in, 5, gradient_steps=3, unroll_scan=True)\n', (45516, 45561), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((45765, 45790), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (45778, 45790), False, 'import pytest\n'), ((45800, 45841), 'lasagne.layers.RecurrentLayer', 'RecurrentLayer', (['l_in', '(5)'], {'unroll_scan': '(True)'}), '(l_in, 5, unroll_scan=True)\n', (45814, 45841), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((45852, 45877), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (45865, 45877), False, 'import 
pytest\n'), ((45887, 45923), 'lasagne.layers.LSTMLayer', 'LSTMLayer', (['l_in', '(5)'], {'unroll_scan': '(True)'}), '(l_in, 5, unroll_scan=True)\n', (45896, 45923), False, 'from lasagne.layers import RecurrentLayer, LSTMLayer, CustomRecurrentLayer\n'), ((45934, 45959), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (45947, 45959), False, 'import pytest\n'), ((45969, 46004), 'lasagne.layers.GRULayer', 'GRULayer', (['l_in', '(5)'], {'unroll_scan': '(True)'}), '(l_in, 5, unroll_scan=True)\n', (45977, 46004), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((47035, 47055), 'lasagne.layers.InputLayer', 'InputLayer', (['in_shape'], {}), '(in_shape)\n', (47045, 47055), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((590, 614), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (606, 614), True, 'import numpy as np\n'), ((1425, 1477), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_rec'], {'trainable': '(True)'}), '(l_rec, trainable=True)\n', (1454, 1477), False, 'import lasagne\n'), ((1519, 1576), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_rec'], {'regularizable': '(False)'}), '(l_rec, regularizable=False)\n', (1548, 1576), False, 'import lasagne\n'), ((1779, 1831), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_rec'], {'trainable': '(True)'}), '(l_rec, trainable=True)\n', (1808, 1831), False, 'import lasagne\n'), ((1873, 1930), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_rec'], {'regularizable': '(False)'}), '(l_rec, regularizable=False)\n', (1902, 1930), False, 'import lasagne\n'), ((2787, 2839), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_rec'], {'trainable': '(True)'}), '(l_rec, trainable=True)\n', (2816, 2839), False, 'import lasagne\n'), ((2894, 2951), 'lasagne.layers.get_all_params', 
'lasagne.layers.get_all_params', (['l_rec'], {'regularizable': '(False)'}), '(l_rec, regularizable=False)\n', (2923, 2951), False, 'import lasagne\n'), ((6870, 6958), 'numpy.zeros', 'np.zeros', (['(n_batch, n_steps, n_channels, width, height)'], {'dtype': 'theano.config.floatX'}), '((n_batch, n_steps, n_channels, width, height), dtype=theano.config\n .floatX)\n', (6878, 6958), True, 'import numpy as np\n'), ((8814, 8902), 'numpy.zeros', 'np.zeros', (['(n_batch, n_steps, n_channels, width, height)'], {'dtype': 'theano.config.floatX'}), '((n_batch, n_steps, n_channels, width, height), dtype=theano.config\n .floatX)\n', (8822, 8902), True, 'import numpy as np\n'), ((15664, 15679), 'numpy.ones', 'np.ones', (['in_shp'], {}), '(in_shp)\n', (15671, 15679), True, 'import numpy as np\n'), ((15734, 15758), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (15756, 15758), False, 'import lasagne\n'), ((15850, 15874), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (15872, 15874), False, 'import lasagne\n'), ((16539, 16580), 'numpy.ones', 'np.ones', (['(num_batch + 1, 10, n_features1)'], {}), '((num_batch + 1, 10, n_features1))\n', (16546, 16580), True, 'import numpy as np\n'), ((16609, 16646), 'numpy.ones', 'np.ones', (['(num_batch, 15, n_features1)'], {}), '((num_batch, 15, n_features1))\n', (16616, 16646), True, 'import numpy as np\n'), ((17096, 17120), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (17112, 17120), True, 'import numpy as np\n'), ((17153, 17172), 'numpy.ones', 'np.ones', (['in_shp[:2]'], {}), '(in_shp[:2])\n', (17160, 17172), True, 'import numpy as np\n'), ((17227, 17251), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (17249, 17251), False, 'import lasagne\n'), ((17418, 17442), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (17440, 17442), False, 'import lasagne\n'), ((18213, 18237), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), 
'(in_shp)\n', (18229, 18237), True, 'import numpy as np\n'), ((18292, 18316), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (18314, 18316), False, 'import lasagne\n'), ((18459, 18483), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (18481, 18483), False, 'import lasagne\n'), ((19144, 19168), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (19160, 19168), True, 'import numpy as np\n'), ((19284, 19308), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (19306, 19308), False, 'import lasagne\n'), ((19512, 19536), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (19534, 19536), False, 'import lasagne\n'), ((19770, 19805), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_precompute'], {}), '(l_rec_precompute)\n', (19787, 19805), False, 'from lasagne.layers import helper\n'), ((19935, 19973), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_no_precompute'], {}), '(l_rec_no_precompute)\n', (19952, 19973), False, 'from lasagne.layers import helper\n'), ((20313, 20337), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (20329, 20337), True, 'import numpy as np\n'), ((20392, 20416), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (20414, 20416), False, 'import lasagne\n'), ((20507, 20531), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (20529, 20531), False, 'import lasagne\n'), ((20637, 20667), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_final'], {}), '(l_rec_final)\n', (20654, 20667), False, 'from lasagne.layers import helper\n'), ((20715, 20743), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_all'], {}), '(l_rec_all)\n', (20732, 20743), False, 'from lasagne.layers import helper\n'), ((21222, 21246), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (21238, 21246), True, 'import numpy as 
np\n'), ((22135, 22188), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_lstm'], {'trainable': '(True)'}), '(l_lstm, trainable=True)\n', (22164, 22188), False, 'import lasagne\n'), ((22244, 22302), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_lstm'], {'regularizable': '(False)'}), '(l_lstm, regularizable=False)\n', (22273, 22302), False, 'import lasagne\n'), ((22574, 22627), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_lstm'], {'trainable': '(True)'}), '(l_lstm, trainable=True)\n', (22603, 22627), False, 'import lasagne\n'), ((22689, 22747), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_lstm'], {'regularizable': '(False)'}), '(l_lstm, regularizable=False)\n', (22718, 22747), False, 'import lasagne\n'), ((23016, 23069), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_lstm'], {'trainable': '(True)'}), '(l_lstm, trainable=True)\n', (23045, 23069), False, 'import lasagne\n'), ((23131, 23189), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_lstm'], {'regularizable': '(False)'}), '(l_lstm, regularizable=False)\n', (23160, 23189), False, 'import lasagne\n'), ((24345, 24398), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_lstm'], {'trainable': '(True)'}), '(l_lstm, trainable=True)\n', (24374, 24398), False, 'import lasagne\n'), ((24474, 24532), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_lstm'], {'regularizable': '(False)'}), '(l_lstm, regularizable=False)\n', (24503, 24532), False, 'import lasagne\n'), ((27724, 27739), 'numpy.ones', 'np.ones', (['in_shp'], {}), '(in_shp)\n', (27731, 27739), True, 'import numpy as np\n'), ((27794, 27818), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (27816, 27818), False, 'import lasagne\n'), ((27906, 27930), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (27928, 27930), False, 'import lasagne\n'), 
((28572, 28596), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (28588, 28596), True, 'import numpy as np\n'), ((28712, 28736), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (28734, 28736), False, 'import lasagne\n'), ((28877, 28901), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (28899, 28901), False, 'import lasagne\n'), ((29066, 29102), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm_precompute'], {}), '(l_lstm_precompute)\n', (29083, 29102), False, 'from lasagne.layers import helper\n'), ((29233, 29272), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_lstm_no_precompute'], {}), '(l_lstm_no_precompute)\n', (29250, 29272), False, 'from lasagne.layers import helper\n'), ((29755, 29799), 'numpy.ones', 'np.ones', (['(num_batch + 1, 3 + 1, n_features1)'], {}), '((num_batch + 1, 3 + 1, n_features1))\n', (29762, 29799), True, 'import numpy as np\n'), ((29826, 29862), 'numpy.ones', 'np.ones', (['(num_batch, 3, n_features1)'], {}), '((num_batch, 3, n_features1))\n', (29833, 29862), True, 'import numpy as np\n'), ((30302, 30326), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (30318, 30326), True, 'import numpy as np\n'), ((30359, 30378), 'numpy.ones', 'np.ones', (['in_shp[:2]'], {}), '(in_shp[:2])\n', (30366, 30378), True, 'import numpy as np\n'), ((30433, 30457), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (30455, 30457), False, 'import lasagne\n'), ((30616, 30640), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (30638, 30640), False, 'import lasagne\n'), ((31478, 31502), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (31494, 31502), True, 'import numpy as np\n'), ((31557, 31581), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (31579, 31581), False, 'import lasagne\n'), ((31716, 31740), 'lasagne.random.get_rng', 'lasagne.random.get_rng', 
([], {}), '()\n', (31738, 31740), False, 'import lasagne\n'), ((33008, 33032), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (33024, 33032), True, 'import numpy as np\n'), ((33087, 33111), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (33109, 33111), False, 'import lasagne\n'), ((33197, 33221), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (33219, 33221), False, 'import lasagne\n'), ((33322, 33352), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_final'], {}), '(l_rec_final)\n', (33339, 33352), False, 'from lasagne.layers import helper\n'), ((33400, 33428), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_all'], {}), '(l_rec_all)\n', (33417, 33428), False, 'from lasagne.layers import helper\n'), ((33955, 33979), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (33971, 33979), True, 'import numpy as np\n'), ((34815, 34867), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_gru'], {'trainable': '(True)'}), '(l_gru, trainable=True)\n', (34844, 34867), False, 'import lasagne\n'), ((34922, 34979), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_gru'], {'regularizable': '(False)'}), '(l_gru, regularizable=False)\n', (34951, 34979), False, 'import lasagne\n'), ((35232, 35284), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_gru'], {'trainable': '(True)'}), '(l_gru, trainable=True)\n', (35261, 35284), False, 'import lasagne\n'), ((35346, 35403), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_gru'], {'regularizable': '(False)'}), '(l_gru, regularizable=False)\n', (35375, 35403), False, 'import lasagne\n'), ((36345, 36397), 'lasagne.layers.get_all_params', 'lasagne.layers.get_all_params', (['l_gru'], {'trainable': '(True)'}), '(l_gru, trainable=True)\n', (36374, 36397), False, 'import lasagne\n'), ((36469, 36526), 'lasagne.layers.get_all_params', 
'lasagne.layers.get_all_params', (['l_gru'], {'regularizable': '(False)'}), '(l_gru, regularizable=False)\n', (36498, 36526), False, 'import lasagne\n'), ((39194, 39209), 'numpy.ones', 'np.ones', (['in_shp'], {}), '(in_shp)\n', (39201, 39209), True, 'import numpy as np\n'), ((39264, 39288), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (39286, 39288), False, 'import lasagne\n'), ((39374, 39398), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (39396, 39398), False, 'import lasagne\n'), ((40049, 40090), 'numpy.ones', 'np.ones', (['(num_batch + 1, 10, n_features1)'], {}), '((num_batch + 1, 10, n_features1))\n', (40056, 40090), True, 'import numpy as np\n'), ((40119, 40156), 'numpy.ones', 'np.ones', (['(num_batch, 15, n_features1)'], {}), '((num_batch, 15, n_features1))\n', (40126, 40156), True, 'import numpy as np\n'), ((40567, 40591), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (40583, 40591), True, 'import numpy as np\n'), ((40624, 40643), 'numpy.ones', 'np.ones', (['in_shp[:2]'], {}), '(in_shp[:2])\n', (40631, 40643), True, 'import numpy as np\n'), ((40698, 40722), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (40720, 40722), False, 'import lasagne\n'), ((40877, 40901), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (40899, 40901), False, 'import lasagne\n'), ((41731, 41755), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (41747, 41755), True, 'import numpy as np\n'), ((41810, 41834), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (41832, 41834), False, 'import lasagne\n'), ((41965, 41989), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (41987, 41989), False, 'import lasagne\n'), ((42639, 42663), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (42655, 42663), True, 'import numpy as np\n'), ((42779, 42803), 'lasagne.random.get_rng', 
'lasagne.random.get_rng', ([], {}), '()\n', (42801, 42803), False, 'import lasagne\n'), ((42957, 42981), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (42979, 42981), False, 'import lasagne\n'), ((43197, 43232), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_gru_precompute'], {}), '(l_gru_precompute)\n', (43214, 43232), False, 'from lasagne.layers import helper\n'), ((43362, 43400), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_gru_no_precompute'], {}), '(l_gru_no_precompute)\n', (43379, 43400), False, 'from lasagne.layers import helper\n'), ((44440, 44464), 'numpy.random.random', 'np.random.random', (['in_shp'], {}), '(in_shp)\n', (44456, 44464), True, 'import numpy as np\n'), ((44519, 44543), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (44541, 44543), False, 'import lasagne\n'), ((44628, 44652), 'lasagne.random.get_rng', 'lasagne.random.get_rng', ([], {}), '()\n', (44650, 44652), False, 'import lasagne\n'), ((44752, 44782), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_final'], {}), '(l_rec_final)\n', (44769, 44782), False, 'from lasagne.layers import helper\n'), ((44830, 44858), 'lasagne.layers.helper.get_output', 'helper.get_output', (['l_rec_all'], {}), '(l_rec_all)\n', (44847, 44858), False, 'from lasagne.layers import helper\n'), ((46311, 46363), 'lasagne.layers.InputLayer', 'InputLayer', (['(in_shape[0] * in_shape[1], in_shape[2])'], {}), '((in_shape[0] * in_shape[1], in_shape[2]))\n', (46321, 46363), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((46757, 46789), 'lasagne.layers.InputLayer', 'InputLayer', (['(in_shape[0], n_hid)'], {}), '((in_shape[0], n_hid))\n', (46767, 46789), False, 'from lasagne.layers import InputLayer, DenseLayer, GRULayer, Gate, Layer\n'), ((32512, 32521), 'numpy.eye', 'np.eye', (['(6)'], {}), '(6)\n', (32518, 32521), True, 'import numpy as np\n'), ((43972, 43981), 'numpy.eye', 'np.eye', 
(['(6)'], {}), '(6)\n', (43978, 43981), True, 'import numpy as np\n'), ((32720, 32740), 'numpy.arange', 'np.arange', (['(4 * 5 * 6)'], {}), '(4 * 5 * 6)\n', (32729, 32740), True, 'import numpy as np\n'), ((44153, 44173), 'numpy.arange', 'np.arange', (['(4 * 5 * 6)'], {}), '(4 * 5 * 6)\n', (44162, 44173), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join
import numpy as np
import gym
from src.envs.base import Env
from src.utils.helpers import preprocessAtari
class AtariEnv(Env): # low dimensional observations
    """Gym Atari environment wrapper with optional POMDP observation masking.

    Wraps a ``gym`` Atari game behind the project ``Env`` interface,
    preprocessing raw frames via ``preprocessAtari`` and optionally
    degrading observations ('flickering' blanks frames at random,
    'delete_dim' zeroes a vertical band of pixels).
    """
    def __init__(self, **kwargs):
        super(AtariEnv, self).__init__(**kwargs)
        self.env_type = 'atari'
        self.game = kwargs.get('game', 'Breakout-v0')
        self.env = gym.make(self.game)
        self.env.seed(self.seed)    # NOTE: so each env would be different
        # state & action space setup
        self.actions = kwargs.get('actions', range(self.action_dim))
        self.scale_factor = kwargs.get('scale_factor', 2)
        self.preprocess_mode = kwargs.get('preprocess_mode', 0)
        self.reset()    # must run before state_shape is queried below
        self.logger.info("Action Space: %s", self.actions)
        self.logger.info("State Space: %s", self.state_shape)
        self.last_lives = 0
        # atari POMDP: mask that zeroes the middle band of pixel rows
        self.pomdp_mask = np.ones((1, *self.state_shape[1:]))
        n_hidden = int(self.state_shape[1] * (1 - self.pomdp_prob))
        lo = int((self.state_shape[1] - n_hidden) / 2)
        hi = int((self.state_shape[1] + n_hidden) / 2)
        self.pomdp_mask[:, lo:hi, :] = 0

    def render(self):
        if self.mode != 2:
            return self.env.render()
        # mode 2: additionally dump each rendered frame to disk
        frame = self.env.render(mode='rgb_array')
        frame_name = join(self.img_dir, "frame_%04d.jpg" % self.frame_idx)
        self.imsave(frame_name, frame)
        self.frame_idx += 1
        return frame

    def visual(self):
        pass

    def sample_random_action(self):
        return self.env.action_space.sample()

    def reset(self):
        """Reset the underlying env and seed the observation sequence."""
        self._reset_experience()
        self.seq_state1.append(preprocessAtari(self.env.reset()))
        self.last_lives = 0
        self.episode_ended = False
        return self._get_experience()

    def step(self, action):
        """Advance one step; losing a life counts as a terminal signal."""
        self.exp_action = self.actions[action]
        self.exp_state1, self.exp_reward, terminal, info = self.env.step(self.exp_action)
        # augmenting telling lost life is bad
        self.episode_ended = terminal
        lives = info['ale.lives']
        self.exp_terminal1 = True if lives < self.last_lives else terminal
        self.last_lives = lives
        self.exp_state1 = preprocessAtari(self.exp_state1)
        if self.pomdp:
            if self.pomdp_type == 'flickering':
                # blank the whole frame with probability (1 - pomdp_prob)
                if np.random.rand() > self.pomdp_prob:
                    self.exp_state1 = np.zeros(self.exp_state1.shape)
            elif self.pomdp_type == 'delete_dim':
                self.exp_state1 = np.array(self.exp_state1) * self.pomdp_mask
        self.seq_state0.append(self.seq_state1[0])
        self.seq_state1.append(self.exp_state1)
        return self._get_experience()

    @property
    def state_shape(self):
        return self.seq_state1[0].shape
| [
"numpy.ones",
"numpy.random.rand",
"os.path.join",
"numpy.array",
"numpy.zeros",
"src.utils.helpers.preprocessAtari",
"gym.make"
] | [((484, 503), 'gym.make', 'gym.make', (['self.game'], {}), '(self.game)\n', (492, 503), False, 'import gym\n'), ((1025, 1060), 'numpy.ones', 'np.ones', (['(1, *self.state_shape[1:])'], {}), '((1, *self.state_shape[1:]))\n', (1032, 1060), True, 'import numpy as np\n'), ((2424, 2456), 'src.utils.helpers.preprocessAtari', 'preprocessAtari', (['self.exp_state1'], {}), '(self.exp_state1)\n', (2439, 2456), False, 'from src.utils.helpers import preprocessAtari\n'), ((1416, 1469), 'os.path.join', 'join', (['self.img_dir', "('frame_%04d.jpg' % self.frame_idx)"], {}), "(self.img_dir, 'frame_%04d.jpg' % self.frame_idx)\n", (1420, 1469), False, 'from os.path import join\n'), ((2547, 2563), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2561, 2563), True, 'import numpy as np\n'), ((2621, 2652), 'numpy.zeros', 'np.zeros', (['self.exp_state1.shape'], {}), '(self.exp_state1.shape)\n', (2629, 2652), True, 'import numpy as np\n'), ((2737, 2762), 'numpy.array', 'np.array', (['self.exp_state1'], {}), '(self.exp_state1)\n', (2745, 2762), True, 'import numpy as np\n')] |
import random
import numpy as np
import tensorflow as tf
# Alias the TF1 Normal distribution class; used as the decoder likelihood
# in VariationalAutoencoder._create_loss_optimizer.
Normal = tf.contrib.distributions.Normal
# Fix both NumPy and TensorFlow seeds so training runs are reproducible.
np.random.seed(0)
tf.set_random_seed(0)
class VariationalAutoencoder(object):
    """Beta-VAE with Gaussian encoder and Gaussian decoder (TensorFlow 1.x graph API).

    The encoder maps an input row to a diagonal-Gaussian posterior over the
    latent code z; the decoder maps z back to a diagonal-Gaussian over the
    input space. ``beta`` scales the KL term (beta=1 is a standard VAE).
    ``impute`` fills NaN entries of corrupted data by iterated reconstruction.
    """
    #"VAE implementation is based on the implementation from McCoy, J.T.,et al."
    #https://www-sciencedirect-com.stanford.idm.oclc.org/science/article/pii/S2405896318320949"
    def __init__(self, network_architecture, transfer_fct=tf.nn.relu,
                 learning_rate=0.001, batch_size=100, istrain=True, restore_path=None, beta=1):
        # network_architecture: dict with keys n_input, n_z and the four
        # hidden-layer sizes consumed by _initialize_weights.
        # istrain=False restores a saved model from restore_path instead of
        # initializing fresh variables.
        self.network_architecture = network_architecture
        self.transfer_fct = transfer_fct
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.beta=beta
        # Input placeholder: one row per sample, n_input features.
        self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])
        self._create_network()
        self._create_loss_optimizer()
        self.saver = tf.train.Saver()
        init = tf.global_variables_initializer()
        if istrain:
            self.sess = tf.InteractiveSession()
            self.sess.run(init)
        else:
            self.sess=tf.Session()
            self.saver.restore(self.sess, restore_path)
    def _create_network(self):
        """Build the encoder, the reparameterized latent sample z, and the decoder."""
        network_weights = self._initialize_weights(**self.network_architecture)
        self.z_mean, self.z_log_sigma_sq = \
            self._recognition_network(network_weights["weights_recog"],
                                      network_weights["biases_recog"])
        # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, 1),
        # sigma = sqrt(exp(log sigma^2)).
        eps = tf.random_normal(tf.shape(self.z_mean), 0, 1,
                               dtype=tf.float32)
        self.z = tf.add(self.z_mean,
                        tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
        self.x_hat_mean, self.x_hat_log_sigma_sq = \
            self._generator_network(network_weights["weights_gener"],
                                    network_weights["biases_gener"])
    def _initialize_weights(self, n_hidden_recog_1, n_hidden_recog_2,
                            n_hidden_gener_1, n_hidden_gener_2,
                            n_input, n_z):
        """Create all weight/bias Variables: Xavier-initialized weights, zero biases.

        Returns a dict with the four groups consumed by the encoder and
        decoder builders: weights_recog, biases_recog, weights_gener,
        biases_gener.
        """
        all_weights = dict()
        all_weights['weights_recog'] = {
            'h1': tf.Variable(xavier_init(n_input, n_hidden_recog_1)),
            'h2': tf.Variable(xavier_init(n_hidden_recog_1, n_hidden_recog_2)),
            'out_mean': tf.Variable(xavier_init(n_hidden_recog_2, n_z)),
            'out_log_sigma': tf.Variable(xavier_init(n_hidden_recog_2, n_z))}
        all_weights['biases_recog'] = {
            'b1': tf.Variable(tf.zeros([n_hidden_recog_1], dtype=tf.float32)),
            'b2': tf.Variable(tf.zeros([n_hidden_recog_2], dtype=tf.float32)),
            'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32)),
            'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32))}
        all_weights['weights_gener'] = {
            'h1': tf.Variable(xavier_init(n_z, n_hidden_gener_1)),
            'h2': tf.Variable(xavier_init(n_hidden_gener_1, n_hidden_gener_2)),
            'out_mean': tf.Variable(xavier_init(n_hidden_gener_2, n_input)),
            'out_log_sigma': tf.Variable(xavier_init(n_hidden_gener_2, n_input))}
        all_weights['biases_gener'] = {
            'b1': tf.Variable(tf.zeros([n_hidden_gener_1], dtype=tf.float32)),
            'b2': tf.Variable(tf.zeros([n_hidden_gener_2], dtype=tf.float32)),
            'out_mean': tf.Variable(tf.zeros([n_input], dtype=tf.float32)),
            'out_log_sigma': tf.Variable(tf.zeros([n_input], dtype=tf.float32))}
        return all_weights
    def _recognition_network(self, weights, biases):
        """Encoder: two hidden layers, then linear heads for mu and log sigma^2 of z."""
        layer_1 = self.transfer_fct(tf.add(tf.matmul(self.x, weights['h1']),
                                           biases['b1']))
        layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
                                           biases['b2']))
        z_mean = tf.add(tf.matmul(layer_2, weights['out_mean']),
                        biases['out_mean'])
        z_log_sigma_sq = \
            tf.add(tf.matmul(layer_2, weights['out_log_sigma']),
                   biases['out_log_sigma'])
        return (z_mean, z_log_sigma_sq)
    def _generator_network(self, weights, biases):
        """Decoder: mirror of the encoder, producing mu and log sigma^2 of x_hat."""
        layer_1 = self.transfer_fct(tf.add(tf.matmul(self.z, weights['h1']),
                                           biases['b1']))
        layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
                                           biases['b2']))
        x_hat_mean = tf.add(tf.matmul(layer_2, weights['out_mean']),
                            biases['out_mean'])
        x_hat_log_sigma_sq = \
            tf.add(tf.matmul(layer_2, weights['out_log_sigma']),
                   biases['out_log_sigma'])
        return (x_hat_mean, x_hat_log_sigma_sq)
    def _create_loss_optimizer(self):
        """Build the (beta-weighted) negative ELBO and an Adam minimizer for it."""
        # NOTE(review): scale is exp(log sigma^2), i.e. sigma^2, where a Normal
        # scale parameter is usually sigma = exp(0.5 * log sigma^2) — verify.
        X_hat_distribution = Normal(loc=self.x_hat_mean,
                                    scale=tf.exp(self.x_hat_log_sigma_sq))
        # Negative log-likelihood of the input under the decoder distribution.
        reconstr_loss = \
            -tf.reduce_sum(X_hat_distribution.log_prob(self.x), 1)
        # Closed-form KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior.
        latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
                                           - tf.square(self.z_mean)
                                           - tf.exp(self.z_log_sigma_sq), 1)
        self.cost = tf.reduce_mean(reconstr_loss + self.beta *latent_loss)   # average over batch
        self.latent_cost=self.beta *latent_loss
        self.optimizer = \
            tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
    def fit(self, data):
        """Run one optimizer step on a mini-batch; returns the batch cost."""
        opt, cost = self.sess.run((self.optimizer, self.cost),
                                  feed_dict={self.x: data})
        return cost
    def inspect_latent_cost (self, data):
        """Return the per-sample beta-weighted KL term for a batch (no training step)."""
        lc = self.sess.run(self.latent_cost,
                           feed_dict={self.x: data})
        return lc
    def transform_feature(self, data):
        """Encode data to the latent space, returning the posterior means z_mean."""
        return self.sess.run(self.z_mean, feed_dict={self.x: data})
    def reconstruct(self, data, sample = 'mean'):
        """Decode the encoding of *data* back to input space.

        sample='mean' (default) returns the decoder mean; sample='sample'
        draws from the decoder distribution instead.
        """
        if sample == 'sample':
            x_hat_mu, x_hat_logsigsq = self.sess.run((self.x_hat_mean, self.x_hat_log_sigma_sq),
                                                     feed_dict={self.x: data})
            # NOTE(review): second positional arg of tf.random_normal is the
            # mean and is 1 here (cf. 0 in _create_network) — looks unintended.
            eps = tf.random_normal(tf.shape(data), 1,
                                   dtype=tf.float32)
            x_hat = tf.add(x_hat_mu,
                           tf.multiply(tf.sqrt(tf.exp(x_hat_logsigsq)), eps))
            x_hat = x_hat.eval()
        else:
            x_hat_mu = self.sess.run(self.x_hat_mean,
                                     feed_dict={self.x: data})
            x_hat = x_hat_mu
        return x_hat
    def impute(self, data_corrupt, max_iter = 10):
        """ Use VAE to impute missing values in data_corrupt. Missing values
        are indicated by a NaN.

        NaN entries are zero-filled, then repeatedly replaced by their
        reconstructions for max_iter rounds. NOTE: data_corrupt is modified
        in place; the returned array aliases it.
        """
        missing_row_ind = np.where(np.isnan(np.sum(data_corrupt,axis=1)))
        data_miss_val = data_corrupt[missing_row_ind[0],:]
        na_ind= np.where(np.isnan(data_miss_val))
        data_miss_val[na_ind] = 0
        for i in range(max_iter):
            data_reconstruct = self.reconstruct(data_miss_val)
            # Keep observed entries fixed; only refresh the missing ones.
            data_miss_val[na_ind] = data_reconstruct[na_ind]
        data_corrupt[missing_row_ind,:] = data_miss_val
        data_imputed = data_corrupt
        return data_imputed
    def train(self, data, training_epochs=10, display_step=10):
        """Train on the complete (NaN-free) rows of *data*; returns self.

        Records (-avg_cost) every display_step epochs in self.losshistory /
        self.losshistory_epoch. Relies on the module-level next_batch helper.
        """
        missing_row_ind = np.where(np.isnan(np.sum(data,axis=1)))
        # Only rows without NaNs are available for batching.
        n_samples = np.size(data, 0) - missing_row_ind[0].shape[0]
        losshistory = []
        losshistory_epoch = []
        for epoch in range(training_epochs):
            avg_cost = 0
            total_batch = int(n_samples / self.batch_size)
            for i in range(total_batch):
                batch_xs = next_batch(data,self.batch_size)
                cost = self.fit(batch_xs)
                lc = self.inspect_latent_cost(batch_xs)
                avg_cost += cost / n_samples * self.batch_size
            if epoch % display_step == 0:
                losshistory_epoch.append(epoch)
                losshistory.append(-avg_cost)
                print(f'Epoch: {epoch+1:.4f} Cost= {avg_cost:.9f}')
                #print (lc)
        self.losshistory = losshistory
        self.losshistory_epoch = losshistory_epoch
        return self
def next_batch(data,batch_size):
non_missing_row_ind = np.where(np.isfinite(np.sum(data,axis=1)))
sample_ind = random.sample(list(non_missing_row_ind[0]),batch_size)
data_sample = np.copy(data[sample_ind,:])
return data_sample
def xavier_init(fan_in, fan_out, constant=1):
    """Xavier (Glorot) uniform initialization of network weights.

    Samples a (fan_in, fan_out) float32 tensor uniformly from
    [-b, b] with b = constant * sqrt(6 / (fan_in + fan_out)).
    """
    # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-bound, maxval=bound,
                             dtype=tf.float32)
| [
"numpy.sqrt",
"tensorflow.shape",
"tensorflow.reduce_mean",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.random.seed",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.train.AdamOptimizer",
"tensorflow.zeros",
"tensorflow.InteractiveSession",
"num... | [((98, 115), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (112, 115), True, 'import numpy as np\n'), ((116, 137), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(0)'], {}), '(0)\n', (134, 137), True, 'import tensorflow as tf\n'), ((8734, 8762), 'numpy.copy', 'np.copy', (['data[sample_ind, :]'], {}), '(data[sample_ind, :])\n', (8741, 8762), True, 'import numpy as np\n'), ((9103, 9182), 'tensorflow.random_uniform', 'tf.random_uniform', (['(fan_in, fan_out)'], {'minval': 'low', 'maxval': 'high', 'dtype': 'tf.float32'}), '((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)\n', (9120, 9182), True, 'import tensorflow as tf\n'), ((743, 810), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', "[None, network_architecture['n_input']]"], {}), "(tf.float32, [None, network_architecture['n_input']])\n", (757, 810), True, 'import tensorflow as tf\n'), ((928, 944), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (942, 944), True, 'import tensorflow as tf\n'), ((961, 994), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (992, 994), True, 'import tensorflow as tf\n'), ((5394, 5449), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(reconstr_loss + self.beta * latent_loss)'], {}), '(reconstr_loss + self.beta * latent_loss)\n', (5408, 5449), True, 'import tensorflow as tf\n'), ((9007, 9040), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (9014, 9040), True, 'import numpy as np\n'), ((9060, 9093), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (fan_in + fan_out))'], {}), '(6.0 / (fan_in + fan_out))\n', (9067, 9093), True, 'import numpy as np\n'), ((1048, 1071), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1069, 1071), True, 'import tensorflow as tf\n'), ((1140, 1152), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1150, 1152), True, 'import tensorflow as tf\n'), ((1560, 1581), 'tensorflow.shape', 
'tf.shape', (['self.z_mean'], {}), '(self.z_mean)\n', (1568, 1581), True, 'import tensorflow as tf\n'), ((3958, 3997), 'tensorflow.matmul', 'tf.matmul', (['layer_2', "weights['out_mean']"], {}), "(layer_2, weights['out_mean'])\n", (3967, 3997), True, 'import tensorflow as tf\n'), ((4089, 4133), 'tensorflow.matmul', 'tf.matmul', (['layer_2', "weights['out_log_sigma']"], {}), "(layer_2, weights['out_log_sigma'])\n", (4098, 4133), True, 'import tensorflow as tf\n'), ((4580, 4619), 'tensorflow.matmul', 'tf.matmul', (['layer_2', "weights['out_mean']"], {}), "(layer_2, weights['out_mean'])\n", (4589, 4619), True, 'import tensorflow as tf\n'), ((4715, 4759), 'tensorflow.matmul', 'tf.matmul', (['layer_2', "weights['out_log_sigma']"], {}), "(layer_2, weights['out_log_sigma'])\n", (4724, 4759), True, 'import tensorflow as tf\n'), ((7149, 7172), 'numpy.isnan', 'np.isnan', (['data_miss_val'], {}), '(data_miss_val)\n', (7157, 7172), True, 'import numpy as np\n'), ((7679, 7695), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (7686, 7695), True, 'import numpy as np\n'), ((8622, 8642), 'numpy.sum', 'np.sum', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (8628, 8642), True, 'import numpy as np\n'), ((2586, 2632), 'tensorflow.zeros', 'tf.zeros', (['[n_hidden_recog_1]'], {'dtype': 'tf.float32'}), '([n_hidden_recog_1], dtype=tf.float32)\n', (2594, 2632), True, 'import tensorflow as tf\n'), ((2665, 2711), 'tensorflow.zeros', 'tf.zeros', (['[n_hidden_recog_2]'], {'dtype': 'tf.float32'}), '([n_hidden_recog_2], dtype=tf.float32)\n', (2673, 2711), True, 'import tensorflow as tf\n'), ((2750, 2783), 'tensorflow.zeros', 'tf.zeros', (['[n_z]'], {'dtype': 'tf.float32'}), '([n_z], dtype=tf.float32)\n', (2758, 2783), True, 'import tensorflow as tf\n'), ((2827, 2860), 'tensorflow.zeros', 'tf.zeros', (['[n_z]'], {'dtype': 'tf.float32'}), '([n_z], dtype=tf.float32)\n', (2835, 2860), True, 'import tensorflow as tf\n'), ((3280, 3326), 'tensorflow.zeros', 'tf.zeros', 
(['[n_hidden_gener_1]'], {'dtype': 'tf.float32'}), '([n_hidden_gener_1], dtype=tf.float32)\n', (3288, 3326), True, 'import tensorflow as tf\n'), ((3359, 3405), 'tensorflow.zeros', 'tf.zeros', (['[n_hidden_gener_2]'], {'dtype': 'tf.float32'}), '([n_hidden_gener_2], dtype=tf.float32)\n', (3367, 3405), True, 'import tensorflow as tf\n'), ((3444, 3481), 'tensorflow.zeros', 'tf.zeros', (['[n_input]'], {'dtype': 'tf.float32'}), '([n_input], dtype=tf.float32)\n', (3452, 3481), True, 'import tensorflow as tf\n'), ((3525, 3562), 'tensorflow.zeros', 'tf.zeros', (['[n_input]'], {'dtype': 'tf.float32'}), '([n_input], dtype=tf.float32)\n', (3533, 3562), True, 'import tensorflow as tf\n'), ((3702, 3734), 'tensorflow.matmul', 'tf.matmul', (['self.x', "weights['h1']"], {}), "(self.x, weights['h1'])\n", (3711, 3734), True, 'import tensorflow as tf\n'), ((3839, 3872), 'tensorflow.matmul', 'tf.matmul', (['layer_1', "weights['h2']"], {}), "(layer_1, weights['h2'])\n", (3848, 3872), True, 'import tensorflow as tf\n'), ((4320, 4352), 'tensorflow.matmul', 'tf.matmul', (['self.z', "weights['h1']"], {}), "(self.z, weights['h1'])\n", (4329, 4352), True, 'import tensorflow as tf\n'), ((4457, 4490), 'tensorflow.matmul', 'tf.matmul', (['layer_1', "weights['h2']"], {}), "(layer_1, weights['h2'])\n", (4466, 4490), True, 'import tensorflow as tf\n'), ((5022, 5053), 'tensorflow.exp', 'tf.exp', (['self.x_hat_log_sigma_sq'], {}), '(self.x_hat_log_sigma_sq)\n', (5028, 5053), True, 'import tensorflow as tf\n'), ((5568, 5624), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (5590, 5624), True, 'import tensorflow as tf\n'), ((6401, 6415), 'tensorflow.shape', 'tf.shape', (['data'], {}), '(data)\n', (6409, 6415), True, 'import tensorflow as tf\n'), ((7026, 7054), 'numpy.sum', 'np.sum', (['data_corrupt'], {'axis': '(1)'}), '(data_corrupt, axis=1)\n', (7032, 7054), True, 'import numpy as np\n'), ((7637, 7657), 
'numpy.sum', 'np.sum', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (7643, 7657), True, 'import numpy as np\n'), ((1722, 1749), 'tensorflow.exp', 'tf.exp', (['self.z_log_sigma_sq'], {}), '(self.z_log_sigma_sq)\n', (1728, 1749), True, 'import tensorflow as tf\n'), ((5342, 5369), 'tensorflow.exp', 'tf.exp', (['self.z_log_sigma_sq'], {}), '(self.z_log_sigma_sq)\n', (5348, 5369), True, 'import tensorflow as tf\n'), ((5273, 5295), 'tensorflow.square', 'tf.square', (['self.z_mean'], {}), '(self.z_mean)\n', (5282, 5295), True, 'import tensorflow as tf\n'), ((6553, 6575), 'tensorflow.exp', 'tf.exp', (['x_hat_logsigsq'], {}), '(x_hat_logsigsq)\n', (6559, 6575), True, 'import tensorflow as tf\n')] |
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np
import pandas as pd
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os
import sys
import time
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
expName = 'cartoon'  # from the Builder filename that created this script
# NOTE(review): the second dict key is an empty string — probably a leftover
# field from the Builder dialog; verify it is intentional.
expInfo = {'participant': '', '': ''}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if dlg.OK == False:
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
frameTolerance = 0.001  # how close to onset before 'same' frame
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
    size=[1440, 900], fullscr=True, screen=0,
    winType='pyglet', allowGUI=False, allowStencil=False,
    monitor='testMonitor', color=[-1.000,-1.000,-1.000], colorSpace='rgb',
    blendMode='avg', useFBO=True,
    units='cm')
# Override the monitor profile so cm units map to this display's true geometry.
win.monitor.setSizePix([2560, 1600])
win.monitor.setWidth(33.3)
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "Consent"
ConsentClock = core.Clock()
ConsentPage = visual.ImageStim(
win=win,
name='ConsentPage',
image=_thisDir + '/images/Consent.jpeg', mask=None,
ori=0, pos=(0, 0), size=(10.2, 10.0),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False, units="cm",
texRes=128, interpolate=True, depth=0.0)
ConsentResp = keyboard.Keyboard()
# Initialize components for Routine "Break"
BreakClock = core.Clock()
blankscreen = visual.TextStim(win=win, name='blankscreen',
text=None,
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
# Initialize components for Routine "Instructions1"
Instructions1Clock = core.Clock()
textInstructions = visual.TextStim(win=win, name='textInstructions',
text='Task Instructions\n\nIn this experiment, you will switch back and forth between 2 games.\n\n In the first game, press X for letters c & o and N for letters i & l.\n\nIn the second game, press X for letters d & b and N for letters q & p.\n\nYou will switch between both games 4 times.\n\nYou will be reminded of the instructions each time you switch!\n\n The task takes less than 20 minutes! \n\n Press SPACEBAR for more instructions!',
font='Arial',
pos=(0, 0), height=0.5, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
textInstructionsResp = keyboard.Keyboard()
# Initialize components for Routine "Break"
BreakClock = core.Clock()
blankscreen = visual.TextStim(win=win, name='blankscreen',
text=None,
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
# Initialize components for Routine "Instructions2"
Instructions2Clock = core.Clock()
imgInstructions = visual.ImageStim(
win=win,
name='imgInstructions',
image=_thisDir + '/images/Instructions.jpeg', mask=None,
ori=0, pos=(0, 0), size=(11.0, 11.0),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False, units="cm",
texRes=128, interpolate=True, depth=0.0)
imgInstructionsResp = keyboard.Keyboard()
# Initialize components for Routine "Break"
BreakClock = core.Clock()
blankscreen = visual.TextStim(win=win, name='blankscreen',
text=None,
font='Arial',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
# Initialize components for Routine "PracticeWarning"
PracticeWarningClock = core.Clock()
PracticeText = visual.TextStim(win=win, name='PracticeText',
text='PRESS SPACEBAR TO BEGIN PRACTICE!\n\n Remember: \n\n Game 1: Press X for c & o and N for i & l\n\n Game 2: Press X for d & b and N for q & p',
font='Arial',
pos=(0, 0), height=0.5, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR', units="cm",
depth=0.0);
PracticeTextResp = keyboard.Keyboard()
PracticeClock = core.Clock()
# Initialize components for Routine "ReminderLow"
ReminderLowClock = core.Clock()
imageReminderLow = visual.ImageStim(
win=win,
name='imageReminderLow',
image=_thisDir + '/images/LowReminder.jpeg', mask=None,
ori=0, pos=(0, 0), size=(6.0, 4.0),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False, units="cm",
texRes=128, interpolate=True, depth=0.0)
ReminderLow_Resp = keyboard.Keyboard()
# Initialize components for Routine "ReminderHigh"
ReminderHighClock = core.Clock()
imageReminderHigh = visual.ImageStim(
win=win,
name='imageReminderHigh',
image=_thisDir + '/images/HighReminder.jpeg', mask=None,
ori=0, pos=(0, 0), size=(6.0, 4.0),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False, units="cm",
texRes=128, interpolate=True, depth=0.0)
ReminderHigh_Resp = keyboard.Keyboard()
# Initialize components for Routine "LowLoad1"
def item_generator(name, pos, win):
    """Create one letter stimulus for the clock-face layout.

    The special name "circle_situator" gets a larger letter height (1.5 cm)
    than the twelve regular items (0.5 cm); everything else is identical.
    """
    letter_height = 1.5 if name == "circle_situator" else 0.5
    return visual.TextStim(
        win=win, name=name,
        text='default text',
        font='Arial',
        pos=pos, height=letter_height, wrapWidth=None, ori=0,
        color='white', colorSpace='rgb', opacity=1,
        languageStyle='LTR', units="cm",
        depth=0.0)
LowLoad1Clock = core.Clock()
# Twelve letter positions arranged clockwise around a circle (cm units),
# plus a "circle_situator" marker overlapping the 12-o'clock item.
item1 = item_generator("item1", (0, 4.1), win)
item1_resp = keyboard.Keyboard()
item2 = item_generator("item2", (1.905, 3.5), win)
item3 = item_generator("item3", (3.5, 2.2), win)
item4 = item_generator("item4", (4.2, 0), win)
item5 = item_generator("item5", (3.5, -2.2), win)
item6 = item_generator("item6", (1.905, -3.5), win)
item7 = item_generator("item7", (0, -4.1), win)
item8 = item_generator("item8", (-1.905, -3.5), win)
item9 = item_generator("item9", (-3.5, -2.2), win)
item10 = item_generator("item10", (-4.2, 0), win)
item11 = item_generator("item11", (-3.5, 2.2), win)
item12 = item_generator("item12", (-1.905, 3.5), win)
item13 = item_generator("circle_situator", (0, 4.1), win)
# Initialize components for Routine "Break"
BreakClock = core.Clock()
blankscreen = visual.TextStim(win=win, name='blankscreen',
    text=None,
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    languageStyle='LTR', units="cm",
    depth=0.0);
# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "Consent"-------
continueRoutine = True
# update component parameters for each repeat
ConsentResp.keys = []
ConsentResp.rt = []
_ConsentResp_allKeys = []
# keep track of which components have finished
ConsentComponents = [ConsentPage, ConsentResp]
for thisComponent in ConsentComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
ConsentClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Consent"-------
while continueRoutine:
# get current time
t = ConsentClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=ConsentClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *ConsentPage* updates
if ConsentPage.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
ConsentPage.frameNStart = frameN # exact frame index
ConsentPage.tStart = t # local t and not account for scr refresh
ConsentPage.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(ConsentPage, 'tStartRefresh') # time at next scr refresh
ConsentPage.setAutoDraw(True)
# *ConsentResp* updates
waitOnFlip = False
if ConsentResp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
ConsentResp.frameNStart = frameN # exact frame index
ConsentResp.tStart = t # local t and not account for scr refresh
ConsentResp.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(ConsentResp, 'tStartRefresh') # time at next scr refresh
ConsentResp.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(ConsentResp.clock.reset) # t=0 on next screen flip
win.callOnFlip(ConsentResp.clearEvents, eventType='keyboard') # clear events on next screen flip
if ConsentResp.status == STARTED and not waitOnFlip:
theseKeys = ConsentResp.getKeys(keyList=['space'], waitRelease=False)
_ConsentResp_allKeys.extend(theseKeys)
if len(_ConsentResp_allKeys):
ConsentResp.keys = _ConsentResp_allKeys[-1].name # just the last key pressed
ConsentResp.rt = _ConsentResp_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ConsentComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Consent"-------
for thisComponent in ConsentComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if ConsentResp.keys in ['', [], None]: # No response was made
ConsentResp.keys = None
# the Routine "Consent" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "Break"-------
continueRoutine = True
routineTimer.add(0.500000)
# update component parameters for each repeat
# keep track of which components have finished
BreakComponents = [blankscreen]
for thisComponent in BreakComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
BreakClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Break"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = BreakClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=BreakClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *blankscreen* updates
if blankscreen.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
blankscreen.frameNStart = frameN # exact frame index
blankscreen.tStart = t # local t and not account for scr refresh
blankscreen.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(blankscreen, 'tStartRefresh') # time at next scr refresh
blankscreen.setAutoDraw(True)
if blankscreen.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > blankscreen.tStartRefresh + 0.5-frameTolerance:
# keep track of stop time/frame for later
blankscreen.tStop = t # not accounting for scr refresh
blankscreen.frameNStop = frameN # exact frame index
win.timeOnFlip(blankscreen, 'tStopRefresh') # time at next scr refresh
blankscreen.setAutoDraw(False)
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in BreakComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Break"-------
for thisComponent in BreakComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# ------Prepare to start Routine "Instructions1"-------
continueRoutine = True
# update component parameters for each repeat
textInstructionsResp.keys = []
textInstructionsResp.rt = []
_textInstructionsResp_allKeys = []
# keep track of which components have finished
Instructions1Components = [textInstructions, textInstructionsResp]
for thisComponent in Instructions1Components:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Instructions1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Instructions1"-------
while continueRoutine:
# get current time
t = Instructions1Clock.getTime()
tThisFlip = win.getFutureFlipTime(clock=Instructions1Clock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *textInstructions* updates
if textInstructions.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
textInstructions.frameNStart = frameN # exact frame index
textInstructions.tStart = t # local t and not account for scr refresh
textInstructions.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(textInstructions, 'tStartRefresh') # time at next scr refresh
textInstructions.setAutoDraw(True)
# *textInstructionsResp* updates
waitOnFlip = False
if textInstructionsResp.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
textInstructionsResp.frameNStart = frameN # exact frame index
textInstructionsResp.tStart = t # local t and not account for scr refresh
textInstructionsResp.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(textInstructionsResp, 'tStartRefresh') # time at next scr refresh
textInstructionsResp.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(textInstructionsResp.clock.reset) # t=0 on next screen flip
win.callOnFlip(textInstructionsResp.clearEvents, eventType='keyboard') # clear events on next screen flip
if textInstructionsResp.status == STARTED and not waitOnFlip:
theseKeys = textInstructionsResp.getKeys(keyList=['space'], waitRelease=False)
_textInstructionsResp_allKeys.extend(theseKeys)
if len(_textInstructionsResp_allKeys):
textInstructionsResp.keys = _textInstructionsResp_allKeys[-1].name # just the last key pressed
textInstructionsResp.rt = _textInstructionsResp_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in Instructions1Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Instructions1"-------
for thisComponent in Instructions1Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "Instructions" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "Break"-------
# Fixed-duration (0.5 s) blank-screen pause driven by the non-slip
# routineTimer; no responses are collected during this routine.
continueRoutine = True
routineTimer.add(0.500000)
# update component parameters for each repeat
# keep track of which components have finished
BreakComponents = [blankscreen]
for thisComponent in BreakComponents:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
BreakClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Break"-------
# Frame loop: ends when routineTimer runs out or all components finish.
while continueRoutine and routineTimer.getTime() > 0:
    # get current time
    t = BreakClock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=BreakClock)
    tThisFlipGlobal = win.getFutureFlipTime(clock=None)
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *blankscreen* updates
    if blankscreen.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        blankscreen.frameNStart = frameN # exact frame index
        blankscreen.tStart = t # local t and not account for scr refresh
        blankscreen.tStartRefresh = tThisFlipGlobal # on global time
        win.timeOnFlip(blankscreen, 'tStartRefresh') # time at next scr refresh
        blankscreen.setAutoDraw(True)
    if blankscreen.status == STARTED:
        # is it time to stop? (based on global clock, using actual start)
        if tThisFlipGlobal > blankscreen.tStartRefresh + 0.5-frameTolerance:
            # keep track of stop time/frame for later
            blankscreen.tStop = t # not accounting for scr refresh
            blankscreen.frameNStop = frameN # exact frame index
            win.timeOnFlip(blankscreen, 'tStopRefresh') # time at next scr refresh
            blankscreen.setAutoDraw(False)
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in BreakComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "Break"-------
# NOTE(review): unlike the other routines, no routineTimer.reset() follows
# this ending block — presumably intentional because Break IS non-slip
# safe (it used routineTimer.add) — confirm against PsychoPy convention.
for thisComponent in BreakComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# ------Prepare to start Routine "Instructions2"-------
# Shows the imgInstructions image until the participant presses SPACE
# (collected via imgInstructionsResp); the keypress ends the routine.
continueRoutine = True
# update component parameters for each repeat
imgInstructionsResp.keys = []
imgInstructionsResp.rt = []
_imgInstructionsResp_allKeys = []
# keep track of which components have finished
Instructions2Components = [imgInstructions, imgInstructionsResp]
for thisComponent in Instructions2Components:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
Instructions2Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Instructions2"-------
while continueRoutine:
    # get current time
    t = Instructions2Clock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=Instructions2Clock)
    tThisFlipGlobal = win.getFutureFlipTime(clock=None)
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *imgInstructions* updates
    if imgInstructions.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        imgInstructions.frameNStart = frameN # exact frame index
        imgInstructions.tStart = t # local t and not account for scr refresh
        imgInstructions.tStartRefresh = tThisFlipGlobal # on global time
        win.timeOnFlip(imgInstructions, 'tStartRefresh') # time at next scr refresh
        imgInstructions.setAutoDraw(True)
    # *imgInstructionsResp* updates
    waitOnFlip = False
    if imgInstructionsResp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        imgInstructionsResp.frameNStart = frameN # exact frame index
        imgInstructionsResp.tStart = t # local t and not account for scr refresh
        imgInstructionsResp.tStartRefresh = tThisFlipGlobal # on global time
        win.timeOnFlip(imgInstructionsResp, 'tStartRefresh') # time at next scr refresh
        imgInstructionsResp.status = STARTED
        # keyboard checking is just starting
        waitOnFlip = True
        win.callOnFlip(imgInstructionsResp.clock.reset) # t=0 on next screen flip
        win.callOnFlip(imgInstructionsResp.clearEvents, eventType='keyboard') # clear events on next screen flip
    if imgInstructionsResp.status == STARTED and not waitOnFlip:
        theseKeys = imgInstructionsResp.getKeys(keyList=['space'], waitRelease=False)
        _imgInstructionsResp_allKeys.extend(theseKeys)
        if len(_imgInstructionsResp_allKeys):
            imgInstructionsResp.keys = _imgInstructionsResp_allKeys[-1].name # just the last key pressed
            imgInstructionsResp.rt = _imgInstructionsResp_allKeys[-1].rt
            # a response ends the routine
            continueRoutine = False
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in Instructions2Components:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "Instructions2"-------
for thisComponent in Instructions2Components:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# check responses
if imgInstructionsResp.keys in ['', [], None]: # No response was made
    imgInstructionsResp.keys = None
# the Routine "imgInstructions" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "PracticeWarning"-------
# Displays the PracticeText warning until the participant presses SPACE.
continueRoutine = True
# update component parameters for each repeat
PracticeTextResp.keys = []
PracticeTextResp.rt = []
_PracticeTextResp_allKeys = []
# keep track of which components have finished
PracticeWarningComponents = [PracticeText, PracticeTextResp]
for thisComponent in PracticeWarningComponents:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
PracticeWarningClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Practice Warning"-------
while continueRoutine:
    # get current time
    t = PracticeWarningClock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=PracticeWarningClock)
    tThisFlipGlobal = win.getFutureFlipTime(clock=None)
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *PracticeText* updates
    if PracticeText.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        PracticeText.frameNStart = frameN # exact frame index
        PracticeText.tStart = t # local t and not account for scr refresh
        PracticeText.tStartRefresh = tThisFlipGlobal # on global time
        win.timeOnFlip(PracticeText, 'tStartRefresh') # time at next scr refresh
        PracticeText.setAutoDraw(True)
    # *PracticeTextResp* updates
    waitOnFlip = False
    if PracticeTextResp.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
        # keep track of start time/frame for later
        PracticeTextResp.frameNStart = frameN # exact frame index
        PracticeTextResp.tStart = t # local t and not account for scr refresh
        PracticeTextResp.tStartRefresh = tThisFlipGlobal # on global time
        win.timeOnFlip(PracticeTextResp, 'tStartRefresh') # time at next scr refresh
        PracticeTextResp.status = STARTED
        # keyboard checking is just starting
        waitOnFlip = True
        win.callOnFlip(PracticeTextResp.clock.reset) # t=0 on next screen flip
        win.callOnFlip(PracticeTextResp.clearEvents, eventType='keyboard') # clear events on next screen flip
    if PracticeTextResp.status == STARTED and not waitOnFlip:
        theseKeys = PracticeTextResp.getKeys(keyList=['space'], waitRelease=False)
        _PracticeTextResp_allKeys.extend(theseKeys)
        if len(_PracticeTextResp_allKeys):
            PracticeTextResp.keys = _PracticeTextResp_allKeys[-1].name # just the last key pressed
            PracticeTextResp.rt = _PracticeTextResp_allKeys[-1].rt
            # a response ends the routine
            continueRoutine = False
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in PracticeWarningComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "Practice Warning"-------
for thisComponent in PracticeWarningComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "Instructions" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
#----------------------------PRACTICE TRIALS----------------------------
#Parameters
# 2 practice blocks x 2 practice trials; even blocks use the c/o/i/l
# letter set, odd blocks use p/d/q/b (same mapping as the main task).
npractrials = 2
npracblocks = 2
#Lists
# Per-trial accumulators, written to the practice CSV after the loops.
pracresponses_list = []
pracreaction_list = []
praccorrect_list = []
for i in range(npracblocks):
    if i % 2 == 0:
        letters = ["c", "o", "i", "l"]
    else:
        letters = ["p", "d", "q", "b"]
    for tr in range(npractrials):
        # 12 letters drawn (with replacement) from the block's letter set.
        low_letters = np.random.choice(letters, size=12)
        #-----Prepare to start Routine "PRACTICE"-----
        continueRoutine = True
        #update component parameters for each repeat
        item1.setText(low_letters[0])
        item1_resp.keys = []
        item1_resp.rt = []
        _item1_resp_allKeys = []
        item2.setText(low_letters[1])
        item3.setText(low_letters[2])
        item4.setText(low_letters[3])
        item5.setText(low_letters[4])
        item6.setText(low_letters[5])
        item7.setText(low_letters[6])
        item8.setText(low_letters[7])
        item9.setText(low_letters[8])
        item10.setText(low_letters[9])
        item11.setText(low_letters[10])
        item12.setText(low_letters[11])
        item13.setText("O")
        #keep track of which components have finished
        PracticeComponents = [
            item1, item1_resp
        ]
        for thisComponent in PracticeComponents:
            thisComponent.tStart = None
            thisComponent.tStop = None
            thisComponent.tStartRefresh = None
            thisComponent.tStopRefresh = None
            if hasattr(thisComponent, 'status'):
                thisComponent.status = NOT_STARTED
        #reset timers
        t = 0
        _timeToFirstFrame = win.getFutureFlipTime(clock="now")
        PracticeClock.reset(-_timeToFirstFrame) #t0 is time of first possible flip
        frameN = -1
        #--------Run Routine "Practice"--------
        while continueRoutine:
            #get current time
            t = PracticeClock.getTime()
            tThisFlip = win.getFutureFlipTime(clock=PracticeClock)
            tThisFlipGlobal = win.getFutureFlipTime(clock=None)
            frameN = frameN + 1 #number of completed frames (so 0 is the first frame)
            #update/draw components on each frame
            #--------item1--------
            #*item1* updates
            if item1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
                #keep track of start time/frame for later
                item1.frameNStart = frameN #exact frame index
                item1.tStart = t #local t and not account for scr refresh
                item1.tStartRefresh = tThisFlipGlobal #on global time
                win.timeOnFlip(item1, 'tStartRefresh') #time at next scr refresh
                item1.setAutoDraw(True)
            #*item1_resp* updates
            waitOnFlip = False
            if item1_resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
                #keep track of start time/frame for later
                item1_resp.frameNStart = frameN #exact frame index
                item1_resp.tStart = t #local t and not account for scr refresh
                item1_resp.tStartRefresh = tThisFlipGlobal #on global time
                win.timeOnFlip(item1_resp, 'tStartRefresh') #time at next scr refresh
                item1_resp.status = STARTED
                #keyboard checking is just starting
                waitOnFlip = True
                # NOTE(review): unlike the space-bar routines above, no
                # clearEvents is queued here, so keys pressed before this
                # flip may carry into the response buffer — confirm intended.
                win.callOnFlip(item1_resp.clock.reset) # t=0 on next screen flip
            if item1_resp.status == STARTED and not waitOnFlip:
                theseKeys = item1_resp.getKeys(keyList=['x', 'n'], waitRelease=False)
                _item1_resp_allKeys.extend(theseKeys)
                if len(_item1_resp_allKeys):
                    item1_resp.keys = [key.name for key in _item1_resp_allKeys] #storing all keys
                    item1_resp.rt = [key.rt for key in _item1_resp_allKeys]
            #--------item 2 through 12--------
            item2.setAutoDraw(True)
            item3.setAutoDraw(True)
            item4.setAutoDraw(True)
            item5.setAutoDraw(True)
            item6.setAutoDraw(True)
            item7.setAutoDraw(True)
            item8.setAutoDraw(True)
            item9.setAutoDraw(True)
            item10.setAutoDraw(True)
            item11.setAutoDraw(True)
            item12.setAutoDraw(True)
            item13.setAutoDraw(True)
            #--------------------------------------------------------------------
            # The routine only ends once exactly 12 responses (one per
            # letter) have been collected.
            if len(item1_resp.keys) == 12:
                item1.setAutoDraw(False)
                item2.setAutoDraw(False)
                item3.setAutoDraw(False)
                item4.setAutoDraw(False)
                item5.setAutoDraw(False)
                item6.setAutoDraw(False)
                item7.setAutoDraw(False)
                item8.setAutoDraw(False)
                item9.setAutoDraw(False)
                item10.setAutoDraw(False)
                item11.setAutoDraw(False)
                item12.setAutoDraw(False)
                item13.setAutoDraw(False)
                win.flip()
                continueRoutine = False
            else:
                continueRoutine = True
            #refresh the screen
            if continueRoutine: #don't flip if this routine is over or we'll get blank screen
                win.flip()
        # Score the trial: expected key per letter for this block's mapping.
        if i % 2 == 0:
            correct_response = [
                "x" if r == "c" or r == "o" else "n"
                for r in low_letters
            ]
        else:
            correct_response = [
                "x" if r == "d" or r == "b" else "n"
                for r in low_letters
            ]
        correct = np.array(correct_response) == np.array(item1_resp.keys)
        pracresponses_list.append(item1_resp.keys)
        pracreaction_list.append(item1_resp.rt)
        praccorrect_list.append(correct)
        time.sleep(1)
# NOTE(review): np.diff converts each trial's cumulative key times into
# inter-keypress intervals (presumably intended) — and drops the first
# key's latency from each trial; verify against the analysis plan.
pracreaction_list = np.diff(pracreaction_list).tolist()
pracresults = pd.DataFrame(
    data = {
        "block": [n+1 for n in range(npracblocks) for n1 in range (npractrials)],
        "trials": [n%npractrials + 1 for n in range (npracblocks*npractrials)],
        "Responses": pracresponses_list,
        "Accuracy": praccorrect_list,
        "Reaction Times": pracreaction_list
    }
)
# Save practice results next to the script (cwd was set to _thisDir).
pracresults.to_csv(os.getcwd() + f"/pracresults_{expInfo['participant']}.csv")
# Defensive cleanup: make sure no letter stimulus survives into the next routine.
item1.setAutoDraw(False)
item2.setAutoDraw(False)
item3.setAutoDraw(False)
item4.setAutoDraw(False)
item5.setAutoDraw(False)
item6.setAutoDraw(False)
item7.setAutoDraw(False)
item8.setAutoDraw(False)
item9.setAutoDraw(False)
item10.setAutoDraw(False)
item11.setAutoDraw(False)
item12.setAutoDraw(False)
item13.setAutoDraw(False)
#------------------------------------------------------------------------------------------
# ------Prepare to start Routine "ReminderLow"-------
# Shows the low-load mapping reminder image until SPACE is pressed.
continueRoutine = True
# update component parameters for each repeat
ReminderLow_Resp.keys = []
ReminderLow_Resp.rt = []
_ReminderLow_Resp_allKeys = []
# keep track of which components have finished
ReminderLowComponents = [imageReminderLow, ReminderLow_Resp]
for thisComponent in ReminderLowComponents:
    thisComponent.tStart = None
    thisComponent.tStop = None
    thisComponent.tStartRefresh = None
    thisComponent.tStopRefresh = None
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
ReminderLowClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "ReminderLow"-------
while continueRoutine:
    # get current time
    t = ReminderLowClock.getTime()
    tThisFlip = win.getFutureFlipTime(clock=ReminderLowClock)
    tThisFlipGlobal = win.getFutureFlipTime(clock=None)
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # *imageReminderLow* updates
    if imageReminderLow.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        imageReminderLow.frameNStart = frameN # exact frame index
        imageReminderLow.tStart = t # local t and not account for scr refresh
        imageReminderLow.tStartRefresh = tThisFlipGlobal # on global time
        win.timeOnFlip(imageReminderLow, 'tStartRefresh') # time at next scr refresh
        imageReminderLow.setAutoDraw(True)
    # *ReminderLow_Resp* updates
    waitOnFlip = False
    if ReminderLow_Resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
        # keep track of start time/frame for later
        ReminderLow_Resp.frameNStart = frameN # exact frame index
        ReminderLow_Resp.tStart = t # local t and not account for scr refresh
        ReminderLow_Resp.tStartRefresh = tThisFlipGlobal # on global time
        win.timeOnFlip(ReminderLow_Resp, 'tStartRefresh') # time at next scr refresh
        ReminderLow_Resp.status = STARTED
        # keyboard checking is just starting
        waitOnFlip = True
        win.callOnFlip(ReminderLow_Resp.clock.reset) # t=0 on next screen flip
        win.callOnFlip(ReminderLow_Resp.clearEvents, eventType='keyboard') # clear events on next screen flip
    if ReminderLow_Resp.status == STARTED and not waitOnFlip:
        theseKeys = ReminderLow_Resp.getKeys(keyList=['space'], waitRelease=False)
        _ReminderLow_Resp_allKeys.extend(theseKeys)
        if len(_ReminderLow_Resp_allKeys):
            ReminderLow_Resp.keys = _ReminderLow_Resp_allKeys[-1].name # just the last key pressed
            ReminderLow_Resp.rt = _ReminderLow_Resp_allKeys[-1].rt
            # a response ends the routine
            continueRoutine = False
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in ReminderLowComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "ReminderLow"-------
for thisComponent in ReminderLowComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
routineTimer.reset()
# ----------------------------------RUN EXPERIMENT TRIALS---------------------------------
######################## EXPERIMENT PARAMETERS ########################
# 2 blocks x 20 trials; even blocks use c/o/i/l, odd blocks p/d/q/b.
ntrials = 20
nblocks = 2
#######################################################################
# Per-trial accumulators for the main task, written to the results CSV.
responses_list = []
reaction_list = []
correct_list = []
# Position (response count) at which the distractor appeared, or "FALSE".
distractor_pos_list = []
def longest_rep(seq):
    """Return the length of the longest run of identical adjacent items.

    Used to reject letter sequences containing 4+ repeats in a row when
    generating trial stimuli.

    Parameters
    ----------
    seq : iterable
        Any iterable of comparable items (here: a 1-D numpy array of
        single-letter strings). The original required an object with
        ``.shape``; this accepts plain lists/tuples too.

    Returns
    -------
    int
        Length of the longest streak of equal consecutive elements.
        Returns 0 for an empty sequence (the original returned 1, which
        was a latent edge-case bug; no caller passes an empty sequence,
        so this is backward-compatible in practice).
    """
    # Local import keeps this block self-contained; the file's top-level
    # import section is outside this chunk.
    from itertools import groupby

    # groupby yields one group per run of equal items; the longest group
    # size is the longest repetition streak.
    return max((sum(1 for _ in group) for _, group in groupby(seq)), default=0)
# Main task: for each block, pick the letter set, choose 6 distractor
# trials (trial numbers 2..20), shuffle distractor images and the
# response-count at which each distractor appears, then run the trials.
# After every block except the last, show the reminder for the NEXT
# block's mapping (low-load after odd-indexed blocks, high-load after
# even-indexed blocks).
for i in range(nblocks):
    if i % 2 == 0:
        letters = ["c", "o", "i", "l"]
    else:
        letters = ["p", "d", "q", "b"]
    distractor_trials = np.random.choice(
        np.arange(2, 21, 1), size=6, replace=False
    )
    distractor_images = os.listdir(_thisDir + "/distractors")
    np.random.shuffle(distractor_images)
    d = 0
    distractor_step_list = [3, 4, 5, 6, 7, 8]
    np.random.shuffle(distractor_step_list)
    for tr in range(ntrials):
        # Resample until the sequence has no run of 4+ identical letters.
        low_letters = None
        gen = True
        while gen:
            low_letters = np.random.choice(letters, size=12)
            if longest_rep(low_letters) < 4:
                gen = False
        image = distractor_images[d]
        distractor_image = visual.ImageStim(
            win=win,
            name='distractor',
            image=_thisDir + f'/distractors/{image}', mask=None,
            ori=0, pos=(0, 0), size=(2.0, 2.0),
            color=[1,1,1], colorSpace='rgb', opacity=1,
            flipHoriz=False, flipVert=False, units="cm",
            texRes=128, interpolate=True, depth=0.0
        )
        # Response count after which the distractor is shown on this trial.
        distractor_step = [distractor_step_list[d]]
        # ------Prepare to start Routine "LowLoad1"-------
        continueRoutine = True
        # update component parameters for each repeat
        item1.setText(low_letters[0])
        item1_resp.keys = []
        item1_resp.rt = []
        _item1_resp_allKeys = []
        item2.setText(low_letters[1])
        item3.setText(low_letters[2])
        item4.setText(low_letters[3])
        item5.setText(low_letters[4])
        item6.setText(low_letters[5])
        item7.setText(low_letters[6])
        item8.setText(low_letters[7])
        item9.setText(low_letters[8])
        item10.setText(low_letters[9])
        item11.setText(low_letters[10])
        item12.setText(low_letters[11])
        item13.setText("O")
        # keep track of which components have finished
        LowLoad1Components = [
            item1, item1_resp
        ]
        for thisComponent in LowLoad1Components:
            thisComponent.tStart = None
            thisComponent.tStop = None
            thisComponent.tStartRefresh = None
            thisComponent.tStopRefresh = None
            if hasattr(thisComponent, 'status'):
                thisComponent.status = NOT_STARTED
        # reset timers
        t = 0
        _timeToFirstFrame = win.getFutureFlipTime(clock="now")
        LowLoad1Clock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
        frameN = -1
        # -------Run Routine "LowLoad1"-------
        while continueRoutine:
            # get current time
            t = LowLoad1Clock.getTime()
            tThisFlip = win.getFutureFlipTime(clock=LowLoad1Clock)
            tThisFlipGlobal = win.getFutureFlipTime(clock=None)
            frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
            # update/draw components on each frame
            # --------------item1--------------
            # *item1* updates
            if item1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
                # keep track of start time/frame for later
                item1.frameNStart = frameN # exact frame index
                item1.tStart = t # local t and not account for scr refresh
                item1.tStartRefresh = tThisFlipGlobal # on global time
                win.timeOnFlip(item1, 'tStartRefresh') # time at next scr refresh
                item1.setAutoDraw(True)
            # *item1_resp* updates
            waitOnFlip = False
            if item1_resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
                # keep track of start time/frame for later
                item1_resp.frameNStart = frameN # exact frame index
                item1_resp.tStart = t # local t and not account for scr refresh
                item1_resp.tStartRefresh = tThisFlipGlobal # on global time
                win.timeOnFlip(item1_resp, 'tStartRefresh') # time at next scr refresh
                item1_resp.status = STARTED
                # keyboard checking is just starting
                waitOnFlip = True
                # NOTE(review): no clearEvents queued here (cf. the space-bar
                # routines) — pre-routine keypresses may register; confirm.
                win.callOnFlip(item1_resp.clock.reset) # t=0 on next screen flip
            if item1_resp.status == STARTED and not waitOnFlip:
                theseKeys = item1_resp.getKeys(keyList=['x', 'n'], waitRelease=False)
                _item1_resp_allKeys.extend(theseKeys)
                if len(_item1_resp_allKeys):
                    item1_resp.keys = [key.name for key in _item1_resp_allKeys] # storing all keys
                    item1_resp.rt = [key.rt for key in _item1_resp_allKeys]
            # --------------item2--------------
            item2.setAutoDraw(True)
            item3.setAutoDraw(True)
            item4.setAutoDraw(True)
            item5.setAutoDraw(True)
            item6.setAutoDraw(True)
            item7.setAutoDraw(True)
            item8.setAutoDraw(True)
            item9.setAutoDraw(True)
            item10.setAutoDraw(True)
            item11.setAutoDraw(True)
            item12.setAutoDraw(True)
            item13.setAutoDraw(True)
            # Show the distractor only on designated trials, and only once
            # the participant has made exactly distractor_step[0] responses.
            if tr+1 in distractor_trials and (len(item1_resp.keys) == distractor_step[0]):
                distractor_image.setAutoDraw(True)
            # ---------------------------------------------------------------------
            # The trial ends once 12 responses (one per letter) are in.
            if len(item1_resp.keys) == 12:
                item1.setAutoDraw(False)
                item2.setAutoDraw(False)
                item3.setAutoDraw(False)
                item4.setAutoDraw(False)
                item5.setAutoDraw(False)
                item6.setAutoDraw(False)
                item7.setAutoDraw(False)
                item8.setAutoDraw(False)
                item9.setAutoDraw(False)
                item10.setAutoDraw(False)
                item11.setAutoDraw(False)
                item12.setAutoDraw(False)
                item13.setAutoDraw(False)
                distractor_image.setAutoDraw(False)
                win.flip()
                continueRoutine=False
            else:
                continueRoutine=True
            # refresh the screen
            if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
                win.flip()
        # Record where (in response count) the distractor appeared, if at all.
        if tr+1 in distractor_trials:
            distractor_pos_list.append(distractor_step[0])
        else:
            distractor_pos_list.append("FALSE")
        # Score the trial against this block's stimulus-response mapping.
        if i % 2 == 0:
            correct_response = [
                "x" if r == "c" or r == "o" else "n"
                for r in low_letters
            ]
        else:
            correct_response = [
                "x" if r == "d" or r == "b" else "n"
                for r in low_letters
            ]
        correct = np.array(correct_response) == np.array(item1_resp.keys)
        responses_list.append(item1_resp.keys)
        reaction_list.append(item1_resp.rt)
        correct_list.append(correct)
        # Advance to the next distractor image/step after a distractor trial
        # (capped at index 5: there are only 6 images/steps per block).
        if tr+1 in distractor_trials and d<5:
            d+=1
        distractor_image.setAutoDraw(False)
        time.sleep(1)
        item1.setAutoDraw(False)
        item2.setAutoDraw(False)
        item3.setAutoDraw(False)
        item4.setAutoDraw(False)
        item5.setAutoDraw(False)
        item6.setAutoDraw(False)
        item7.setAutoDraw(False)
        item8.setAutoDraw(False)
        item9.setAutoDraw(False)
        item10.setAutoDraw(False)
        item11.setAutoDraw(False)
        item12.setAutoDraw(False)
        item13.setAutoDraw(False)
    #------------------------------------------------------------------------------------------
    # Between blocks: show the mapping reminder for the upcoming block.
    if i+1 != nblocks:
        if i % 2 == 1:
            # ------Prepare to start Routine "ReminderLow"-------
            continueRoutine = True
            # update component parameters for each repeat
            ReminderLow_Resp.keys = []
            ReminderLow_Resp.rt = []
            _ReminderLow_Resp_allKeys = []
            # keep track of which components have finished
            ReminderLowComponents = [imageReminderLow, ReminderLow_Resp]
            for thisComponent in ReminderLowComponents:
                thisComponent.tStart = None
                thisComponent.tStop = None
                thisComponent.tStartRefresh = None
                thisComponent.tStopRefresh = None
                if hasattr(thisComponent, 'status'):
                    thisComponent.status = NOT_STARTED
            # reset timers
            t = 0
            _timeToFirstFrame = win.getFutureFlipTime(clock="now")
            ReminderLowClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
            frameN = -1
            # -------Run Routine "ReminderLow"-------
            while continueRoutine:
                # get current time
                t = ReminderLowClock.getTime()
                tThisFlip = win.getFutureFlipTime(clock=ReminderLowClock)
                tThisFlipGlobal = win.getFutureFlipTime(clock=None)
                frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
                # update/draw components on each frame
                # *imageReminderLow* updates
                if imageReminderLow.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
                    # keep track of start time/frame for later
                    imageReminderLow.frameNStart = frameN # exact frame index
                    imageReminderLow.tStart = t # local t and not account for scr refresh
                    imageReminderLow.tStartRefresh = tThisFlipGlobal # on global time
                    win.timeOnFlip(imageReminderLow, 'tStartRefresh') # time at next scr refresh
                    imageReminderLow.setAutoDraw(True)
                # *ReminderLow_Resp* updates
                waitOnFlip = False
                if ReminderLow_Resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
                    # keep track of start time/frame for later
                    ReminderLow_Resp.frameNStart = frameN # exact frame index
                    ReminderLow_Resp.tStart = t # local t and not account for scr refresh
                    ReminderLow_Resp.tStartRefresh = tThisFlipGlobal # on global time
                    win.timeOnFlip(ReminderLow_Resp, 'tStartRefresh') # time at next scr refresh
                    ReminderLow_Resp.status = STARTED
                    # keyboard checking is just starting
                    waitOnFlip = True
                    win.callOnFlip(ReminderLow_Resp.clock.reset) # t=0 on next screen flip
                    win.callOnFlip(ReminderLow_Resp.clearEvents, eventType='keyboard') # clear events on next screen flip
                if ReminderLow_Resp.status == STARTED and not waitOnFlip:
                    theseKeys = ReminderLow_Resp.getKeys(keyList=['space'], waitRelease=False)
                    _ReminderLow_Resp_allKeys.extend(theseKeys)
                    if len(_ReminderLow_Resp_allKeys):
                        ReminderLow_Resp.keys = _ReminderLow_Resp_allKeys[-1].name # just the last key pressed
                        ReminderLow_Resp.rt = _ReminderLow_Resp_allKeys[-1].rt
                        # a response ends the routine
                        continueRoutine = False
                # check if all components have finished
                if not continueRoutine: # a component has requested a forced-end of Routine
                    break
                continueRoutine = False # will revert to True if at least one component still running
                for thisComponent in ReminderLowComponents:
                    if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                        continueRoutine = True
                        break # at least one component has not yet finished
                # refresh the screen
                if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
                    win.flip()
            # -------Ending Routine "ReminderLow"-------
            for thisComponent in ReminderLowComponents:
                if hasattr(thisComponent, "setAutoDraw"):
                    thisComponent.setAutoDraw(False)
            routineTimer.reset()
        else:
            # ------Prepare to start Routine "ReminderHigh"-------
            continueRoutine = True
            # update component parameters for each repeat
            ReminderHigh_Resp.keys = []
            ReminderHigh_Resp.rt = []
            _ReminderHigh_Resp_allKeys = []
            # keep track of which components have finished
            ReminderHighComponents = [imageReminderHigh, ReminderHigh_Resp]
            for thisComponent in ReminderHighComponents:
                thisComponent.tStart = None
                thisComponent.tStop = None
                thisComponent.tStartRefresh = None
                thisComponent.tStopRefresh = None
                if hasattr(thisComponent, 'status'):
                    thisComponent.status = NOT_STARTED
            # reset timers
            t = 0
            _timeToFirstFrame = win.getFutureFlipTime(clock="now")
            ReminderHighClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
            frameN = -1
            # -------Run Routine "ReminderHigh"-------
            while continueRoutine:
                # get current time
                t = ReminderHighClock.getTime()
                tThisFlip = win.getFutureFlipTime(clock=ReminderHighClock)
                tThisFlipGlobal = win.getFutureFlipTime(clock=None)
                frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
                # update/draw components on each frame
                # *imageReminderHigh* updates
                if imageReminderHigh.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
                    # keep track of start time/frame for later
                    imageReminderHigh.frameNStart = frameN # exact frame index
                    imageReminderHigh.tStart = t # local t and not account for scr refresh
                    imageReminderHigh.tStartRefresh = tThisFlipGlobal # on global time
                    win.timeOnFlip(imageReminderHigh, 'tStartRefresh') # time at next scr refresh
                    imageReminderHigh.setAutoDraw(True)
                # *ReminderHigh_Resp* updates
                waitOnFlip = False
                if ReminderHigh_Resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
                    # keep track of start time/frame for later
                    ReminderHigh_Resp.frameNStart = frameN # exact frame index
                    ReminderHigh_Resp.tStart = t # local t and not account for scr refresh
                    ReminderHigh_Resp.tStartRefresh = tThisFlipGlobal # on global time
                    win.timeOnFlip(ReminderHigh_Resp, 'tStartRefresh') # time at next scr refresh
                    ReminderHigh_Resp.status = STARTED
                    # keyboard checking is just starting
                    waitOnFlip = True
                    win.callOnFlip(ReminderHigh_Resp.clock.reset) # t=0 on next screen flip
                    win.callOnFlip(ReminderHigh_Resp.clearEvents, eventType='keyboard') # clear events on next screen flip
                if ReminderHigh_Resp.status == STARTED and not waitOnFlip:
                    theseKeys = ReminderHigh_Resp.getKeys(keyList=['space'], waitRelease=False)
                    _ReminderHigh_Resp_allKeys.extend(theseKeys)
                    if len(_ReminderHigh_Resp_allKeys):
                        ReminderHigh_Resp.keys = _ReminderHigh_Resp_allKeys[-1].name # just the last key pressed
                        ReminderHigh_Resp.rt = _ReminderHigh_Resp_allKeys[-1].rt
                        # a response ends the routine
                        continueRoutine = False
                # check if all components have finished
                if not continueRoutine: # a component has requested a forced-end of Routine
                    break
                continueRoutine = False # will revert to True if at least one component still running
                for thisComponent in ReminderHighComponents:
                    if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                        continueRoutine = True
                        break # at least one component has not yet finished
                # refresh the screen
                if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
                    win.flip()
            # -------Ending Routine "ReminderHigh"-------
            for thisComponent in ReminderHighComponents:
                if hasattr(thisComponent, "setAutoDraw"):
                    thisComponent.setAutoDraw(False)
            routineTimer.reset()
# NOTE(review): np.diff converts each trial's cumulative key times into
# inter-keypress intervals (presumably intended); it also drops the first
# key's latency per trial — verify against the analysis plan.
reaction_list = np.diff(reaction_list).tolist()
results = pd.DataFrame(
    data = {
        "block" : [n+1 for n in range(nblocks) for n1 in range(ntrials)],
        "trial" : [n%ntrials + 1 for n in range(nblocks*ntrials)],
        "Responses" : responses_list,
        "Accuracy" : correct_list,
        "Reaction Times" : reaction_list,
        "Distractor Position" : distractor_pos_list #Not working - need column of True and Falses for distractor presence for each trial
    }
)
# Save the main-task results next to the script (cwd was set to _thisDir).
results.to_csv(os.getcwd() + f"/results_{expInfo['participant']}.csv")
# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()
# make sure everything is closed down
win.close()
core.quit()
"psychopy.core.quit",
"time.sleep",
"numpy.array",
"psychopy.visual.ImageStim",
"numpy.arange",
"psychopy.gui.DlgFromDict",
"os.listdir",
"numpy.diff",
"psychopy.hardware.keyboard.Keyboard",
"psychopy.core.Clock",
"numpy.random.choice",
"psychopy.visual.TextStim",
"psychopy.visual.Window",
... | [((767, 785), 'os.chdir', 'os.chdir', (['_thisDir'], {}), '(_thisDir)\n', (775, 785), False, 'import os\n'), ((948, 1014), 'psychopy.gui.DlgFromDict', 'gui.DlgFromDict', ([], {'dictionary': 'expInfo', 'sortKeys': '(False)', 'title': 'expName'}), '(dictionary=expInfo, sortKeys=False, title=expName)\n', (963, 1014), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((1092, 1109), 'psychopy.data.getDateStr', 'data.getDateStr', ([], {}), '()\n', (1107, 1109), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((1508, 1734), 'psychopy.visual.Window', 'visual.Window', ([], {'size': '[1440, 900]', 'fullscr': '(True)', 'screen': '(0)', 'winType': '"""pyglet"""', 'allowGUI': '(False)', 'allowStencil': '(False)', 'monitor': '"""testMonitor"""', 'color': '[-1.0, -1.0, -1.0]', 'colorSpace': '"""rgb"""', 'blendMode': '"""avg"""', 'useFBO': '(True)', 'units': '"""cm"""'}), "(size=[1440, 900], fullscr=True, screen=0, winType='pyglet',\n allowGUI=False, allowStencil=False, monitor='testMonitor', color=[-1.0,\n -1.0, -1.0], colorSpace='rgb', blendMode='avg', useFBO=True, units='cm')\n", (1521, 1734), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((2137, 2156), 'psychopy.hardware.keyboard.Keyboard', 'keyboard.Keyboard', ([], {}), '()\n', (2154, 2156), False, 'from psychopy.hardware import keyboard\n'), ((2219, 2231), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (2229, 2231), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((2246, 2526), 'psychopy.visual.ImageStim', 'visual.ImageStim', ([], {'win': 'win', 'name': '"""ConsentPage"""', 'image': "(_thisDir + '/images/Consent.jpeg')", 'mask': 'None', 'ori': '(0)', 'pos': '(0, 0)', 'size': '(10.2, 10.0)', 'color': '[1, 1, 1]', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'flipHoriz': '(False)', 'flipVert': '(False)', 'units': '"""cm"""', 'texRes': '(128)', 
'interpolate': '(True)', 'depth': '(0.0)'}), "(win=win, name='ConsentPage', image=_thisDir +\n '/images/Consent.jpeg', mask=None, ori=0, pos=(0, 0), size=(10.2, 10.0),\n color=[1, 1, 1], colorSpace='rgb', opacity=1, flipHoriz=False, flipVert\n =False, units='cm', texRes=128, interpolate=True, depth=0.0)\n", (2262, 2526), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((2557, 2576), 'psychopy.hardware.keyboard.Keyboard', 'keyboard.Keyboard', ([], {}), '()\n', (2574, 2576), False, 'from psychopy.hardware import keyboard\n'), ((2635, 2647), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (2645, 2647), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((2662, 2876), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': '"""blankscreen"""', 'text': 'None', 'font': '"""Arial"""', 'pos': '(0, 0)', 'height': '(0.1)', 'wrapWidth': 'None', 'ori': '(0)', 'color': '"""white"""', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'languageStyle': '"""LTR"""', 'units': '"""cm"""', 'depth': '(0.0)'}), "(win=win, name='blankscreen', text=None, font='Arial', pos=(\n 0, 0), height=0.1, wrapWidth=None, ori=0, color='white', colorSpace=\n 'rgb', opacity=1, languageStyle='LTR', units='cm', depth=0.0)\n", (2677, 2876), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((2968, 2980), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (2978, 2980), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((3000, 3649), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': '"""textInstructions"""', 'text': '"""Task Instructions\n\nIn this experiment, you will switch back and forth between 2 games.\n\n In the first game, press X for letters c & o and N for letters i & l.\n\nIn the second game, press X for letters d & b and N for letters q & p.\n\nYou will switch between both games 4 times.\n\nYou 
will be reminded of the instructions each time you switch!\n\n The task takes less than 20 minutes! \n\n Press SPACEBAR for more instructions!"""', 'font': '"""Arial"""', 'pos': '(0, 0)', 'height': '(0.5)', 'wrapWidth': 'None', 'ori': '(0)', 'color': '"""white"""', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'languageStyle': '"""LTR"""', 'units': '"""cm"""', 'depth': '(0.0)'}), '(win=win, name=\'textInstructions\', text=\n """Task Instructions\n\nIn this experiment, you will switch back and forth between 2 games.\n\n In the first game, press X for letters c & o and N for letters i & l.\n\nIn the second game, press X for letters d & b and N for letters q & p.\n\nYou will switch between both games 4 times.\n\nYou will be reminded of the instructions each time you switch!\n\n The task takes less than 20 minutes! \n\n Press SPACEBAR for more instructions!"""\n , font=\'Arial\', pos=(0, 0), height=0.5, wrapWidth=None, ori=0, color=\n \'white\', colorSpace=\'rgb\', opacity=1, languageStyle=\'LTR\', units=\'cm\',\n depth=0.0)\n', (3015, 3649), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((3691, 3710), 'psychopy.hardware.keyboard.Keyboard', 'keyboard.Keyboard', ([], {}), '()\n', (3708, 3710), False, 'from psychopy.hardware import keyboard\n'), ((3769, 3781), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (3779, 3781), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((3796, 4010), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': '"""blankscreen"""', 'text': 'None', 'font': '"""Arial"""', 'pos': '(0, 0)', 'height': '(0.1)', 'wrapWidth': 'None', 'ori': '(0)', 'color': '"""white"""', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'languageStyle': '"""LTR"""', 'units': '"""cm"""', 'depth': '(0.0)'}), "(win=win, name='blankscreen', text=None, font='Arial', pos=(\n 0, 0), height=0.1, wrapWidth=None, ori=0, color='white', colorSpace=\n 'rgb', opacity=1, 
languageStyle='LTR', units='cm', depth=0.0)\n", (3811, 4010), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((4102, 4114), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (4112, 4114), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((4133, 4422), 'psychopy.visual.ImageStim', 'visual.ImageStim', ([], {'win': 'win', 'name': '"""imgInstructions"""', 'image': "(_thisDir + '/images/Instructions.jpeg')", 'mask': 'None', 'ori': '(0)', 'pos': '(0, 0)', 'size': '(11.0, 11.0)', 'color': '[1, 1, 1]', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'flipHoriz': '(False)', 'flipVert': '(False)', 'units': '"""cm"""', 'texRes': '(128)', 'interpolate': '(True)', 'depth': '(0.0)'}), "(win=win, name='imgInstructions', image=_thisDir +\n '/images/Instructions.jpeg', mask=None, ori=0, pos=(0, 0), size=(11.0, \n 11.0), color=[1, 1, 1], colorSpace='rgb', opacity=1, flipHoriz=False,\n flipVert=False, units='cm', texRes=128, interpolate=True, depth=0.0)\n", (4149, 4422), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((4461, 4480), 'psychopy.hardware.keyboard.Keyboard', 'keyboard.Keyboard', ([], {}), '()\n', (4478, 4480), False, 'from psychopy.hardware import keyboard\n'), ((4539, 4551), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (4549, 4551), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((4566, 4780), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': '"""blankscreen"""', 'text': 'None', 'font': '"""Arial"""', 'pos': '(0, 0)', 'height': '(0.1)', 'wrapWidth': 'None', 'ori': '(0)', 'color': '"""white"""', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'languageStyle': '"""LTR"""', 'units': '"""cm"""', 'depth': '(0.0)'}), "(win=win, name='blankscreen', text=None, font='Arial', pos=(\n 0, 0), height=0.1, wrapWidth=None, ori=0, color='white', colorSpace=\n 'rgb', opacity=1, 
languageStyle='LTR', units='cm', depth=0.0)\n", (4581, 4780), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((4877, 4889), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (4887, 4889), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((4905, 5265), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': '"""PracticeText"""', 'text': '"""PRESS SPACEBAR TO BEGIN PRACTICE!\n\n Remember: \n\n Game 1: Press X for c & o and N for i & l\n\n Game 2: Press X for d & b and N for q & p"""', 'font': '"""Arial"""', 'pos': '(0, 0)', 'height': '(0.5)', 'wrapWidth': 'None', 'ori': '(0)', 'color': '"""white"""', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'languageStyle': '"""LTR"""', 'units': '"""cm"""', 'depth': '(0.0)'}), '(win=win, name=\'PracticeText\', text=\n """PRESS SPACEBAR TO BEGIN PRACTICE!\n\n Remember: \n\n Game 1: Press X for c & o and N for i & l\n\n Game 2: Press X for d & b and N for q & p"""\n , font=\'Arial\', pos=(0, 0), height=0.5, wrapWidth=None, ori=0, color=\n \'white\', colorSpace=\'rgb\', opacity=1, languageStyle=\'LTR\', units=\'cm\',\n depth=0.0)\n', (4920, 5265), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((5296, 5315), 'psychopy.hardware.keyboard.Keyboard', 'keyboard.Keyboard', ([], {}), '()\n', (5313, 5315), False, 'from psychopy.hardware import keyboard\n'), ((5333, 5345), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (5343, 5345), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((5416, 5428), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (5426, 5428), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((5448, 5735), 'psychopy.visual.ImageStim', 'visual.ImageStim', ([], {'win': 'win', 'name': '"""imageReminderLow"""', 'image': "(_thisDir + '/images/LowReminder.jpeg')", 'mask': 'None', 'ori': '(0)', 'pos': 
'(0, 0)', 'size': '(6.0, 4.0)', 'color': '[1, 1, 1]', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'flipHoriz': '(False)', 'flipVert': '(False)', 'units': '"""cm"""', 'texRes': '(128)', 'interpolate': '(True)', 'depth': '(0.0)'}), "(win=win, name='imageReminderLow', image=_thisDir +\n '/images/LowReminder.jpeg', mask=None, ori=0, pos=(0, 0), size=(6.0, \n 4.0), color=[1, 1, 1], colorSpace='rgb', opacity=1, flipHoriz=False,\n flipVert=False, units='cm', texRes=128, interpolate=True, depth=0.0)\n", (5464, 5735), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((5771, 5790), 'psychopy.hardware.keyboard.Keyboard', 'keyboard.Keyboard', ([], {}), '()\n', (5788, 5790), False, 'from psychopy.hardware import keyboard\n'), ((5863, 5875), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (5873, 5875), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((5896, 6185), 'psychopy.visual.ImageStim', 'visual.ImageStim', ([], {'win': 'win', 'name': '"""imageReminderHigh"""', 'image': "(_thisDir + '/images/HighReminder.jpeg')", 'mask': 'None', 'ori': '(0)', 'pos': '(0, 0)', 'size': '(6.0, 4.0)', 'color': '[1, 1, 1]', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'flipHoriz': '(False)', 'flipVert': '(False)', 'units': '"""cm"""', 'texRes': '(128)', 'interpolate': '(True)', 'depth': '(0.0)'}), "(win=win, name='imageReminderHigh', image=_thisDir +\n '/images/HighReminder.jpeg', mask=None, ori=0, pos=(0, 0), size=(6.0, \n 4.0), color=[1, 1, 1], colorSpace='rgb', opacity=1, flipHoriz=False,\n flipVert=False, units='cm', texRes=128, interpolate=True, depth=0.0)\n", (5912, 6185), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((6222, 6241), 'psychopy.hardware.keyboard.Keyboard', 'keyboard.Keyboard', ([], {}), '()\n', (6239, 6241), False, 'from psychopy.hardware import keyboard\n'), ((6707, 6719), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (6717, 6719), False, 
'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((6780, 6799), 'psychopy.hardware.keyboard.Keyboard', 'keyboard.Keyboard', ([], {}), '()\n', (6797, 6799), False, 'from psychopy.hardware import keyboard\n'), ((7475, 7487), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (7485, 7487), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((7502, 7716), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': '"""blankscreen"""', 'text': 'None', 'font': '"""Arial"""', 'pos': '(0, 0)', 'height': '(0.1)', 'wrapWidth': 'None', 'ori': '(0)', 'color': '"""white"""', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'languageStyle': '"""LTR"""', 'units': '"""cm"""', 'depth': '(0.0)'}), "(win=win, name='blankscreen', text=None, font='Arial', pos=(\n 0, 0), height=0.1, wrapWidth=None, ori=0, color='white', colorSpace=\n 'rgb', opacity=1, languageStyle='LTR', units='cm', depth=0.0)\n", (7517, 7716), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((7777, 7789), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (7787, 7789), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((7861, 7882), 'psychopy.core.CountdownTimer', 'core.CountdownTimer', ([], {}), '()\n', (7880, 7882), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((58184, 58195), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (58193, 58195), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((740, 765), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (755, 765), False, 'import os\n'), ((1039, 1050), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (1048, 1050), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((6408, 6617), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': 
'name', 'text': '"""default text"""', 'font': '"""Arial"""', 'pos': 'pos', 'height': 'h', 'wrapWidth': 'None', 'ori': '(0)', 'color': '"""white"""', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'languageStyle': '"""LTR"""', 'units': '"""cm"""', 'depth': '(0.0)'}), "(win=win, name=name, text='default text', font='Arial', pos=\n pos, height=h, wrapWidth=None, ori=0, color='white', colorSpace='rgb',\n opacity=1, languageStyle='LTR', units='cm', depth=0.0)\n", (6423, 6617), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((40617, 40654), 'os.listdir', 'os.listdir', (["(_thisDir + '/distractors')"], {}), "(_thisDir + '/distractors')\n", (40627, 40654), False, 'import os\n'), ((40659, 40695), 'numpy.random.shuffle', 'np.random.shuffle', (['distractor_images'], {}), '(distractor_images)\n', (40676, 40695), True, 'import numpy as np\n'), ((40756, 40795), 'numpy.random.shuffle', 'np.random.shuffle', (['distractor_step_list'], {}), '(distractor_step_list)\n', (40773, 40795), True, 'import numpy as np\n'), ((29396, 29430), 'numpy.random.choice', 'np.random.choice', (['letters'], {'size': '(12)'}), '(letters, size=12)\n', (29412, 29430), True, 'import numpy as np\n'), ((35086, 35099), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (35096, 35099), False, 'import time\n'), ((35121, 35147), 'numpy.diff', 'np.diff', (['pracreaction_list'], {}), '(pracreaction_list)\n', (35128, 35147), True, 'import numpy as np\n'), ((35512, 35523), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (35521, 35523), False, 'import os\n'), ((40543, 40562), 'numpy.arange', 'np.arange', (['(2)', '(21)', '(1)'], {}), '(2, 21, 1)\n', (40552, 40562), True, 'import numpy as np\n'), ((41100, 41378), 'psychopy.visual.ImageStim', 'visual.ImageStim', ([], {'win': 'win', 'name': '"""distractor"""', 'image': "(_thisDir + f'/distractors/{image}')", 'mask': 'None', 'ori': '(0)', 'pos': '(0, 0)', 'size': '(2.0, 2.0)', 'color': '[1, 1, 1]', 'colorSpace': '"""rgb"""', 'opacity': 
'(1)', 'flipHoriz': '(False)', 'flipVert': '(False)', 'units': '"""cm"""', 'texRes': '(128)', 'interpolate': '(True)', 'depth': '(0.0)'}), "(win=win, name='distractor', image=_thisDir +\n f'/distractors/{image}', mask=None, ori=0, pos=(0, 0), size=(2.0, 2.0),\n color=[1, 1, 1], colorSpace='rgb', opacity=1, flipHoriz=False, flipVert\n =False, units='cm', texRes=128, interpolate=True, depth=0.0)\n", (41116, 41378), False, 'from psychopy import sound, gui, visual, core, data, event, logging, clock\n'), ((47517, 47530), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (47527, 47530), False, 'import time\n'), ((57459, 57481), 'numpy.diff', 'np.diff', (['reaction_list'], {}), '(reaction_list)\n', (57466, 57481), True, 'import numpy as np\n'), ((57950, 57961), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (57959, 57961), False, 'import os\n'), ((34872, 34898), 'numpy.array', 'np.array', (['correct_response'], {}), '(correct_response)\n', (34880, 34898), True, 'import numpy as np\n'), ((34902, 34927), 'numpy.array', 'np.array', (['item1_resp.keys'], {}), '(item1_resp.keys)\n', (34910, 34927), True, 'import numpy as np\n'), ((40927, 40961), 'numpy.random.choice', 'np.random.choice', (['letters'], {'size': '(12)'}), '(letters, size=12)\n', (40943, 40961), True, 'import numpy as np\n'), ((47214, 47240), 'numpy.array', 'np.array', (['correct_response'], {}), '(correct_response)\n', (47222, 47240), True, 'import numpy as np\n'), ((47244, 47269), 'numpy.array', 'np.array', (['item1_resp.keys'], {}), '(item1_resp.keys)\n', (47252, 47269), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from collections import Counter
from collections import OrderedDict
from itertools import chain
import numpy as np
from .enchant_backend import in_dictionary
from .enchant_backend import suggest_words
from .nltk_backend import make_ngrams
from .nltk_backend import calculate_levenshtein_distance
def correct_misspelling(token, distance_threshold=2):
    """ correct misspelling of token by comparing suggestions and measuring
    the levenshtein distance
    args:
        token: string
        distance_threshold: int that describes the maximum number of desired
            changed characters from 'token'
    returns:
        best_suggestion: string containing suggestion
        token: original token if no acceptable match was found
    """
    if in_dictionary(token):
        return token
    suggested_words = suggest_words(token)
    # Guard against both None and an empty suggestion list: the previous
    # version called min() on an empty sequence when the backend returned
    # no suggestions, raising ValueError.
    if not suggested_words:
        return token
    num_modified_characters = [
        calculate_levenshtein_distance(token, suggested_word)
        for suggested_word in suggested_words
    ]
    min_num_modified_characters = min(num_modified_characters)
    if distance_threshold > min_num_modified_characters:
        best_arg = num_modified_characters.index(min_num_modified_characters)
        return suggested_words[best_arg]
    return token
def remove_repeated_elements(tokens):
    """ drop duplicate tokens, keeping the first occurrence of each
    args:
        tokens: a list of tokens
    returns:
        filtered_tokens: a list with duplicates removed, original order kept
    """
    # OrderedDict.fromkeys keeps only the first occurrence of each key
    # while preserving insertion order.
    unique = OrderedDict.fromkeys(tokens)
    return list(unique)
def join(tokens):
    """ construct a single string by joining tokens with single spaces
    args:
        tokens: list of strings
    returns:
        joined_tokens: a string containing all tokens
    """
    return ' '.join(tokens)
def compute_jaccard_similarity(query, document):
    """ function adapted from:
    http://billchambers.me/tutorials/2014/12/21/tf-idf-explained-in-python.html
    calculates the intersection over union of a query
    in a given document.
    Returns 0.0 when both inputs are empty (the previous version raised
    ZeroDivisionError in that case).
    """
    query_set = set(query)
    document_set = set(document)
    union = query_set | document_set
    if not union:
        return 0.0
    return len(query_set & document_set) / len(union)
def calculate_jaccard_coefficient(a, b):
    """Return the Jaccard coefficient |a & b| / |a | b| of two token lists.

    Returns 0.0 when both lists are empty (the previous version raised
    ZeroDivisionError in that case).
    """
    union = set(a) | set(b)
    if not union:
        return 0.0
    intersection = set(a) & set(b)
    return len(intersection) / len(union)
def correct_misspelling_ngram(token, levenshtein_treshold=3):
    """ corrects token by suggesting words and filtering them
    using the levenhstein distance. Then it takes all filtered
    words and chooses the one with the highest jaccard coefficient
    calculated using bigrams.
    args:
        token: string
        levenshtein threshold: int
    returns:
        token: string (the best suggestion, or the original token when no
            acceptable suggestion exists)
    """
    if in_dictionary(token):
        return token
    suggested_words = suggest_words(token)
    # The previous version executed `return word` here with `word` never
    # assigned (NameError); fall back to the original token instead.
    if not suggested_words:
        return token
    token_bigrams = make_ngrams(token, 2)
    jaccard_coefficients = []
    best_suggested_words = []
    for suggested_word in suggested_words:
        distance = calculate_levenshtein_distance(token, suggested_word)
        if distance < levenshtein_treshold:
            suggested_bigrams = make_ngrams(suggested_word, 2)
            jaccard_coefficient = calculate_jaccard_coefficient(
                token_bigrams, suggested_bigrams)
            jaccard_coefficients.append(jaccard_coefficient)
            best_suggested_words.append(suggested_word)
    # If no suggestion passed the distance filter, max() on an empty list
    # would raise ValueError; return the token unchanged instead.
    if not jaccard_coefficients:
        return token
    highest_jaccard = max(jaccard_coefficients)
    best_arg = jaccard_coefficients.index(highest_jaccard)
    return best_suggested_words[best_arg]
def calculate_token_frequencies(sentences):
    """ count the number of times all tokens appear in all sentences.
    args:
        sentences: list of sentences where each sentence contains a
            list of tokens
    returns:
        word_frequencies: list of (token, count) pairs ordered from the
            most frequent token to the least frequent one.
    """
    counts = Counter()
    for sentence in sentences:
        counts.update(sentence)
    return counts.most_common()
def pad(tokens, max_token_size=22, remove=False, BOS_token='<BOS>',
        EOS_token='<EOS>', PAD_token='<PAD>'):
    """
    args:
        tokens: a list of strings containing tokens
        max_token_size: Max token size including the EOS and BOS tokens.
        remove: Boolean flag for determining if it should remove tokens
            bigger than max_token_size.
        BOS_token: string beginning of the sentence token.
        EOS_token: string end of the sentence token.
        PAD_token: string pad token.
    returns:
        list of strings containing all added/removed tokens.
        Tokens will get removed if the remove flag is enabled
        and the number of tokens is bigger than max_token_size.
    """
    body = list(tokens)
    content_limit = max_token_size - 2  # room left after BOS and EOS
    if len(body) > content_limit:
        if remove:
            # Too long and removal requested: drop the sentence entirely.
            return []
        # Too long and removal not requested: truncate to fit.
        body = body[:content_limit]
    framed = [BOS_token] + body + [EOS_token]
    # Right-pad up to the target length (no-op when already full).
    return framed + [PAD_token] * (max_token_size - len(framed))
def remove_infrequent_tokens(sentences, word_frequencies, min_frequency=3):
    # Unimplemented stub: presumably intended to drop tokens whose count in
    # `word_frequencies` falls below `min_frequency` — TODO implement/confirm.
    # Currently a no-op that returns None.
    pass
#######################################################
# Deprecated functions
#######################################################
# def remove_long_sentences(sentences, max_token_size=25):
# # TODO: Add y predictions that should/could also be filtered
# # you pass y and iterate over its first dimension.
# """ removes sentences with a length bigger that max_length.
# args:
# sentences: list of lists containing strings/tokens.
# max_length: int > 0
# returns:
# filtered_sentences: list pf lists containing a strings/tokens.
# """
# filtered_sentences = []
# for tokens in sentences:
# if len(tokens) <= max_token_size:
# filtered_sentences.append(tokens)
# return filtered_sentences
#
#
# def remove_long_sentences(sentences, associated_data=None,
# max_token_size=25):
# # TODO: Add y predictions that should/could also be filtered
# # you pass y and iterate over its first dimension.
# """ removes sentences with a length bigger that max_length.
# args:
# sentences: list of lists containing strings/tokens.
# data: Additional associated data that should be removed
# if a sentence is removed e. g. labels or another pair of sentences
# max_length: int > 0
# returns:
# filtered_sentences: list pf lists containing a strings/tokens.
# """
# mask = get_token_size_mask(sentences, max_token_size)
# sentences = np.asarray(sentences)[mask]
# associated_data = np.asarray(associated_data)[mask]
def get_token_size_mask(sentences, max_token_size=25):
    """ returns mask containing True for sentences with less
    or equal amount of tokens than the max_token_size, and
    false otherwise.
    args:
        sentences: list of lists containing strings/tokens
        max_token_size: int > 0
    returns:
        boolean numpy array of size len(sentences)
    """
    # dtype=bool so the mask can be used directly for boolean indexing;
    # the previous float-valued mask is rejected by numpy's fancy indexing
    # (which requires integer or boolean index arrays).
    mask = np.zeros(shape=len(sentences), dtype=bool)
    for sentence_arg, tokens in enumerate(sentences):
        mask[sentence_arg] = len(tokens) <= max_token_size
    return mask
def mask_data(data, mask):
    """ apply a boolean mask to `data`.
    args:
        data: list or numpy array
        mask: boolean numpy array selecting the entries to keep
    returns:
        a list when `data` was not already a numpy array, otherwise a
        (masked) numpy array
    """
    # The previous check `data is not np.ndarray` compared the object to the
    # class itself by identity, which is True for every instance, so ndarray
    # inputs were also converted and list-ified; isinstance restores the
    # intended type-preserving behavior.
    if not isinstance(data, np.ndarray):
        return np.asarray(data)[mask].tolist()
    return data[mask]
def pad_with_zeros(sentences, max_length=25):
    """ pad every sentence's vector matrix with zero rows up to max_length.
    args:
        sentences: list where each entry is a (sentence_length,
            embedding_dimension) sequence of vectors
        max_length: int, number of rows each padded matrix should have
    returns:
        list of (max_length, embedding_dimension) nested lists
    """
    padded = []
    for sentence in sentences:
        matrix = np.asarray(sentence)
        rows, dim = matrix.shape
        filler = np.zeros(shape=(max_length - rows, dim))
        padded.append(np.concatenate((matrix, filler), axis=0).tolist())
    return padded
| [
"itertools.chain",
"collections.OrderedDict",
"numpy.asarray",
"numpy.zeros",
"numpy.concatenate"
] | [((1801, 1847), 'collections.OrderedDict', 'OrderedDict', (['((token, None) for token in tokens)'], {}), '((token, None) for token in tokens)\n', (1812, 1847), False, 'from collections import OrderedDict\n'), ((8368, 8384), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (8378, 8384), True, 'import numpy as np\n'), ((8566, 8585), 'numpy.asarray', 'np.asarray', (['vectors'], {}), '(vectors)\n', (8576, 8585), True, 'import numpy as np\n'), ((8721, 8773), 'numpy.zeros', 'np.zeros', ([], {'shape': '(missing_zeros, embedding_dimension)'}), '(shape=(missing_zeros, embedding_dimension))\n', (8729, 8773), True, 'import numpy as np\n'), ((8792, 8837), 'numpy.concatenate', 'np.concatenate', (['(vectors, zero_array)'], {'axis': '(0)'}), '((vectors, zero_array), axis=0)\n', (8806, 8837), True, 'import numpy as np\n'), ((4570, 4587), 'itertools.chain', 'chain', (['*sentences'], {}), '(*sentences)\n', (4575, 4587), False, 'from itertools import chain\n')] |
# Copyright (c) 2022, ETH Zurich and UNC Chapel Hill.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: <NAME> (jsch-at-demuc-dot-de)
# This script exports a COLMAP database to the file structure to run Bundler.
import os
import argparse
import sqlite3
import shutil
import gzip
import numpy as np
def parse_args():
    """Parse the command-line options for the Bundler export script."""
    parser = argparse.ArgumentParser()
    # The three path options are all mandatory.
    for flag in ("--database_path", "--image_path", "--output_path"):
        parser.add_argument(flag, required=True)
    parser.add_argument("--min_num_matches", type=int, default=15)
    return parser.parse_args()
def pair_id_to_image_ids(pair_id):
    """Split a packed pair identifier into its two image ids.

    The database packs two ids as pair_id = image_id1 * 2147483647 +
    image_id2. divmod keeps both results as integers — the previous true
    division produced a float image_id1 under Python 3.
    """
    image_id1, image_id2 = divmod(pair_id, 2147483647)
    return image_id1, image_id2
def main():
    """Export a COLMAP SQLite database to the Bundler file layout.

    Writes list.txt, per-image .key.gz feature files, matches.init.txt and a
    run_bundler.sh helper script into args.output_path, copying the source
    images alongside them.
    """
    args = parse_args()

    connection = sqlite3.connect(args.database_path)
    cursor = connection.cursor()

    # The output directory may already exist from a previous run.
    os.makedirs(args.output_path, exist_ok=True)

    # Camera intrinsics: camera_id -> raw parameter vector (focal length
    # is the first entry).
    cameras = {}
    cursor.execute("SELECT camera_id, params FROM cameras;")
    for row in cursor:
        camera_id = row[0]
        # np.frombuffer replaces the deprecated np.fromstring for blobs.
        params = np.frombuffer(row[1], dtype=np.double)
        cameras[camera_id] = params

    images = {}
    with open(os.path.join(args.output_path, "list.txt"), "w") as fid:
        cursor.execute("SELECT image_id, camera_id, name FROM images;")
        for row in cursor:
            image_id = row[0]
            camera_id = row[1]
            image_name = row[2]
            print("Copying image", image_name)
            images[image_id] = (len(images), image_name)
            fid.write("./%s 0 %f\n" % (image_name, cameras[camera_id][0]))
            if not os.path.exists(os.path.join(args.output_path, image_name)):
                shutil.copyfile(os.path.join(args.image_path, image_name),
                                os.path.join(args.output_path, image_name))

    # dict.iteritems() does not exist in Python 3; .items() is equivalent.
    for image_id, (image_idx, image_name) in images.items():
        print("Exporting key file for", image_name)
        base_name, ext = os.path.splitext(image_name)
        key_file_name = os.path.join(args.output_path, base_name + ".key")
        key_file_name_gz = key_file_name + ".gz"
        if os.path.exists(key_file_name_gz):
            continue

        cursor.execute("SELECT data FROM keypoints WHERE image_id=?;",
                       (image_id,))
        row = next(cursor)
        if row[0] is None:
            # Image without extracted features: emit empty arrays.
            keypoints = np.zeros((0, 6), dtype=np.float32)
            descriptors = np.zeros((0, 128), dtype=np.uint8)
        else:
            keypoints = np.frombuffer(row[0], dtype=np.float32).reshape(-1, 6)
            cursor.execute("SELECT data FROM descriptors WHERE image_id=?;",
                           (image_id,))
            row = next(cursor)
            descriptors = np.frombuffer(row[0], dtype=np.uint8).reshape(-1, 128)

        with open(key_file_name, "w") as fid:
            fid.write("%d %d\n" % (keypoints.shape[0], descriptors.shape[1]))
            for r in range(keypoints.shape[0]):
                # Bundler expects (row, col) order, i.e. y before x.
                fid.write("%f %f %f %f\n" % (keypoints[r, 1], keypoints[r, 0],
                                             keypoints[r, 2], keypoints[r, 3]))
                # Descriptors are written 20 values per line.
                for i in range(0, 128, 20):
                    desc_block = descriptors[r, i:i+20]
                    fid.write(" ".join(map(str, desc_block.ravel().tolist())))
                    fid.write("\n")

        # Bundler reads gzip-compressed key files; compress and remove the
        # uncompressed intermediate.
        with open(key_file_name, "rb") as fid_in:
            with gzip.open(key_file_name + ".gz", "wb") as fid_out:
                fid_out.writelines(fid_in)

        os.remove(key_file_name)

    with open(os.path.join(args.output_path, "matches.init.txt"), "w") as fid:
        cursor.execute("SELECT pair_id, data FROM two_view_geometries "
                       "WHERE rows>=?;", (args.min_num_matches,))
        for row in cursor:
            pair_id = row[0]
            inlier_matches = np.frombuffer(row[1],
                                          dtype=np.uint32).reshape(-1, 2)
            image_id1, image_id2 = pair_id_to_image_ids(pair_id)
            image_idx1 = images[image_id1][0]
            image_idx2 = images[image_id2][0]
            fid.write("%d %d\n%d\n" % (image_idx1, image_idx2,
                                       inlier_matches.shape[0]))
            for i in range(inlier_matches.shape[0]):
                fid.write("%d %d\n" % (inlier_matches[i, 0],
                                       inlier_matches[i, 1]))

    with open(os.path.join(args.output_path, "run_bundler.sh"), "w") as fid:
        fid.write("bin/Bundler list.txt \\\n")
        fid.write("--run_bundle \\\n")
        fid.write("--use_focal_estimate \\\n")
        fid.write("--output_all bundle_ \\\n")
        fid.write("--constrain_focal \\\n")
        fid.write("--estimate_distortion \\\n")
        fid.write("--match_table matches.init.txt \\\n")
        fid.write("--variable_focal_length \\\n")
        fid.write("--output_dir bundle \\\n")
        fid.write("--output bundle.out \\\n")
        fid.write("--constrain_focal_weight 0.0001 \\\n")

    cursor.close()
    connection.close()
# Script entry point: only run the export when executed directly.
if __name__ == "__main__":
    main()
| [
"os.path.exists",
"sqlite3.connect",
"os.makedirs",
"argparse.ArgumentParser",
"gzip.open",
"os.path.splitext",
"os.path.join",
"numpy.zeros",
"numpy.fromstring",
"os.remove"
] | [((1844, 1869), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1867, 1869), False, 'import argparse\n'), ((2366, 2401), 'sqlite3.connect', 'sqlite3.connect', (['args.database_path'], {}), '(args.database_path)\n', (2381, 2401), False, 'import sqlite3\n'), ((2453, 2482), 'os.makedirs', 'os.makedirs', (['args.output_path'], {}), '(args.output_path)\n', (2464, 2482), False, 'import os\n'), ((2654, 2692), 'numpy.fromstring', 'np.fromstring', (['row[1]'], {'dtype': 'np.double'}), '(row[1], dtype=np.double)\n', (2667, 2692), True, 'import numpy as np\n'), ((3561, 3589), 'os.path.splitext', 'os.path.splitext', (['image_name'], {}), '(image_name)\n', (3577, 3589), False, 'import os\n'), ((3614, 3664), 'os.path.join', 'os.path.join', (['args.output_path', "(base_name + '.key')"], {}), "(args.output_path, base_name + '.key')\n", (3626, 3664), False, 'import os\n'), ((3725, 3757), 'os.path.exists', 'os.path.exists', (['key_file_name_gz'], {}), '(key_file_name_gz)\n', (3739, 3757), False, 'import os\n'), ((5099, 5123), 'os.remove', 'os.remove', (['key_file_name'], {}), '(key_file_name)\n', (5108, 5123), False, 'import os\n'), ((2760, 2802), 'os.path.join', 'os.path.join', (['args.output_path', '"""list.txt"""'], {}), "(args.output_path, 'list.txt')\n", (2772, 2802), False, 'import os\n'), ((3966, 4000), 'numpy.zeros', 'np.zeros', (['(0, 6)'], {'dtype': 'np.float32'}), '((0, 6), dtype=np.float32)\n', (3974, 4000), True, 'import numpy as np\n'), ((4027, 4061), 'numpy.zeros', 'np.zeros', (['(0, 128)'], {'dtype': 'np.uint8'}), '((0, 128), dtype=np.uint8)\n', (4035, 4061), True, 'import numpy as np\n'), ((5139, 5189), 'os.path.join', 'os.path.join', (['args.output_path', '"""matches.init.txt"""'], {}), "(args.output_path, 'matches.init.txt')\n", (5151, 5189), False, 'import os\n'), ((6000, 6048), 'os.path.join', 'os.path.join', (['args.output_path', '"""run_bundler.sh"""'], {}), "(args.output_path, 'run_bundler.sh')\n", (6012, 6048), False, 'import 
os\n'), ((4996, 5034), 'gzip.open', 'gzip.open', (["(key_file_name + '.gz')", '"""wb"""'], {}), "(key_file_name + '.gz', 'wb')\n", (5005, 5034), False, 'import gzip\n'), ((3222, 3264), 'os.path.join', 'os.path.join', (['args.output_path', 'image_name'], {}), '(args.output_path, image_name)\n', (3234, 3264), False, 'import os\n'), ((3299, 3340), 'os.path.join', 'os.path.join', (['args.image_path', 'image_name'], {}), '(args.image_path, image_name)\n', (3311, 3340), False, 'import os\n'), ((3374, 3416), 'os.path.join', 'os.path.join', (['args.output_path', 'image_name'], {}), '(args.output_path, image_name)\n', (3386, 3416), False, 'import os\n'), ((4100, 4139), 'numpy.fromstring', 'np.fromstring', (['row[0]'], {'dtype': 'np.float32'}), '(row[0], dtype=np.float32)\n', (4113, 4139), True, 'import numpy as np\n'), ((4326, 4363), 'numpy.fromstring', 'np.fromstring', (['row[0]'], {'dtype': 'np.uint8'}), '(row[0], dtype=np.uint8)\n', (4339, 4363), True, 'import numpy as np\n'), ((5427, 5465), 'numpy.fromstring', 'np.fromstring', (['row[1]'], {'dtype': 'np.uint32'}), '(row[1], dtype=np.uint32)\n', (5440, 5465), True, 'import numpy as np\n')] |
# ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware
# <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2019.
import numpy as np
from search.utils import *
class DataProvider:
VALID_SEED = 0 # random seed for the validation set
@staticmethod
def name():
""" Return name of the dataset """
raise NotImplementedError
@property
def data_shape(self):
""" Return shape as python list of one data entry """
raise NotImplementedError
@property
def n_classes(self):
""" Return `int` of num classes """
raise NotImplementedError
@property
def save_path(self):
""" local path to save the data """
raise NotImplementedError
@property
def data_url(self):
""" link to download the data """
raise NotImplementedError
@staticmethod
def random_sample_valid_set(train_labels, valid_size, n_classes):
train_size = len(train_labels)
assert train_size > valid_size
g = torch.Generator()
g.manual_seed(DataProvider.VALID_SEED) # set random seed before sampling validation set
rand_indexes = torch.randperm(train_size, generator=g).tolist()
train_indexes, valid_indexes = [], []
per_class_remain = get_split_list(valid_size, n_classes)
for idx in rand_indexes:
label = train_labels[idx]
if isinstance(label, float):
label = int(label)
elif isinstance(label, np.ndarray):
label = np.argmax(label)
else:
assert isinstance(label, int)
if per_class_remain[label] > 0:
valid_indexes.append(idx)
per_class_remain[label] -= 1
else:
train_indexes.append(idx)
return train_indexes, valid_indexes
| [
"numpy.argmax"
] | [((1598, 1614), 'numpy.argmax', 'np.argmax', (['label'], {}), '(label)\n', (1607, 1614), True, 'import numpy as np\n')] |
import os
import json
import ecco
from IPython import display as d
from ecco import util, lm_plots
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.nn import functional as F
from sklearn import decomposition
from typing import Optional, List
class OutputSeq:
def __init__(self,
token_ids=None,
n_input_tokens=None,
tokenizer=None,
output_text=None,
tokens=None,
hidden_states=None,
attribution=None,
activations=None,
activations_type=None,
attention=None,
model_outputs=None,
lm_head=None,
device='cpu'):
self.token_ids = token_ids
self.tokenizer = tokenizer
self.n_input_tokens = n_input_tokens
self.output_text = output_text
self.tokens = tokens
self.hidden_states = hidden_states
self.attribution = attribution
self.activations = activations
self.activations_type = activations_type
self.model_outputs = model_outputs
self.attention_values = attention
self.lm_head = lm_head
self.device = device
self._path = os.path.dirname(ecco.__file__)
def __str__(self):
return "<LMOutput '{}' # of lm outputs: {}>".format(self.output_text, len(self.hidden_states))
def to(self, tensor: torch.Tensor):
if self.device == 'cuda':
return tensor.to('cuda')
return tensor
def explorable(self, printJson: Optional[bool] = False):
tokens = []
for idx, token in enumerate(self.tokens):
type = "input" if idx < self.n_input_tokens else 'output'
tokens.append({'token': token,
'token_id': int(self.token_ids[idx]),
'type': type
})
data = {
'tokens': tokens
}
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
js = """
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
ecco.renderOutputSequence(viz_id, {})
}}, function (err) {{
console.log(err);
}})""".format(data)
d.display(d.Javascript(js))
if printJson:
print(data)
def __call__(self, position=None, **kwargs):
if position is not None:
self.position(position, **kwargs)
else:
self.saliency(**kwargs)
def position(self, position, attr_method='grad_x_input'):
if (position < self.n_input_tokens) or (position > len(self.tokens) - 1):
raise ValueError("'position' should indicate a position of a generated token. "
"Accepted values for this sequence are between {} and {}."
.format(self.n_input_tokens, len(self.tokens) - 1))
importance_id = position - self.n_input_tokens
tokens = []
attribution = self.attribution[attr_method]
for idx, token in enumerate(self.tokens):
type = "input" if idx < self.n_input_tokens else 'output'
if idx < len(attribution[importance_id]):
imp = attribution[importance_id][idx]
else:
imp = -1
tokens.append({'token': token,
'token_id': int(self.token_ids[idx]),
'type': type,
'value': str(imp) # because json complains of floats
})
data = {
'tokens': tokens
}
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
js = """
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
ecco.renderSeqHighlightPosition(viz_id, {}, {})
}}, function (err) {{
console.log(err);
}})""".format(position, data)
d.display(d.Javascript(js))
def saliency(self, attr_method: Optional[str] = 'grad_x_input', style="minimal", **kwargs):
"""
Explorable showing saliency of each token generation step.
Hovering-over or tapping an output token imposes a saliency map on other tokens
showing their importance as features to that prediction.
"""
position = self.n_input_tokens
importance_id = position - self.n_input_tokens
tokens = []
attribution = self.attribution[attr_method]
for idx, token in enumerate(self.tokens):
type = "input" if idx < self.n_input_tokens else 'output'
if idx < len(attribution[importance_id]):
imp = attribution[importance_id][idx]
else:
imp = 0
tokens.append({'token': token,
'token_id': int(self.token_ids[idx]),
'type': type,
'value': str(imp), # because json complains of floats
'position': idx
})
data = {
'tokens': tokens,
'attributions': [att.tolist() for att in attribution]
}
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
# viz_id = 'viz_{}'.format(round(random.random() * 1000000))
if( style == "minimal"):
js = f"""
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
// ecco.interactiveTokens(viz_id, {{}})
window.ecco[viz_id] = new ecco.MinimalHighlighter({{
parentDiv: viz_id,
data: {data},
preset: 'viridis'
}})
window.ecco[viz_id].init();
window.ecco[viz_id].selectFirstToken();
}}, function (err) {{
console.log(err);
}})"""
elif (style == "detailed"):
js = f"""
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
window.ecco[viz_id] = ecco.interactiveTokens(viz_id, {data})
}}, function (err) {{
console.log(err);
}})"""
d.display(d.Javascript(js))
if 'printJson' in kwargs and kwargs['printJson']:
print(data)
return data
def _repr_html_(self, **kwargs):
# if util.type_of_script() == "jupyter":
self.explorable(**kwargs)
return '<OutputSeq>'
# else:
# return "<OutputSeq Generated tokens: {}. \nFull sentence:'{}' \n# of lm outputus: {}\nTokens:\n{}>" \
# .format(self.tokens[self.n_input_tokens:],
# self.output_text,
# len(self.outputs),
# ', '.join(["{}:'{}'".format(idx, t) for idx, t in enumerate(self.tokens)]))
def plot_feature_importance_barplots(self):
"""
Barplot showing the improtance of each input token. Prints one barplot
for each generated token.
TODO: This should be LMOutput I think
:return:
"""
printable_tokens = [repr(token) for token in self.tokens]
for i in self.importance:
importance = i.numpy()
lm_plots.token_barplot(printable_tokens, importance)
# print(i.numpy())
plt.show()
def layer_predictions(self, position: int = 0, topk: Optional[int] = 10, layer: Optional[int] = None, **kwargs):
"""
Visualization plotting the topk predicted tokens after each layer (using its hidden state).
:param output: OutputSeq object generated by LM.generate()
:param position: The index of the output token to trace
:param topk: Number of tokens to show for each layer
:param layer: None shows all layers. Can also pass an int with the layer id to show only that layer
"""
watch = self.to(torch.tensor([self.token_ids[self.n_input_tokens]]))
# There is one lm output per generated token. To get the index
output_index = position - self.n_input_tokens
if layer is not None:
hidden_states = self.hidden_states[layer + 1].unsqueeze(0)
else:
hidden_states = self.hidden_states[1:] # Ignore the first element (embedding)
k = topk
top_tokens = []
probs = []
data = []
print('Predictions for position {}'.format(position))
for layer_no, h in enumerate(hidden_states):
# print(h.shape)
hidden_state = h[position - 1]
# Use lm_head to project the layer's hidden state to output vocabulary
logits = self.lm_head(hidden_state)
softmax = F.softmax(logits, dim=-1)
sorted_softmax = self.to(torch.argsort(softmax))
# Not currently used. If we're "watching" a specific token, this gets its ranking
# idx = sorted_softmax.shape[0] - torch.nonzero((sorted_softmax == watch)).flatten()
layer_top_tokens = [self.tokenizer.decode([t]) for t in sorted_softmax[-k:]][::-1]
top_tokens.append(layer_top_tokens)
layer_probs = softmax[sorted_softmax[-k:]].cpu().detach().numpy()[::-1]
probs.append(layer_probs.tolist())
# Package in output format
layer_data = []
for idx, (token, prob) in enumerate(zip(layer_top_tokens, layer_probs)):
# print(layer_no, idx, token)
layer_num = layer if layer is not None else layer_no
layer_data.append({'token': token,
'prob': str(prob),
'ranking': idx + 1,
'layer': layer_num
})
data.append(layer_data)
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
js = f"""
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
let pred = new ecco.LayerPredictions({{
parentDiv: viz_id,
data:{json.dumps(data)}
}})
pred.init()
}}, function (err) {{
console.log(viz_id, err);
}})"""
d.display(d.Javascript(js))
if 'printJson' in kwargs and kwargs['printJson']:
print(data)
return data
def rankings(self, **kwargs):
"""
Plots the rankings (across layers) of the tokens the model selected.
Each column is a position in the sequence. Each row is a layer.
"""
hidden_states = self.hidden_states
n_layers = len(hidden_states)
position = hidden_states[0].shape[0] - self.n_input_tokens + 1
# print('position', position)
predicted_tokens = np.empty((n_layers - 1, position), dtype='U25')
rankings = np.zeros((n_layers - 1, position), dtype=np.int32)
token_found_mask = np.ones((n_layers - 1, position))
# loop through layer levels
for i, level in enumerate(hidden_states[1:]):
# Loop through generated/output positions
for j, hidden_state in enumerate(level[self.n_input_tokens - 1:]):
# print('hidden state layer', i, 'position', self.n_input_tokens-1+j)
# Project hidden state to vocabulary
# (after debugging pain: ensure input is on GPU, if appropriate)
logits = self.lm_head(hidden_state)
# logits = self.lm_head(torch.tensor(hidden_state))
# Sort by score (ascending)
sorted = torch.argsort(logits)
# What token was sampled in this position?
token_id = torch.tensor(self.token_ids[self.n_input_tokens + j])
# print('token_id', token_id)
# What's the index of the sampled token in the sorted list?
r = torch.nonzero((sorted == token_id)).flatten()
# subtract to get ranking (where 1 is the top scoring, because sorting was in ascending order)
ranking = sorted.shape[0] - r
# print('ranking', ranking)
# token_id = torch.argmax(sm)
token = self.tokenizer.decode([token_id])
predicted_tokens[i, j] = token
rankings[i, j] = int(ranking)
# print('layer', i, 'position', j, 'top1', token_id, 'actual label', output['token_ids'][j]+1)
if token_id == self.token_ids[j + 1]:
token_found_mask[i, j] = 0
input_tokens = [repr(t) for t in self.tokens[self.n_input_tokens - 1:-1]]
output_tokens = [repr(t) for t in self.tokens[self.n_input_tokens:]]
# print('in out', input_tokens, output_tokens)
lm_plots.plot_inner_token_rankings(input_tokens,
output_tokens,
rankings,
**kwargs)
if 'printJson' in kwargs and kwargs['printJson']:
data = {'input_tokens': input_tokens,
'output_tokens': output_tokens,
'rankings': rankings,
'predicted_tokens': predicted_tokens}
print(data)
return data
def rankings_watch(self, watch: List[int] = None, position: int = -1, **kwargs):
"""
Plots the rankings of the tokens whose ids are supplied in the watch list.
Only considers one position.
"""
if position != -1:
position = position - 1 # e.g. position 5 corresponds to activation 4
hidden_states = self.hidden_states
n_layers = len(hidden_states)
n_tokens_to_watch = len(watch)
# predicted_tokens = np.empty((n_layers - 1, n_tokens_to_watch), dtype='U25')
rankings = np.zeros((n_layers - 1, n_tokens_to_watch), dtype=np.int32)
# loop through layer levels
for i, level in enumerate(hidden_states[1:]): # Skip the embedding layer
# Loop through generated/output positions
for j, token_id in enumerate(watch):
hidden_state = level[position]
# Project hidden state to vocabulary
# (after debugging pain: ensure input is on GPU, if appropriate)
logits = self.lm_head(hidden_state)
# logits = lmhead(torch.tensor(hidden_state))
# Sort by score (ascending)
sorted = torch.argsort(logits)
# What token was sampled in this position?
token_id = torch.tensor(token_id)
# print('token_id', token_id)
# What's the index of the sampled token in the sorted list?
r = torch.nonzero((sorted == token_id)).flatten()
# subtract to get ranking (where 1 is the top scoring, because sorting was in ascending order)
ranking = sorted.shape[0] - r
# print('ranking', ranking)
# token_id = torch.argmax(sm)
# token = self.tokenizer.decode([token_id])
# predicted_tokens[i, j] = token
rankings[i, j] = int(ranking)
# print('layer', i, 'position', j, 'top1', token_id, 'actual label', output['token_ids'][j]+1)
# if token_id == self.token_ids[j + 1]:
# token_found_mask[i, j] = 0
input_tokens = [t for t in self.tokens]
output_tokens = [repr(self.tokenizer.decode(t)) for t in watch]
# print('in out', input_tokens, output_tokens)
lm_plots.plot_inner_token_rankings_watch(input_tokens,
output_tokens,
rankings)
if 'printJson' in kwargs and kwargs['printJson']:
data = {'input_tokens': input_tokens,
'output_tokens': output_tokens,
'rankings': rankings}
print(data)
return data
def run_nmf(self, **kwargs):
"""
Run Non-negative Matrix Factorization on network activations of FFNN.
Saves the components in self.components
"""
return NMF(self.activations,
n_input_tokens=self.n_input_tokens,
token_ids=self.token_ids,
_path=self._path,
tokens=self.tokens, **kwargs)
def attention(self, attention_values=None, layer=0, **kwargs):
position = self.n_input_tokens
# importance_id = position - self.n_input_tokens
importance_id = self.n_input_tokens - 1 # Sete first values to first output token
tokens = []
if attention_values:
attn = attention_values
else:
attn = self.attention_values[layer]
# normalize attention heads
attn = attn.sum(axis=1) / attn.shape[1]
for idx, token in enumerate(self.tokens):
# print(idx, attn.shape)
type = "input" if idx < self.n_input_tokens else 'output'
if idx < len(attn[0][importance_id]):
attention_value = attn[0][importance_id][idx].cpu().detach().numpy()
else:
attention_value = 0
tokens.append({'token': token,
'token_id': int(self.token_ids[idx]),
'type': type,
'value': str(attention_value), # because json complains of floats
'position': idx
})
data = {
'tokens': tokens,
'attributions': [att.tolist() for att in attn[0].cpu().detach().numpy()]
}
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
js = """
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
ecco.interactiveTokens(viz_id, {})
}}, function (err) {{
console.log(err);
}})""".format(data)
d.display(d.Javascript(js))
if 'printJson' in kwargs and kwargs['printJson']:
print(data)
class NMF:
" Conducts NMF and holds the models and components "
def __init__(self, activations: np.ndarray,
n_input_tokens: int = 0,
token_ids: torch.Tensor = torch.Tensor(0),
_path: str = '',
n_components: int = 10,
# from_layer: Optional[int] = None,
# to_layer: Optional[int] = None,
tokens: Optional[List[str]] = None,
**kwargs):
self._path = _path
self.token_ids = token_ids
self.n_input_tokens = n_input_tokens
from_layer = kwargs['from_layer'] if 'from_layer' in kwargs else None
to_layer = kwargs['to_layer'] if 'to_layer' in kwargs else None
if len(activations.shape) != 3:
raise ValueError(f"The 'activations' parameter should have three dimensions: (layers, neurons, positions). "
f"Supplied dimensions: {activations.shape}", 'activations')
if from_layer is not None or to_layer is not None:
from_layer = from_layer if from_layer is not None else 0
to_layer = to_layer if to_layer is not None else activations.shape[0]
if from_layer == to_layer:
raise ValueError(f"from_layer ({from_layer}) and to_layer ({to_layer}) cannot be the same value. "
"They must be apart by at least one to allow for a layer of activations.")
if from_layer > to_layer:
raise ValueError(f"from_layer ({from_layer}) cannot be larger than to_layer ({to_layer}).")
else:
from_layer = 0
to_layer = activations.shape[0]
merged_act = np.concatenate(activations[from_layer: to_layer], axis=0)
activations = np.expand_dims(merged_act, axis=0)
self.tokens = tokens
" Run NMF. Activations is neuron activations shaped (layers, neurons, positions)"
n_output_tokens = activations.shape[-1]
n_layers = activations.shape[0]
n_components = min([n_components, n_output_tokens])
components = np.zeros((n_layers, n_components, n_output_tokens))
models = []
# Get rid of negative activation values
# (There are some, because GPT2 uses GLEU, which allow small negative values)
activations = np.maximum(activations, 0)
# print(activations.shape)
for idx, layer in enumerate(activations):
# print(layer.shape)
model = decomposition.NMF(n_components=n_components,
init='random',
random_state=0,
max_iter=500)
components[idx] = model.fit_transform(layer.T).T
models.append(model)
self.models = models
self.components = components
def explore(self, **kwargs):
# position = self.n_input_tokens + 1
tokens = []
for idx, token in enumerate(self.tokens): # self.tokens[:-1]
type = "input" if idx < self.n_input_tokens else 'output'
tokens.append({'token': token,
'token_id': int(self.token_ids[idx]),
'type': type,
# 'value': str(components[0][comp_num][idx]), # because json complains of floats
'position': idx
})
# Duplicate the factor at index 'n_input_tokens'. THis way
# each token has an activation value (instead of having one activation less than tokens)
# But with different meanings: For inputs, the activation is a response
# For outputs, the activation is a cause
# print('shape', components.shape)
# for i, comp in enumerate(components[0]):
# print(i, comp, '\nconcat:', np.concatenate([comp[:self.n_input_tokens], comp[self.n_input_tokens-1:]]))
factors = np.array(
[[np.concatenate([comp[:self.n_input_tokens], comp[self.n_input_tokens - 1:]]) for comp in
self.components[0]]])
factors = [comp.tolist() for comp in factors]
data = {
'tokens': tokens,
'factors': factors
}
d.display(d.HTML(filename=os.path.join(self._path, "html", "setup.html")))
d.display(d.HTML(filename=os.path.join(self._path, "html", "basic.html")))
viz_id = 'viz_{}'.format(round(random.random() * 1000000))
# print(data)
js = """
requirejs(['basic', 'ecco'], function(basic, ecco){{
const viz_id = basic.init()
ecco.interactiveTokensAndFactorSparklines(viz_id, {})
}}, function (err) {{
console.log(err);
}})""".format(data)
d.display(d.Javascript(js))
if 'printJson' in kwargs and kwargs['printJson']:
print(data)
def plot(self, n_components=3):
for idx, comp in enumerate(self.components):
# print('Layer {} components'.format(idx), 'Variance: {}'.format(lm.variances[idx][:n_components]))
print('Layer {} components'.format(idx))
comp = comp[:n_components, :].T
# plt.figure(figsize=(16,2))
fig, ax1 = plt.subplots(1)
plt.subplots_adjust(wspace=.4)
fig.set_figheight(2)
fig.set_figwidth(17)
# fig.tight_layout()
# PCA Line plot
ax1.plot(comp)
ax1.set_xticks(range(len(self.tokens)))
ax1.set_xticklabels(self.tokens, rotation=-90)
ax1.legend(['Component {}'.format(i + 1) for i in range(n_components)], loc='center left',
bbox_to_anchor=(1.01, 0.5))
plt.show()
| [
"torch.nn.functional.softmax",
"ecco.lm_plots.plot_inner_token_rankings",
"IPython.display.Javascript",
"ecco.lm_plots.plot_inner_token_rankings_watch",
"json.dumps",
"torch.argsort",
"numpy.empty",
"numpy.concatenate",
"numpy.maximum",
"sklearn.decomposition.NMF",
"numpy.ones",
"torch.Tensor"... | [((1292, 1322), 'os.path.dirname', 'os.path.dirname', (['ecco.__file__'], {}), '(ecco.__file__)\n', (1307, 1322), False, 'import os\n'), ((11693, 11740), 'numpy.empty', 'np.empty', (['(n_layers - 1, position)'], {'dtype': '"""U25"""'}), "((n_layers - 1, position), dtype='U25')\n", (11701, 11740), True, 'import numpy as np\n'), ((11760, 11810), 'numpy.zeros', 'np.zeros', (['(n_layers - 1, position)'], {'dtype': 'np.int32'}), '((n_layers - 1, position), dtype=np.int32)\n', (11768, 11810), True, 'import numpy as np\n'), ((11838, 11871), 'numpy.ones', 'np.ones', (['(n_layers - 1, position)'], {}), '((n_layers - 1, position))\n', (11845, 11871), True, 'import numpy as np\n'), ((13689, 13776), 'ecco.lm_plots.plot_inner_token_rankings', 'lm_plots.plot_inner_token_rankings', (['input_tokens', 'output_tokens', 'rankings'], {}), '(input_tokens, output_tokens, rankings,\n **kwargs)\n', (13723, 13776), False, 'from ecco import util, lm_plots\n'), ((14779, 14838), 'numpy.zeros', 'np.zeros', (['(n_layers - 1, n_tokens_to_watch)'], {'dtype': 'np.int32'}), '((n_layers - 1, n_tokens_to_watch), dtype=np.int32)\n', (14787, 14838), True, 'import numpy as np\n'), ((16547, 16626), 'ecco.lm_plots.plot_inner_token_rankings_watch', 'lm_plots.plot_inner_token_rankings_watch', (['input_tokens', 'output_tokens', 'rankings'], {}), '(input_tokens, output_tokens, rankings)\n', (16587, 16626), False, 'from ecco import util, lm_plots\n'), ((19502, 19517), 'torch.Tensor', 'torch.Tensor', (['(0)'], {}), '(0)\n', (19514, 19517), False, 'import torch\n'), ((21015, 21071), 'numpy.concatenate', 'np.concatenate', (['activations[from_layer:to_layer]'], {'axis': '(0)'}), '(activations[from_layer:to_layer], axis=0)\n', (21029, 21071), True, 'import numpy as np\n'), ((21095, 21129), 'numpy.expand_dims', 'np.expand_dims', (['merged_act'], {'axis': '(0)'}), '(merged_act, axis=0)\n', (21109, 21129), True, 'import numpy as np\n'), ((21419, 21470), 'numpy.zeros', 'np.zeros', (['(n_layers, 
n_components, n_output_tokens)'], {}), '((n_layers, n_components, n_output_tokens))\n', (21427, 21470), True, 'import numpy as np\n'), ((21648, 21674), 'numpy.maximum', 'np.maximum', (['activations', '(0)'], {}), '(activations, 0)\n', (21658, 21674), True, 'import numpy as np\n'), ((2534, 2550), 'IPython.display.Javascript', 'd.Javascript', (['js'], {}), '(js)\n', (2546, 2550), True, 'from IPython import display as d\n'), ((4435, 4451), 'IPython.display.Javascript', 'd.Javascript', (['js'], {}), '(js)\n', (4447, 4451), True, 'from IPython import display as d\n'), ((6857, 6873), 'IPython.display.Javascript', 'd.Javascript', (['js'], {}), '(js)\n', (6869, 6873), True, 'from IPython import display as d\n'), ((7915, 7967), 'ecco.lm_plots.token_barplot', 'lm_plots.token_barplot', (['printable_tokens', 'importance'], {}), '(printable_tokens, importance)\n', (7937, 7967), False, 'from ecco import util, lm_plots\n'), ((8011, 8021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8019, 8021), True, 'import matplotlib.pyplot as plt\n'), ((8589, 8640), 'torch.tensor', 'torch.tensor', (['[self.token_ids[self.n_input_tokens]]'], {}), '([self.token_ids[self.n_input_tokens]])\n', (8601, 8640), False, 'import torch\n'), ((9393, 9418), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (9402, 9418), True, 'from torch.nn import functional as F\n'), ((11141, 11157), 'IPython.display.Javascript', 'd.Javascript', (['js'], {}), '(js)\n', (11153, 11157), True, 'from IPython import display as d\n'), ((19197, 19213), 'IPython.display.Javascript', 'd.Javascript', (['js'], {}), '(js)\n', (19209, 19213), True, 'from IPython import display as d\n'), ((21818, 21911), 'sklearn.decomposition.NMF', 'decomposition.NMF', ([], {'n_components': 'n_components', 'init': '"""random"""', 'random_state': '(0)', 'max_iter': '(500)'}), "(n_components=n_components, init='random', random_state=0,\n max_iter=500)\n", (21835, 21911), False, 'from sklearn 
import decomposition\n'), ((24118, 24134), 'IPython.display.Javascript', 'd.Javascript', (['js'], {}), '(js)\n', (24130, 24134), True, 'from IPython import display as d\n'), ((24592, 24607), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (24604, 24607), True, 'import matplotlib.pyplot as plt\n'), ((24620, 24651), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.4)'}), '(wspace=0.4)\n', (24639, 24651), True, 'import matplotlib.pyplot as plt\n'), ((25087, 25097), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25095, 25097), True, 'import matplotlib.pyplot as plt\n'), ((9456, 9478), 'torch.argsort', 'torch.argsort', (['softmax'], {}), '(softmax)\n', (9469, 9478), False, 'import torch\n'), ((10995, 11011), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (11005, 11011), False, 'import json\n'), ((12505, 12526), 'torch.argsort', 'torch.argsort', (['logits'], {}), '(logits)\n', (12518, 12526), False, 'import torch\n'), ((12613, 12666), 'torch.tensor', 'torch.tensor', (['self.token_ids[self.n_input_tokens + j]'], {}), '(self.token_ids[self.n_input_tokens + j])\n', (12625, 12666), False, 'import torch\n'), ((15425, 15446), 'torch.argsort', 'torch.argsort', (['logits'], {}), '(logits)\n', (15438, 15446), False, 'import torch\n'), ((15533, 15555), 'torch.tensor', 'torch.tensor', (['token_id'], {}), '(token_id)\n', (15545, 15555), False, 'import torch\n'), ((2058, 2104), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""setup.html"""'], {}), "(self._path, 'html', 'setup.html')\n", (2070, 2104), False, 'import os\n'), ((2141, 2187), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""basic.html"""'], {}), "(self._path, 'html', 'basic.html')\n", (2153, 2187), False, 'import os\n'), ((2229, 2244), 'random.random', 'random.random', ([], {}), '()\n', (2242, 2244), False, 'import random\n'), ((3939, 3985), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', 
'"""setup.html"""'], {}), "(self._path, 'html', 'setup.html')\n", (3951, 3985), False, 'import os\n'), ((4022, 4068), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""basic.html"""'], {}), "(self._path, 'html', 'basic.html')\n", (4034, 4068), False, 'import os\n'), ((4110, 4125), 'random.random', 'random.random', ([], {}), '()\n', (4123, 4125), False, 'import random\n'), ((5695, 5741), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""setup.html"""'], {}), "(self._path, 'html', 'setup.html')\n", (5707, 5741), False, 'import os\n'), ((5778, 5824), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""basic.html"""'], {}), "(self._path, 'html', 'basic.html')\n", (5790, 5824), False, 'import os\n'), ((10539, 10585), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""setup.html"""'], {}), "(self._path, 'html', 'setup.html')\n", (10551, 10585), False, 'import os\n'), ((10622, 10668), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""basic.html"""'], {}), "(self._path, 'html', 'basic.html')\n", (10634, 10668), False, 'import os\n'), ((10710, 10725), 'random.random', 'random.random', ([], {}), '()\n', (10723, 10725), False, 'import random\n'), ((18724, 18770), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""setup.html"""'], {}), "(self._path, 'html', 'setup.html')\n", (18736, 18770), False, 'import os\n'), ((18807, 18853), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""basic.html"""'], {}), "(self._path, 'html', 'basic.html')\n", (18819, 18853), False, 'import os\n'), ((18895, 18910), 'random.random', 'random.random', ([], {}), '()\n', (18908, 18910), False, 'import random\n'), ((23302, 23378), 'numpy.concatenate', 'np.concatenate', (['[comp[:self.n_input_tokens], comp[self.n_input_tokens - 1:]]'], {}), '([comp[:self.n_input_tokens], comp[self.n_input_tokens - 1:]])\n', (23316, 23378), True, 'import numpy as np\n'), ((23605, 23651), 'os.path.join', 
'os.path.join', (['self._path', '"""html"""', '"""setup.html"""'], {}), "(self._path, 'html', 'setup.html')\n", (23617, 23651), False, 'import os\n'), ((23688, 23734), 'os.path.join', 'os.path.join', (['self._path', '"""html"""', '"""basic.html"""'], {}), "(self._path, 'html', 'basic.html')\n", (23700, 23734), False, 'import os\n'), ((23776, 23791), 'random.random', 'random.random', ([], {}), '()\n', (23789, 23791), False, 'import random\n'), ((12809, 12842), 'torch.nonzero', 'torch.nonzero', (['(sorted == token_id)'], {}), '(sorted == token_id)\n', (12822, 12842), False, 'import torch\n'), ((15698, 15731), 'torch.nonzero', 'torch.nonzero', (['(sorted == token_id)'], {}), '(sorted == token_id)\n', (15711, 15731), False, 'import torch\n')] |
# Copyright (c) 2020 zfit
import functools
import math as _mt
from collections import defaultdict
from typing import Any, Callable
import numpy as np
import tensorflow as tf
from ..settings import ztypes
from ..util.exception import BreakingAPIChangeError
from ..util.warnings import warn_advanced_feature
def constant(value, dtype=ztypes.float, shape=None, name="Const", verify_shape=None):
# TODO(tf2): remove this legacy thing below
if verify_shape is not None:
raise RuntimeError("'verify_shape' is not a valid argument anymore. It's always true. Please remove.")
return tf.constant(value, dtype=dtype, shape=shape, name=name)
pi = np.float64(_mt.pi)
def to_complex(number, dtype=ztypes.complex):
return tf.cast(number, dtype=dtype)
def to_real(x, dtype=ztypes.float):
return tf.cast(x, dtype=dtype)
def abs_square(x):
return tf.math.real(x * tf.math.conj(x))
def nth_pow(x, n, name=None):
"""Calculate the nth power of the complex Tensor x.
Args:
x:
n: Power of x, has to be a positive int
name: No effect, for API compatibility with tf.pow
"""
if not n >= 0:
raise ValueError("n (power) has to be >= 0. Currently, n={}".format(n))
power = to_complex(1.)
for _ in range(n):
power *= x
return power
def unstack_x(value: Any, num: Any = None, axis: int = -1, always_list: bool = False, name: str = "unstack_x"):
"""Unstack a Data object and return a list of (or a single) tensors in the right order.
Args:
value:
num:
axis:
always_list: If True, also return a list if only one element.
name:
Returns:
Union[List[tensorflow.python.framework.ops.Tensor], tensorflow.python.framework.ops.Tensor, None]:
"""
if isinstance(value, list):
if len(value) == 1 and not always_list:
value = value[0]
return value
try:
return value.unstack_x(always_list=always_list)
except AttributeError:
unstacked_x = tf.unstack(value=value, num=num, axis=axis, name=name)
if len(unstacked_x) == 1 and not always_list:
unstacked_x = unstacked_x[0]
return unstacked_x
def stack_x(values, axis: int = -1, name: str = "stack_x"):
return tf.stack(values=values, axis=axis, name=name)
# random sampling
def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None):
return tf.convert_to_tensor(value=value, dtype=dtype, name=name, dtype_hint=preferred_dtype)
def safe_where(condition: tf.Tensor, func: Callable, safe_func: Callable, values: tf.Tensor,
               value_safer: Callable = tf.ones_like) -> tf.Tensor:
    """Like :py:func:`tf.where` but fixes gradient `NaN` if func produces `NaN` with certain `values`.

    Args:
        condition: Boolean :py:class:`tf.Tensor`, same role as in :py:func:`tf.where`.
        func: Callable applied to the values; used where *condition* is True
            (the `x` of :py:func:`tf.where`, expressed as a function).
        safe_func: Callable applied to the values; used where *condition* is
            False (the `y` of :py:func:`tf.where`, expressed as a function).
        values: Values handed to *func* / *safe_func*.
        value_safer: Produces "safe" stand-in values so that `func` and its
            gradient stay finite on the masked-out entries.

    Returns:
        :py:class:`tf.Tensor`:
    """
    # Replace the dangerous entries *before* calling func, so the gradient
    # through func never sees them.
    neutralized = tf.where(condition=condition, x=values, y=value_safer(values))
    return tf.where(condition=condition, x=func(neutralized), y=safe_func(values))
def run_no_nan(func, x):
    """Evaluate ``func`` only where a first evaluation on ``x`` was finite.

    First runs ``func(x=x)``, finds the finite entries, re-runs ``func`` on
    just those entries of ``x`` and scatters the results back into a tensor
    of the original shape (positions that were non-finite keep
    ``tf.scatter_nd``'s default fill).

    Args:
        func: Callable taking a keyword argument ``x`` and returning a tensor.
        x: Input data; must expose ``.obs`` — presumably a zfit ``Data``-like
            object; TODO confirm against callers.

    Returns:
        Tensor shaped like ``func(x=x)`` holding the finite re-evaluations.
    """
    from zfit.core.data import Data
    value_with_nans = func(x=x)
    if value_with_nans.dtype in (tf.complex128, tf.complex64):
        value_with_nans = tf.math.real(value_with_nans) + tf.math.imag(value_with_nans)  # we care only about NaN or not
    finite_bools = tf.math.is_finite(tf.cast(value_with_nans, dtype=tf.float64))
    finite_indices = tf.where(finite_bools)
    new_x = tf.gather_nd(params=x, indices=finite_indices)
    # Wrap the gathered values back into a Data object so func can consume it.
    new_x = Data.from_tensor(obs=x.obs, tensor=new_x)
    vals_no_nan = func(x=new_x)
    result = tf.scatter_nd(indices=finite_indices, updates=vals_no_nan,
                           shape=tf.shape(input=value_with_nans, out_type=finite_indices.dtype))
    return result
class FunctionWrapperRegistry:
    """`tf.function`-like decorator factory with cache-invalidation support.

    Each registry instance wraps Python callables with `tf.function` and keeps
    a per-function cache of `FunctionCacheHolder`s so that a wrapped function
    can be retraced when its cache becomes invalid. Whether a category of
    functions (`wraps`) is jitted at all is controlled by `do_jit_types` and
    the global `allow_jit` switch.
    """
    all_wrapped_functions = []  # every concrete_func ever produced, across registries
    registries = []  # all registry instances created so far
    allow_jit = True  # global kill-switch for jitting

    _DEFAULT_DO_JIT_TYPES = defaultdict(lambda: True)
    _DEFAULT_DO_JIT_TYPES.update({
        None: True,
        'model': False,
        'loss': True,
        'sample': True,
        'model_sampling': True,
        'zfit_tensor': True,
        'tensor': True,
    })
    do_jit_types = _DEFAULT_DO_JIT_TYPES.copy()

    @classmethod
    def all_wrapped_functions_registered(cls):
        """Return True iff every wrapped function has registered its graph caches."""
        return all(func.zfit_graph_cache_registered for func in cls.all_wrapped_functions)

    def __init__(self, wraps=None, **kwargs_user) -> None:
        """`tf.function`-like decorator with additional cache-invalidation functionality.

        Args:
            wraps: Category name of the functions this registry wraps; looked
                up in `do_jit_types` to decide whether to jit.
            **kwargs_user: arguments to `tf.function`
        """
        super().__init__()
        self._initial_user_kwargs = kwargs_user
        self.registries.append(self)
        self.wrapped_func = None
        if wraps not in self.do_jit_types:  # fixed idiom: was `if not wraps in ...`
            # raise RuntimeError(f"Currently custom 'wraps' category ({wraps}) not allowed, set explicitly in `do_jit_types`")
            self.do_jit_types[wraps] = True
        self.wraps = wraps
        self.function_cache = defaultdict(list)
        self.reset(**self._initial_user_kwargs)
        self.currently_traced = set()

    @property
    def do_jit(self):
        # jit only if this category allows it AND jitting is globally enabled
        return self.do_jit_types[self.wraps] and self.allow_jit

    def reset(self, **kwargs_user):
        """Recreate the underlying `tf.function` and clear all per-function caches."""
        kwargs = dict(autograph=False, experimental_relax_shapes=True)
        kwargs.update(self._initial_user_kwargs)
        kwargs.update(kwargs_user)
        self.tf_function = tf.function(**kwargs)
        for cache in self.function_cache.values():
            cache.clear()

    def __call__(self, func):
        """Wrap *func*; returns a callable that dispatches to a cached traced version."""
        wrapped_func = self.tf_function(func)
        cache = self.function_cache[func]
        from ..util.cache import FunctionCacheHolder

        def concrete_func(*args, **kwargs):
            # Fall back to eager execution when jitting is off or when we are
            # already inside a trace of this function (avoids recursive tracing).
            if not self.do_jit or func in self.currently_traced:
                return func(*args, **kwargs)
            assert self.all_wrapped_functions_registered()
            self.currently_traced.add(func)
            nonlocal wrapped_func
            function_holder = FunctionCacheHolder(func, wrapped_func, args, kwargs)
            if function_holder in cache:
                func_holder_index = cache.index(function_holder)
                func_holder_cached = cache[func_holder_index]
                if func_holder_cached.is_valid:
                    function_holder = func_holder_cached
                else:
                    # cache entry went stale: retrace and replace it in place
                    wrapped_func = self.tf_function(func)  # update nonlocal wrapped function
                    function_holder = FunctionCacheHolder(func, wrapped_func, args, kwargs)
                    cache[func_holder_index] = function_holder
            else:
                cache.append(function_holder)
            func_to_run = function_holder.wrapped_func
            try:
                result = func_to_run(*args, **kwargs)
            finally:
                self.currently_traced.remove(func)
            return result

        concrete_func.zfit_graph_cache_registered = False
        return concrete_func
# equivalent to tf.function
def function(func=None, **kwargs):
    """Decorator equivalent to ``tf.function``, backed by a FunctionWrapperRegistry.

    Usable bare (``@function``) or with keyword arguments
    (``@function(wraps=...)``); positional use other than the bare-decorator
    form is rejected.
    """
    if callable(func):
        # bare decorator: @function
        return FunctionWrapperRegistry()(func)
    if func:
        raise ValueError("All argument have to be key-word only. `func` must not be used")
    # parameterized decorator: @function(...)
    return FunctionWrapperRegistry(**kwargs)
# legacy, remove 0.6
def function_tf_input(*_, **__):
    """Removed legacy API; always raises :class:`BreakingAPIChangeError`."""
    # fixed: implicit string concatenation was missing a space ("yourown category")
    raise BreakingAPIChangeError("This function has been removed. Use `z.function(wraps='zfit_tensor') or your"
                                 " own category")
# legacy, remove 0.6
def function_sampling(*_, **__):
    """Removed legacy API; always raises :class:`BreakingAPIChangeError`."""
    # fixed: implicit string concatenation was missing a space ("yourown category")
    raise BreakingAPIChangeError("This function has been removed. Use `z.function(wraps='zfit_sampling') or your"
                                 " own category")
# Thin wrapper over tf.py_function that warns when numerical gradients are
# disabled: a py_function body is opaque to autograd, so parameter-dependent
# code inside it would silently get wrong gradients.
@functools.wraps(tf.py_function)
def py_function(func, inp, Tout, name=None):
    from .. import settings
    if not settings.options['numerical_grad']:
        # best-effort advisory only; execution continues either way
        warn_advanced_feature("Using py_function without numerical gradients. If the Python code does not contain any"
                              " parametrization by `zfit.Parameter` or similar, this can work out. Otherwise, in case"
                              " it depends on those, you may want to set `zfit.run.set_autograd_mode(=False)`.",
                              identifier="py_func_autograd")
    return tf.py_function(func=func, inp=inp, Tout=Tout, name=name)
| [
"tensorflow.unstack",
"tensorflow.math.imag",
"tensorflow.shape",
"zfit.core.data.Data.from_tensor",
"numpy.float64",
"tensorflow.py_function",
"functools.wraps",
"tensorflow.math.conj",
"tensorflow.where",
"tensorflow.constant",
"collections.defaultdict",
"tensorflow.function",
"tensorflow.... | [((663, 681), 'numpy.float64', 'np.float64', (['_mt.pi'], {}), '(_mt.pi)\n', (673, 681), True, 'import numpy as np\n'), ((8528, 8559), 'functools.wraps', 'functools.wraps', (['tf.py_function'], {}), '(tf.py_function)\n', (8543, 8559), False, 'import functools\n'), ((600, 655), 'tensorflow.constant', 'tf.constant', (['value'], {'dtype': 'dtype', 'shape': 'shape', 'name': 'name'}), '(value, dtype=dtype, shape=shape, name=name)\n', (611, 655), True, 'import tensorflow as tf\n'), ((741, 769), 'tensorflow.cast', 'tf.cast', (['number'], {'dtype': 'dtype'}), '(number, dtype=dtype)\n', (748, 769), True, 'import tensorflow as tf\n'), ((819, 842), 'tensorflow.cast', 'tf.cast', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (826, 842), True, 'import tensorflow as tf\n'), ((2274, 2319), 'tensorflow.stack', 'tf.stack', ([], {'values': 'values', 'axis': 'axis', 'name': 'name'}), '(values=values, axis=axis, name=name)\n', (2282, 2319), True, 'import tensorflow as tf\n'), ((2428, 2518), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ([], {'value': 'value', 'dtype': 'dtype', 'name': 'name', 'dtype_hint': 'preferred_dtype'}), '(value=value, dtype=dtype, name=name, dtype_hint=\n preferred_dtype)\n', (2448, 2518), True, 'import tensorflow as tf\n'), ((4180, 4202), 'tensorflow.where', 'tf.where', (['finite_bools'], {}), '(finite_bools)\n', (4188, 4202), True, 'import tensorflow as tf\n'), ((4215, 4261), 'tensorflow.gather_nd', 'tf.gather_nd', ([], {'params': 'x', 'indices': 'finite_indices'}), '(params=x, indices=finite_indices)\n', (4227, 4261), True, 'import tensorflow as tf\n'), ((4274, 4315), 'zfit.core.data.Data.from_tensor', 'Data.from_tensor', ([], {'obs': 'x.obs', 'tensor': 'new_x'}), '(obs=x.obs, tensor=new_x)\n', (4290, 4315), False, 'from zfit.core.data import Data\n'), ((4668, 4694), 'collections.defaultdict', 'defaultdict', (['(lambda : True)'], {}), '(lambda : True)\n', (4679, 4694), False, 'from collections import defaultdict\n'), ((9104, 
9160), 'tensorflow.py_function', 'tf.py_function', ([], {'func': 'func', 'inp': 'inp', 'Tout': 'Tout', 'name': 'name'}), '(func=func, inp=inp, Tout=Tout, name=name)\n', (9118, 9160), True, 'import tensorflow as tf\n'), ((4115, 4157), 'tensorflow.cast', 'tf.cast', (['value_with_nans'], {'dtype': 'tf.float64'}), '(value_with_nans, dtype=tf.float64)\n', (4122, 4157), True, 'import tensorflow as tf\n'), ((5767, 5784), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5778, 5784), False, 'from collections import defaultdict\n'), ((6192, 6213), 'tensorflow.function', 'tf.function', ([], {}), '(**kwargs)\n', (6203, 6213), True, 'import tensorflow as tf\n'), ((892, 907), 'tensorflow.math.conj', 'tf.math.conj', (['x'], {}), '(x)\n', (904, 907), True, 'import tensorflow as tf\n'), ((2036, 2090), 'tensorflow.unstack', 'tf.unstack', ([], {'value': 'value', 'num': 'num', 'axis': 'axis', 'name': 'name'}), '(value=value, num=num, axis=axis, name=name)\n', (2046, 2090), True, 'import tensorflow as tf\n'), ((3983, 4012), 'tensorflow.math.real', 'tf.math.real', (['value_with_nans'], {}), '(value_with_nans)\n', (3995, 4012), True, 'import tensorflow as tf\n'), ((4015, 4044), 'tensorflow.math.imag', 'tf.math.imag', (['value_with_nans'], {}), '(value_with_nans)\n', (4027, 4044), True, 'import tensorflow as tf\n'), ((4453, 4515), 'tensorflow.shape', 'tf.shape', ([], {'input': 'value_with_nans', 'out_type': 'finite_indices.dtype'}), '(input=value_with_nans, out_type=finite_indices.dtype)\n', (4461, 4515), True, 'import tensorflow as tf\n')] |
import numpy as np
import matplotlib.pyplot as plt
import cvxpy as cvx
from scipy.linalg import circulant
from scipy.stats import norm
import seaborn as sns
import pandas as pd
from scipy.integrate import solve_ivp
from scipy.spatial.distance import cdist
def BinaryRandomMatrix(S, M, p):
    """Return an S x M float matrix whose entries are 1.0 with probability p, else 0.0."""
    draws = np.random.rand(S, M)
    return (draws < p).astype(float)
def MakeAffinities(params):
    """Draw T-cell (pix) and T-reg (palphax) affinity matrices over antigen sites.

    The 'sampling' key of ``params`` selects the generative model:
    'Binary', '1D', 'Multidimensional', 'Circulant' or 'Fixed_degree'.
    Returns the pair ``(pix, palphax)``; on an invalid choice a message is
    printed and ``(nan, nan)`` is returned.
    """
    sampling = params['sampling']
    if sampling == 'Binary':
        # independent Bernoulli links; T-reg strengths get Gaussian noise around c
        pix = BinaryRandomMatrix(params['Num_tcell'],params['Num_sites'],params['pval_cell'])
        palphax = (params['c'] + np.random.normal(0,params['sigma_cp'],(params['Num_treg'],params['Num_sites']) ) )* BinaryRandomMatrix(params['Num_treg'],params['Num_sites'],params['pval_treg'])
    elif sampling == '1D':
        # sites on a ring; each cell picks a random site and sees a Gaussian
        # affinity profile (normalized to 1 at the chosen site) around it
        circ = circulant(norm.pdf(np.linspace(-params['Num_sites']/2,params['Num_sites']/2,params['Num_sites'])/params['niche_width'])/norm.pdf(0))
        Tcell_choice = np.random.choice(params['Num_sites'],size=params['Num_tcell'],replace=True)
        Treg_choice = np.random.choice(params['Num_sites'],size=params['Num_treg'],replace=True)
        pix = circ[Tcell_choice,:]
        palphax = params['c']*circ[Treg_choice,:]
    elif sampling == 'Multidimensional':
        # Gaussian affinity kernel between random points in a shape space
        antigens = np.random.randn(params['Num_sites'],params['shape_dim'])
        receptors = np.random.randn(params['Num_tcell'],params['shape_dim'])
        receptors_reg = np.random.randn(params['Num_treg'],params['shape_dim'])
        pix = np.exp(-cdist(receptors,antigens,'sqeuclidean')/(2*params['sigma']**2))
        palphax = params['c']*np.exp(-cdist(receptors_reg,antigens,'sqeuclidean')/(2*params['sigma']**2))
    elif sampling == 'Circulant':
        # like '1D' but with cells placed at evenly spaced (not random) sites
        circ = circulant(norm.pdf(np.linspace(-params['Num_sites']/2,params['Num_sites']/2,params['Num_sites'])/params['niche_width']))
        pix = circ[np.linspace(0,params['Num_sites']-1,params['Num_tcell'],dtype=int),:]
        palphax = params['c']*circ[np.linspace(0,params['Num_sites']-1,params['Num_treg'],dtype=int),:]
    elif sampling == 'Fixed_degree':
        # each site gets a (noisy) fixed number of T-reg partners with noisy
        # strength c, then the partners are shuffled within the column
        # NOTE(review): degrees above Num_treg or below 0 are not guarded — confirm callers keep them in range
        pix = BinaryRandomMatrix(params['Num_tcell'],params['Num_sites'],params['pval_cell'])
        palphax = np.zeros((params['Num_treg'],params['Num_sites']))
        degree = np.asarray(params['degree']+np.random.randn(params['Num_sites'])*params['sigma_degree'],dtype=int)
        for i in range(params['Num_sites']):
            palphax[:degree[i],i] = params['c']*np.ones(degree[i])+np.random.randn(degree[i])*params['sigma_c']
            np.random.shuffle(palphax[:,i])
    else:
        print('Invalid sampling choice. Valid choices are Binary, 1D, Circulant or Fixed_degree.')
        pix = np.nan
        palphax = np.nan
    return pix, palphax
def MakeOverlaps(pix, palphax, vx):
    """Compute site-weighted overlap matrices and intrinsic rates.

    Returns:
        phi_reg_reg: T-reg/T-reg overlap, (palphax * vx) @ palphax.T
        phi_cell_reg: T-cell/T-reg overlap, (pix * vx) @ palphax.T
        rvals: per-T-cell rates, pix @ vx
    """
    weighted_reg = palphax * vx
    weighted_cell = pix * vx
    phi_reg_reg = weighted_reg.dot(palphax.T)
    phi_cell_reg = weighted_cell.dot(palphax.T)
    rvals = pix.dot(vx)
    return phi_reg_reg, phi_cell_reg, rvals
def TrainNetwork(phi_reg_reg,phi_cell_reg,rvals):
    """Solve for steady-state T-reg abundances via a quadratic program.

    Minimizes (1/2) Treg' phi_reg_reg Treg subject to
    (phi_cell_reg / rvals) @ Treg >= 1 (one row per T cell) and Treg >= 0;
    the T-cell abundances are then read off the dual variables of the
    inequality constraints, rescaled by rvals.

    Returns:
        (Tcell, Treg) as numpy arrays.
    """
    Num_treg = len(phi_reg_reg)
    Num_tcell = len(phi_cell_reg)
    Treg = cvx.Variable(Num_treg)
    # G/h stack both constraint families into G @ Treg <= h
    G = np.vstack((-(phi_cell_reg.T/rvals).T,-np.eye(Num_treg)))
    h = np.hstack((-np.ones(Num_tcell),np.zeros(Num_treg)))
    constraints = [G@Treg <= h]
    obj = cvx.Minimize((1/2)*cvx.quad_form(Treg,phi_reg_reg))
    prob = cvx.Problem(obj, constraints)
    # tight ECOS tolerances; iteration cap keeps runtime bounded
    prob.solve(solver=cvx.ECOS,abstol=1e-7,feastol=1e-7,abstol_inacc=1e-7,feastol_inacc=1e-7,max_iters=100,verbose=False)
    # duals of the first Num_tcell rows give the T-cell sizes (KKT complementarity)
    Tcell=constraints[0].dual_value[:Num_tcell]/rvals
    Treg=Treg.value
    return Tcell,Treg
def ddt_simple(t, y, phi_reg_reg, phi_cell_reg, rvals):
    """Right-hand side of the reduced T-cell/T-reg ODE system.

    The state ``y`` concatenates the T-cell abundances (first
    ``len(phi_cell_reg)`` entries) and the T-reg abundances (the rest).

    Args:
        t: Time (unused; present for solve_ivp's (t, y) signature).
        y: Concatenated state vector [Tcell, Treg].
        phi_reg_reg: T-reg/T-reg overlap matrix.
        phi_cell_reg: T-cell/T-reg overlap matrix.
        rvals: Per-T-cell intrinsic rates.

    Returns:
        Concatenated derivative vector [dTcell/dt, dTreg/dt].
    """
    # fixed: removed unused local Num_treg = len(phi_reg_reg)
    Num_tcell = len(phi_cell_reg)
    Tcell = y[:Num_tcell]
    Treg = y[Num_tcell:]
    dTcelldt = Tcell*(rvals-phi_cell_reg.dot(Treg))
    dTregdt = Treg*(phi_cell_reg.T.dot(Tcell) - phi_reg_reg.dot(Treg))
    return np.hstack((dTcelldt, dTregdt))
def ddt_full(t, y, pix, palphax, vx):
    """Right-hand side of the full (site-resolved) T-cell/T-reg ODE system.

    The state ``y`` concatenates the T-cell abundances (first ``len(pix)``
    entries) and the T-reg abundances (the rest).

    Args:
        t: Time (unused; present for solve_ivp's (t, y) signature).
        y: Concatenated state vector [Tcell, Treg].
        pix: T-cell x site affinity matrix.
        palphax: T-reg x site affinity matrix.
        vx: Per-site weights.

    Returns:
        Concatenated derivative vector [dTcell/dt, dTreg/dt].
    """
    # fixed: removed unused local Num_treg; reuse Qx instead of recomputing
    # palphax.T.dot(Treg) a second time in the ILx denominator
    Num_tcell = len(pix)
    Tcell = y[:Num_tcell]
    Treg = y[Num_tcell:]
    Qx = palphax.T.dot(Treg)
    ILx = pix.T.dot(Tcell)/Qx
    dTcelldt = Tcell*pix.dot(vx*(1-Qx))
    dTregdt = Treg*palphax.dot(vx*(ILx-1))
    return np.hstack((dTcelldt, dTregdt))
"numpy.random.normal",
"cvxpy.Variable",
"cvxpy.Problem",
"numpy.eye",
"numpy.random.rand",
"numpy.ones",
"numpy.hstack",
"numpy.random.choice",
"scipy.spatial.distance.cdist",
"numpy.zeros",
"numpy.linspace",
"scipy.stats.norm.pdf",
"cvxpy.quad_form",
"numpy.random.randn",
"numpy.random... | [((297, 317), 'numpy.random.rand', 'np.random.rand', (['S', 'M'], {}), '(S, M)\n', (311, 317), True, 'import numpy as np\n'), ((325, 341), 'numpy.zeros', 'np.zeros', (['(S, M)'], {}), '((S, M))\n', (333, 341), True, 'import numpy as np\n'), ((3057, 3079), 'cvxpy.Variable', 'cvx.Variable', (['Num_treg'], {}), '(Num_treg)\n', (3069, 3079), True, 'import cvxpy as cvx\n'), ((3310, 3339), 'cvxpy.Problem', 'cvx.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (3321, 3339), True, 'import cvxpy as cvx\n'), ((3872, 3902), 'numpy.hstack', 'np.hstack', (['(dTcelldt, dTregdt)'], {}), '((dTcelldt, dTregdt))\n', (3881, 3902), True, 'import numpy as np\n'), ((4227, 4257), 'numpy.hstack', 'np.hstack', (['(dTcelldt, dTregdt)'], {}), '((dTcelldt, dTregdt))\n', (4236, 4257), True, 'import numpy as np\n'), ((953, 1030), 'numpy.random.choice', 'np.random.choice', (["params['Num_sites']"], {'size': "params['Num_tcell']", 'replace': '(True)'}), "(params['Num_sites'], size=params['Num_tcell'], replace=True)\n", (969, 1030), True, 'import numpy as np\n'), ((1051, 1127), 'numpy.random.choice', 'np.random.choice', (["params['Num_sites']"], {'size': "params['Num_treg']", 'replace': '(True)'}), "(params['Num_sites'], size=params['Num_treg'], replace=True)\n", (1067, 1127), True, 'import numpy as np\n'), ((3184, 3202), 'numpy.zeros', 'np.zeros', (['Num_treg'], {}), '(Num_treg)\n', (3192, 3202), True, 'import numpy as np\n'), ((3266, 3298), 'cvxpy.quad_form', 'cvx.quad_form', (['Treg', 'phi_reg_reg'], {}), '(Treg, phi_reg_reg)\n', (3279, 3298), True, 'import cvxpy as cvx\n'), ((591, 678), 'numpy.random.normal', 'np.random.normal', (['(0)', "params['sigma_cp']", "(params['Num_treg'], params['Num_sites'])"], {}), "(0, params['sigma_cp'], (params['Num_treg'], params[\n 'Num_sites']))\n", (607, 678), True, 'import numpy as np\n'), ((1271, 1328), 'numpy.random.randn', 'np.random.randn', (["params['Num_sites']", "params['shape_dim']"], {}), "(params['Num_sites'], 
params['shape_dim'])\n", (1286, 1328), True, 'import numpy as np\n'), ((1348, 1405), 'numpy.random.randn', 'np.random.randn', (["params['Num_tcell']", "params['shape_dim']"], {}), "(params['Num_tcell'], params['shape_dim'])\n", (1363, 1405), True, 'import numpy as np\n'), ((1429, 1485), 'numpy.random.randn', 'np.random.randn', (["params['Num_treg']", "params['shape_dim']"], {}), "(params['Num_treg'], params['shape_dim'])\n", (1444, 1485), True, 'import numpy as np\n'), ((3126, 3142), 'numpy.eye', 'np.eye', (['Num_treg'], {}), '(Num_treg)\n', (3132, 3142), True, 'import numpy as np\n'), ((3165, 3183), 'numpy.ones', 'np.ones', (['Num_tcell'], {}), '(Num_tcell)\n', (3172, 3183), True, 'import numpy as np\n'), ((917, 928), 'scipy.stats.norm.pdf', 'norm.pdf', (['(0)'], {}), '(0)\n', (925, 928), False, 'from scipy.stats import norm\n'), ((2190, 2241), 'numpy.zeros', 'np.zeros', (["(params['Num_treg'], params['Num_sites'])"], {}), "((params['Num_treg'], params['Num_sites']))\n", (2198, 2241), True, 'import numpy as np\n'), ((816, 904), 'numpy.linspace', 'np.linspace', (["(-params['Num_sites'] / 2)", "(params['Num_sites'] / 2)", "params['Num_sites']"], {}), "(-params['Num_sites'] / 2, params['Num_sites'] / 2, params[\n 'Num_sites'])\n", (827, 904), True, 'import numpy as np\n'), ((1507, 1548), 'scipy.spatial.distance.cdist', 'cdist', (['receptors', 'antigens', '"""sqeuclidean"""'], {}), "(receptors, antigens, 'sqeuclidean')\n", (1512, 1548), False, 'from scipy.spatial.distance import cdist\n'), ((1866, 1937), 'numpy.linspace', 'np.linspace', (['(0)', "(params['Num_sites'] - 1)", "params['Num_tcell']"], {'dtype': 'int'}), "(0, params['Num_sites'] - 1, params['Num_tcell'], dtype=int)\n", (1877, 1937), True, 'import numpy as np\n'), ((2526, 2558), 'numpy.random.shuffle', 'np.random.shuffle', (['palphax[:, i]'], {}), '(palphax[:, i])\n', (2543, 2558), True, 'import numpy as np\n'), ((1609, 1654), 'scipy.spatial.distance.cdist', 'cdist', (['receptors_reg', 'antigens', 
'"""sqeuclidean"""'], {}), "(receptors_reg, antigens, 'sqeuclidean')\n", (1614, 1654), False, 'from scipy.spatial.distance import cdist\n'), ((1745, 1833), 'numpy.linspace', 'np.linspace', (["(-params['Num_sites'] / 2)", "(params['Num_sites'] / 2)", "params['Num_sites']"], {}), "(-params['Num_sites'] / 2, params['Num_sites'] / 2, params[\n 'Num_sites'])\n", (1756, 1833), True, 'import numpy as np\n'), ((1971, 2041), 'numpy.linspace', 'np.linspace', (['(0)', "(params['Num_sites'] - 1)", "params['Num_treg']"], {'dtype': 'int'}), "(0, params['Num_sites'] - 1, params['Num_treg'], dtype=int)\n", (1982, 2041), True, 'import numpy as np\n'), ((2286, 2322), 'numpy.random.randn', 'np.random.randn', (["params['Num_sites']"], {}), "(params['Num_sites'])\n", (2301, 2322), True, 'import numpy as np\n'), ((2450, 2468), 'numpy.ones', 'np.ones', (['degree[i]'], {}), '(degree[i])\n', (2457, 2468), True, 'import numpy as np\n'), ((2469, 2495), 'numpy.random.randn', 'np.random.randn', (['degree[i]'], {}), '(degree[i])\n', (2484, 2495), True, 'import numpy as np\n')] |
# Distributed DL Server runs on worker nodes
# @author: <NAME>
# @created date: 2021-06-28
# @last modified date: 2021-09-03
# @note:
import asyncio
import gc
import numpy as np
import sys
import time
from asyncio import StreamReader, StreamWriter
from pympler import asizeof
from tensorflow.keras import datasets, layers, models, optimizers, utils
from tensorflow.keras.models import *
from textwrap import dedent
from ddlf.iworker import *
from ddlf.request import *
from ddlf.response import *
from ddlf.status import *
from ddlf.tools import *
from ddlf.transport import *
class Worker(IWorker):
    """Distributed deep-learning worker node.

    Runs an asyncio TCP server and executes commands received from the
    master: loading datasets and data partitions, injecting/running methods
    at runtime, and training the local Keras model. Every command produces a
    `Response` that is sent back over the connection.
    """

    def __init__(self, id: int=1, N: int=1, host='localhost', port=8888):
        """Create a worker.

        Args:
            id: 1-based index of this worker among all workers.
            N: Total number of workers in the cluster.
            host: Address the internal server binds to.
            port: Port the internal server binds to.
        """
        self.id = id
        self.N = N  # number of workers
        self.host = host
        self.port = port
        self.server = None
        self.loop = None
        self.n = 80  # length of the separator lines in log output
        # data that clean() resets
        self.x_train = None
        self.y_train = None
        self.x_test = None
        self.y_test = None
        self.x_train_partition = None
        self.y_train_partition = None
        self.model: Sequential = None
        self.data = {}  # used to store any data

    def start(self):
        """Start the internal server and block in the event loop until shutdown."""
        self.loop = asyncio.get_event_loop()
        self.server = self.loop.run_until_complete(asyncio.start_server(self.control, self.host, self.port))
        print(f'The worker {self.id} ({self.host}:{self.port}) is already...')
        try:
            self.loop.run_forever()
        except KeyboardInterrupt as e:
            print(e)
        except Exception as e:
            print(e)
        self.loop.close()
        print(f'The worker {self.id} ({self.host}:{self.port}) has shut down. See you again!')

    async def control(self, reader: StreamReader, writer: StreamWriter):
        """Per-connection loop: receive requests, dispatch them, send replies."""
        peer = writer.get_extra_info('peername')
        print('-' * self.n, f"\nOpen the connection with {peer}")
        shutdown_flag = False
        loop = True
        while loop:
            # receive a request
            req = await recv_message(reader)
            # handle the request
            loop, res, shutdown_flag = await self.handle(req)
            # send the result to the client
            await send_message(res, writer)
        # close the connection
        print('-' * self.n, f"\nClose the connection with {peer}")
        writer.close()
        if shutdown_flag:
            await self.shutdown()

    async def handle(self, req: Request):
        """Dispatch a request to the matching handler method.

        Returns:
            (loop, res, shutdown_flag): whether to keep serving this
            connection, the `Response` to send back, and whether the whole
            worker should shut down after replying.
        """
        shutdown_flag = False
        loop = True
        # fixed: the original used `if` instead of `elif` for 'clean' and had
        # no else branch, so an unknown command crashed with UnboundLocalError
        if req.command == 'add_method':
            res = await self.add_method(**req.kwargs)
        elif req.command == 'clean':
            res = await self.clean()
        elif req.command == 'close':
            loop = False
            res = await self.close()
        elif req.command == 'load_cifar10':
            res = await self.load_cifar10()
        elif req.command == 'load_mnist':
            res = await self.load_mnist()
        elif req.command == 'load_partition':
            res = await self.load_partition(**req.kwargs)
        elif req.command == 'ping':
            res = await self.ping()
        elif req.command == 'remove_method':
            res = await self.remove_method(method_name=req.args[0])
        elif req.command == 'run':
            res = await self.run(method_name=req.args[0], **req.kwargs)
        elif req.command == 'run_code':
            res = await self.run_code(code=req.args[0])
        elif req.command == 'run_method':
            res = await self.run_method(method_code=req.args[0], method_name=req.args[1], **req.kwargs)
        elif req.command == 'show_data':
            res = await self.show_data()
        elif req.command == 'shutdown':
            shutdown_flag = True
            loop = False
            res = Response(Status.OK, None)
        elif req.command == 'train':
            res = await self.train(**req.kwargs)
        else:
            res = Response(Status.ERROR, ValueError(f"Unknown command: {req.command!r}"))
        return loop, res, shutdown_flag

    async def add_method(self, method_code, method_name):
        """Compile `method_code` and attach it to the Worker class as `method_name`."""
        print('-'*self.n,f'\nExecuting add_method({method_name})...')
        # re-indentation
        method_code = dedent(method_code)
        try:
            # NOTE(review): exec of code received over the network is the RPC
            # design here, but it is a remote-code-execution surface; only run
            # workers on trusted networks.
            code = f'''{method_code}\nsetattr(Worker, {method_name!r}, {method_name})'''
            exec(code)
            print(f'Finished executing add_method({method_name}).')
            return Response(Status.OK, None)
        except Exception as e:
            print(f'Exception when executing add_method({method_name}):', e)
            return Response(Status.ERROR, e)

    async def clean(self):
        """Drop all datasets, partitions, the model and the data store; run GC."""
        print('-' * self.n, '\nExecuting clean()...')
        try:
            self.x_train = None
            self.y_train = None
            self.x_test = None
            self.y_test = None
            self.x_train_partition = None
            self.y_train_partition = None
            self.model: Sequential = None
            self.data.clear()
            gc.collect()
            print('Finished executing clean().')
            return Response(Status.OK, None)
        except Exception as e:
            print('Exception when executing clean():', e)
            return Response(Status.ERROR, e)

    async def close(self):
        """Acknowledge a connection-close request."""
        return Response(Status.OK, None)

    async def load_cifar10(self):
        """Load CIFAR-10, normalize pixels to [-0.5, 0.5] and one-hot the labels."""
        print('-' * self.n, '\nExecuting load_cifar10()...')
        nb_classes = 10
        try:
            (self.x_train, self.y_train), (self.x_test, self.y_test) = datasets.cifar10.load_data()
            # Normalize pixel values from [0, 255] to [-0.5, 0.5] to make it easier to work with
            self.x_train, self.x_test = (self.x_train / 255.0) - 0.5, (self.x_test / 255.0) - 0.5
            # Convert class vectors to binary class matrices (for using with loss='categorical_crossentropy')
            self.y_train = utils.to_categorical(self.y_train, nb_classes)
            self.y_test = utils.to_categorical(self.y_test, nb_classes)
            print('Finished executing load_cifar10().')
            return Response(Status.OK, None)
        except Exception as e:
            print('Exception when executing load_cifar10():', e)
            return Response(Status.ERROR, e)

    async def load_mnist(self):
        """Load MNIST, add the channel axis, normalize pixels and one-hot the labels."""
        print('-' * self.n, '\nExecuting load_mnist()...')
        nb_classes = 10
        try:
            (self.x_train, self.y_train), (self.x_test, self.y_test) = datasets.mnist.load_data()
            # Make sure images have shape (28, 28, 1)
            self.x_train = np.expand_dims(self.x_train, -1)
            self.x_test = np.expand_dims(self.x_test, -1)
            # Normalize pixel values from [0, 255] to [-0.5, 0.5] to make it easier to work with
            self.x_train, self.x_test = (self.x_train / 255.0) - 0.5, (self.x_test / 255.0) - 0.5
            # Convert class vectors to binary class matrices (for using with loss='categorical_crossentropy')
            self.y_train = utils.to_categorical(self.y_train, nb_classes)
            self.y_test = utils.to_categorical(self.y_test, nb_classes)
            print('Finished executing load_mnist().')
            return Response(Status.OK, None)
        except Exception as e:
            print('Exception when executing load_mnist():', e)
            return Response(Status.ERROR, e)

    async def load_partition(self, permutation):
        """
        Load a partition of data for training the model on a worker node.

        Args:
            permutation: Index permutation applied to the training data before
                slicing out this worker's 1/N share (worker `id` takes slice i).
        """
        try:
            n = partition_size = len(self.x_train) // self.N
            i = self.id
            self.x_train_partition = self.x_train[permutation][(i - 1) * n:i * n]
            self.y_train_partition = self.y_train[permutation][(i - 1) * n:i * n]
            return Response(Status.OK, None)
        except Exception as e:
            return Response(Status.ERROR, e)

    async def ping(self):
        """Liveness check; always succeeds."""
        print('-' * self.n, '\nExecuting ping()...')
        return Response(Status.OK, None)

    async def remove_method(self, method_name):
        """Detach a previously injected method from the Worker class."""
        print('-' * self.n, f'\nExecuting remove_method({method_name})...')
        try:
            delattr(Worker, method_name)
            print(f'Finished executing remove_method({method_name}).')
            return Response(Status.OK, None)
        except Exception as e:
            print(f'Exception when executing remove_method({method_name}):', e)
            return Response(Status.ERROR, e)

    async def run(self, method_name, **kwargs):
        """Run an already-attached method by name with the given keyword args."""
        print('-' * self.n, f'\nExecuting run({method_name})...')
        print(f"Method name: {method_name}")
        try:
            code = f'''setattr(Worker, '_method', self.{method_name})'''
            exec(code)
            result = await self._method(**kwargs)
            print(f'Finished executing run({method_name}).')
            return Response(Status.OK, result)
        except Exception as e:
            print(f'Exception when executing run({method_name}):', e)
            return Response(Status.ERROR, e)

    async def run_code(self, code):
        """Execute an arbitrary code string (see security note on add_method)."""
        print('-' * self.n, '\nExecuting run_code()...')
        try:
            # re-indentation
            code = dedent(code)
            exec(code)
            print('Finished executing run_code().')
            return Response(Status.OK, None)
        except Exception as e:
            print('Exception when executing run_code():', e)
            return Response(Status.ERROR, e)

    async def run_method(self, method_code, method_name, **kwargs):
        """Compile `method_code`, bind it as a temporary method and run it once."""
        print('-' * self.n, f'\nExecuting run_method({method_name})...')
        # re-indentation
        method_code = dedent(method_code)
        try:
            code = f'''{method_code}\nsetattr(Worker, '_method', {method_name})'''
            exec(code)
            result = await self._method(**kwargs)
            print(f'Finished executing run_method({method_name}).')
            return Response(Status.OK, result)
        except Exception as e:
            print(f'Exception when executing run_method({method_name}):', e)
            return Response(Status.ERROR, e)

    async def show_data(self):
        """Return the worker's generic data store."""
        print('-' * self.n, '\nExecuting show_data()...')
        return Response(Status.OK, self.data)

    async def shutdown(self):
        """Close the internal server and stop the event loop."""
        print('-' * self.n, '\nExecuting shutdown()...')
        await asyncio.sleep(0)
        print("The master requests shutdown")
        self.server.close()
        print("The internal server has closed")
        self.loop.stop()
        print("The event loop has stopped")
        print('Finished executing shutdown().')

    async def train(self, weights, worker_epochs, batch_size):
        """
        Train the model on a worker node.

        Fits the local partition starting from `weights` and returns the
        (averaged) gradient estimate (old weights minus new weights, divided
        by the number of workers).
        """
        try:
            self.model.set_weights(weights)
            self.model.fit(self.x_train_partition, self.y_train_partition,
                           epochs=worker_epochs, batch_size=batch_size,
                           validation_split=0.1, verbose=2)
            new_weights = self.model.get_weights()
            gradients = subtract(weights, new_weights)
            gradients = divide(gradients, self.N)
            return Response(Status.OK, gradients)
        except Exception as e:
            print(f'Exception when executing train(weights={weights}, worker_epochs={worker_epochs}, batch_size={batch_size}):', e)
            return Response(Status.ERROR, e)
# worker = Worker(id=int(sys.argv[1]), host=sys.argv[2], N=int(sys.argv[3]))
# worker.start()
# for test only
# worker = Worker(id=1, host='localhost', N=1)
# worker.start()
# from multiprocessing import Process
#
# def main(i):
# # hosts = ['192.168.146.1', '192.168.44.1', '192.168.1.8']
# worker = Worker(id=i+1, host='localhost', N=3, port=8888+i)
# worker.start()
# # worker = Worker(id=i+1, host=hosts[i], N=3)
# # worker.start()
#
# processes = []
# for i in range(3):
# p = Process(target=main, args=[i])
# processes.append(p)
#
# if __name__ == '__main__':
# for p in processes:
# p.start()
# for p in processes:
# p.join() | [
"textwrap.dedent",
"tensorflow.keras.utils.to_categorical",
"asyncio.sleep",
"tensorflow.keras.datasets.mnist.load_data",
"asyncio.start_server",
"tensorflow.keras.datasets.cifar10.load_data",
"gc.collect",
"numpy.expand_dims",
"asyncio.get_event_loop"
] | [((1222, 1246), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1244, 1246), False, 'import asyncio\n'), ((4360, 4379), 'textwrap.dedent', 'dedent', (['method_code'], {}), '(method_code)\n', (4366, 4379), False, 'from textwrap import dedent\n'), ((10228, 10247), 'textwrap.dedent', 'dedent', (['method_code'], {}), '(method_code)\n', (10234, 10247), False, 'from textwrap import dedent\n'), ((1298, 1354), 'asyncio.start_server', 'asyncio.start_server', (['self.control', 'self.host', 'self.port'], {}), '(self.control, self.host, self.port)\n', (1318, 1354), False, 'import asyncio\n'), ((5309, 5321), 'gc.collect', 'gc.collect', ([], {}), '()\n', (5319, 5321), False, 'import gc\n'), ((5857, 5885), 'tensorflow.keras.datasets.cifar10.load_data', 'datasets.cifar10.load_data', ([], {}), '()\n', (5883, 5885), False, 'from tensorflow.keras import datasets, layers, models, optimizers, utils\n'), ((6221, 6267), 'tensorflow.keras.utils.to_categorical', 'utils.to_categorical', (['self.y_train', 'nb_classes'], {}), '(self.y_train, nb_classes)\n', (6241, 6267), False, 'from tensorflow.keras import datasets, layers, models, optimizers, utils\n'), ((6294, 6339), 'tensorflow.keras.utils.to_categorical', 'utils.to_categorical', (['self.y_test', 'nb_classes'], {}), '(self.y_test, nb_classes)\n', (6314, 6339), False, 'from tensorflow.keras import datasets, layers, models, optimizers, utils\n'), ((6816, 6842), 'tensorflow.keras.datasets.mnist.load_data', 'datasets.mnist.load_data', ([], {}), '()\n', (6840, 6842), False, 'from tensorflow.keras import datasets, layers, models, optimizers, utils\n'), ((6924, 6956), 'numpy.expand_dims', 'np.expand_dims', (['self.x_train', '(-1)'], {}), '(self.x_train, -1)\n', (6938, 6956), True, 'import numpy as np\n'), ((6983, 7014), 'numpy.expand_dims', 'np.expand_dims', (['self.x_test', '(-1)'], {}), '(self.x_test, -1)\n', (6997, 7014), True, 'import numpy as np\n'), ((7350, 7396), 'tensorflow.keras.utils.to_categorical', 
'utils.to_categorical', (['self.y_train', 'nb_classes'], {}), '(self.y_train, nb_classes)\n', (7370, 7396), False, 'from tensorflow.keras import datasets, layers, models, optimizers, utils\n'), ((7423, 7468), 'tensorflow.keras.utils.to_categorical', 'utils.to_categorical', (['self.y_test', 'nb_classes'], {}), '(self.y_test, nb_classes)\n', (7443, 7468), False, 'from tensorflow.keras import datasets, layers, models, optimizers, utils\n'), ((9682, 9694), 'textwrap.dedent', 'dedent', (['code'], {}), '(code)\n', (9688, 9694), False, 'from textwrap import dedent\n'), ((11200, 11216), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (11213, 11216), False, 'import asyncio\n')] |
import faiss
import torch
import logging
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
# Recall cutoffs reported by test(): R@1, R@5, R@10, R@20.
RECALL_VALUES = [1, 5, 10, 20]
def test(args, eval_ds, model):
    """Compute descriptors of the given dataset and compute the recalls.

    Args:
        args: namespace providing at least ``num_workers``,
            ``infer_batch_size``, ``fc_output_dim`` and ``device``.
        eval_ds: dataset exposing ``database_num``, ``queries_num`` and
            ``get_positives()``; indices ``[0, database_num)`` are database
            images, the remaining ones are queries.
        model: module mapping an image batch to descriptors of size
            ``args.fc_output_dim`` that are comparable under L2 distance.

    Returns:
        tuple: ``(recalls, recalls_str)`` — an array with one recall
        percentage per entry of ``RECALL_VALUES`` and a formatted summary.
    """
    model = model.eval()
    with torch.no_grad():
        logging.debug("Extracting database descriptors for evaluation/testing")
        database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))
        database_dataloader = DataLoader(dataset=database_subset_ds, num_workers=args.num_workers,
                                         batch_size=args.infer_batch_size, pin_memory=(args.device=="cuda"))
        # One row per image (database rows first, then queries).
        all_descriptors = np.empty((len(eval_ds), args.fc_output_dim), dtype="float32")
        _extract_descriptors(model, database_dataloader, all_descriptors, args.device)

        logging.debug("Extracting queries descriptors for evaluation/testing using batch size 1")
        queries_infer_batch_size = 1
        queries_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num, eval_ds.database_num+eval_ds.queries_num)))
        queries_dataloader = DataLoader(dataset=queries_subset_ds, num_workers=args.num_workers,
                                        batch_size=queries_infer_batch_size, pin_memory=(args.device=="cuda"))
        _extract_descriptors(model, queries_dataloader, all_descriptors, args.device)

    queries_descriptors = all_descriptors[eval_ds.database_num:]
    database_descriptors = all_descriptors[:eval_ds.database_num]

    # Use a kNN (exact L2 index) to find predictions
    faiss_index = faiss.IndexFlatL2(args.fc_output_dim)
    faiss_index.add(database_descriptors)
    del database_descriptors, all_descriptors

    logging.debug("Calculating recalls")
    _, predictions = faiss_index.search(queries_descriptors, max(RECALL_VALUES))

    #### For each query, check if the predictions are correct
    positives_per_query = eval_ds.get_positives()
    recalls = np.zeros(len(RECALL_VALUES))
    for query_index, preds in enumerate(predictions):
        for i, n in enumerate(RECALL_VALUES):
            # np.isin replaces the deprecated np.in1d.
            if np.any(np.isin(preds[:n], positives_per_query[query_index])):
                # A hit at rank <= n also counts for every larger cutoff.
                recalls[i:] += 1
                break
    # Divide by queries_num and multiply by 100, so the recalls are in percentages
    recalls = recalls / eval_ds.queries_num * 100
    recalls_str = ", ".join([f"R@{val}: {rec:.1f}" for val, rec in zip(RECALL_VALUES, recalls)])
    return recalls, recalls_str


def _extract_descriptors(model, dataloader, all_descriptors, device):
    """Run ``model`` over ``dataloader`` and scatter rows of
    ``all_descriptors`` in place, addressed by each batch's indices."""
    for images, indices in tqdm(dataloader, ncols=100):
        descriptors = model(images.to(device))
        all_descriptors[indices.numpy(), :] = descriptors.cpu().numpy()
| [
"logging.debug",
"numpy.in1d",
"tqdm.tqdm",
"torch.no_grad",
"torch.utils.data.DataLoader",
"faiss.IndexFlatL2"
] | [((1997, 2034), 'faiss.IndexFlatL2', 'faiss.IndexFlatL2', (['args.fc_output_dim'], {}), '(args.fc_output_dim)\n', (2014, 2034), False, 'import faiss\n'), ((2132, 2168), 'logging.debug', 'logging.debug', (['"""Calculating recalls"""'], {}), "('Calculating recalls')\n", (2145, 2168), False, 'import logging\n'), ((380, 395), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (393, 395), False, 'import torch\n'), ((405, 476), 'logging.debug', 'logging.debug', (['"""Extracting database descriptors for evaluation/testing"""'], {}), "('Extracting database descriptors for evaluation/testing')\n", (418, 476), False, 'import logging\n'), ((587, 727), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'database_subset_ds', 'num_workers': 'args.num_workers', 'batch_size': 'args.infer_batch_size', 'pin_memory': "(args.device == 'cuda')"}), "(dataset=database_subset_ds, num_workers=args.num_workers,\n batch_size=args.infer_batch_size, pin_memory=args.device == 'cuda')\n", (597, 727), False, 'from torch.utils.data import DataLoader\n'), ((883, 919), 'tqdm.tqdm', 'tqdm', (['database_dataloader'], {'ncols': '(100)'}), '(database_dataloader, ncols=100)\n', (887, 919), False, 'from tqdm import tqdm\n'), ((1108, 1202), 'logging.debug', 'logging.debug', (['"""Extracting queries descriptors for evaluation/testing using batch size 1"""'], {}), "(\n 'Extracting queries descriptors for evaluation/testing using batch size 1')\n", (1121, 1202), False, 'import logging\n'), ((1385, 1527), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'queries_subset_ds', 'num_workers': 'args.num_workers', 'batch_size': 'queries_infer_batch_size', 'pin_memory': "(args.device == 'cuda')"}), "(dataset=queries_subset_ds, num_workers=args.num_workers,\n batch_size=queries_infer_batch_size, pin_memory=args.device == 'cuda')\n", (1395, 1527), False, 'from torch.utils.data import DataLoader\n'), ((1595, 1630), 'tqdm.tqdm', 'tqdm', (['queries_dataloader'], {'ncols': '(100)'}), 
'(queries_dataloader, ncols=100)\n', (1599, 1630), False, 'from tqdm import tqdm\n'), ((2532, 2584), 'numpy.in1d', 'np.in1d', (['preds[:n]', 'positives_per_query[query_index]'], {}), '(preds[:n], positives_per_query[query_index])\n', (2539, 2584), True, 'import numpy as np\n')] |
from __future__ import division
from math import ceil
import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from chainercv.experimental.links.model.pspnet.transforms import \
convolution_crop
from chainercv.links import Conv2DBNActiv
from chainercv.links.model.resnet import ResBlock
from chainercv.links import PickableSequentialChain
from chainercv import transforms
from chainercv import utils
# ImageNet per-channel (RGB) mean pixel values, reshaped to (3, 1, 1) so it
# broadcasts over CHW images when subtracted.
_imagenet_mean = np.array(
    (123.68, 116.779, 103.939), dtype=np.float32)[:, None, None]
class PyramidPoolingModule(chainer.ChainList):
    """Pyramid pooling used by PSPNet.

    Pools the input at several grid resolutions (one per entry in
    ``pyramids``), reduces channels with a 1x1 ``Conv2DBNActiv``, resizes
    every branch back to the input resolution, and concatenates all branches
    with the original input along the channel axis.
    """

    def __init__(self, in_channels, feat_size, pyramids, initialW=None):
        out_channels = in_channels // len(pyramids)
        # One 1x1 conv branch per pyramid level. The previous version
        # hard-coded exactly four branches, which silently mismatched any
        # ``pyramids`` of a different length; building one branch per level
        # is identical for the usual [6, 3, 2, 1] setting while generalizing
        # to other configurations.
        super(PyramidPoolingModule, self).__init__(
            *[Conv2DBNActiv(
                in_channels, out_channels, 1, 1, 0, 1, initialW=initialW)
              for _ in pyramids])
        # Pooling kernel sizes that tile feat_size into p x p grids.
        kh = feat_size[0] // np.array(pyramids)
        kw = feat_size[1] // np.array(pyramids)
        self.ksizes = list(zip(kh, kw))

    def __call__(self, x):
        ys = [x]
        H, W = x.shape[2:]
        for f, ksize in zip(self, self.ksizes):
            y = F.average_pooling_2d(x, ksize, ksize)
            y = f(y)
            y = F.resize_images(y, (H, W))
            ys.append(y)
        return F.concat(ys, axis=1)
class DilatedResNet(PickableSequentialChain):
    """ResNet-50/101 backbone used as the PSPNet feature extractor.

    Later stages pass dilation arguments to ``ResBlock`` so that the output
    feature map keeps a higher spatial resolution than a vanilla ResNet.
    """

    # Number of residual blocks per stage, keyed by network depth.
    _blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    # Downloadable pretrained weights, keyed by depth then dataset name.
    _models = {
        50: {
            'imagenet': {
                'url': 'https://chainercv-models.preferred.jp/'
                'pspnet_resnet50_imagenet_trained_2018_11_26.npz',
                'cv2': True
            },
        },
        101: {
            'imagenet': {
                'url': 'https://chainercv-models.preferred.jp/'
                'pspnet_resnet101_imagenet_trained_2018_11_26.npz',
                'cv2': True
            },
        },
    }
    def __init__(self, n_layer, pretrained_model=None,
                 initialW=None):
        n_block = self._blocks[n_layer]
        # Resolve the weight file path (downloads when a known name is given).
        _, path = utils.prepare_pretrained_model(
            {},
            pretrained_model,
            self._models[n_layer])
        super(DilatedResNet, self).__init__()
        # NOTE: attribute assignment order inside init_scope determines the
        # chainer parameter names used by serialized weight files — keep it.
        with self.init_scope():
            self.conv1_1 = Conv2DBNActiv(
                None, 64, 3, 2, 1, 1, initialW=initialW)
            self.conv1_2 = Conv2DBNActiv(
                64, 64, 3, 1, 1, 1, initialW=initialW)
            self.conv1_3 = Conv2DBNActiv(
                64, 128, 3, 1, 1, 1, initialW=initialW)
            self.pool1 = lambda x: F.max_pooling_2d(
                x, ksize=3, stride=2, pad=1)
            # The two trailing positional args differ per stage; presumably
            # stride and dilation — confirm against ResBlock's signature.
            self.res2 = ResBlock(
                n_block[0], 128, 64, 256, 1, 1,
                initialW=initialW, stride_first=False)
            self.res3 = ResBlock(
                n_block[1], 256, 128, 512, 2, 1,
                initialW=initialW, stride_first=False)
            self.res4 = ResBlock(
                n_block[2], 512, 256, 1024, 1, 2,
                initialW=initialW, stride_first=False)
            self.res5 = ResBlock(
                n_block[3], 1024, 512, 2048, 1, 4,
                initialW=initialW, stride_first=False)
        if path:
            chainer.serializers.load_npz(path, self, ignore_names=None)
class PSPNet(chainer.Chain):
    """Pyramid Scene Parsing Network.
    This is a PSPNet [#]_ model for semantic segmentation. This is based on
    the implementation found here_.
    .. [#] <NAME>, <NAME>, <NAME>, <NAME> \
    <NAME> "Pyramid Scene Parsing Network" \
    CVPR, 2017
    .. _here: https://github.com/hszhao/PSPNet
    Args:
        n_class (int): The number of channels in the last convolution layer.
        pretrained_model (string): The weight file to be loaded.
            This can take :obj:`'cityscapes'`, `filepath` or :obj:`None`.
            The default value is :obj:`None`.
            * :obj:`'cityscapes'`: Load weights trained on the train split of \
                Cityscapes dataset. \
                :obj:`n_class` must be :obj:`19` or :obj:`None`.
            * :obj:`'imagenet'`: Load ImageNet pretrained weights for \
                the extractor.
            * `filepath`: A path of npz file. In this case, :obj:`n_class` \
                must be specified properly.
            * :obj:`None`: Do not load weights.
        input_size (tuple): The size of the input.
            This value is :math:`(height, width)`.
        initialW (callable): Initializer for the weights of
            convolution kernels.
    """
    def __init__(self, n_class=None, pretrained_model=None,
                 input_size=None, initialW=None):
        super(PSPNet, self).__init__()
        # 'imagenet' weights apply only to the extractor, not the full model.
        if pretrained_model == 'imagenet':
            extractor_pretrained_model = 'imagenet'
            pretrained_model = None
        else:
            extractor_pretrained_model = None
        param, path = utils.prepare_pretrained_model(
            {'n_class': n_class, 'input_size': input_size},
            pretrained_model, self._models,
            default={'input_size': (713, 713)})
        n_class = param['n_class']
        input_size = param['input_size']
        if not isinstance(input_size, (list, tuple)):
            # A scalar size means a square input.
            input_size = (int(input_size), int(input_size))
        self.input_size = input_size
        if initialW is None:
            if pretrained_model:
                # Weights are overwritten by load_npz anyway; zero init is cheap.
                initialW = initializers.constant.Zero()
        kwargs = self._extractor_kwargs
        kwargs.update({'pretrained_model': extractor_pretrained_model,
                       'initialW': initialW})
        extractor = self._extractor_cls(**kwargs)
        extractor.pick = self._extractor_pick
        self.scales = None
        self.mean = _imagenet_mean
        pyramids = [6, 3, 2, 1]
        # The extractor output is 1/8 of the input resolution.
        feat_size = (input_size[0] // 8, input_size[1] // 8)
        with self.init_scope():
            self.extractor = extractor
            self.ppm = PyramidPoolingModule(
                2048, feat_size, pyramids, initialW=initialW)
            self.head_conv1 = Conv2DBNActiv(
                4096, 512, 3, 1, 1, initialW=initialW)
            self.head_conv2 = L.Convolution2D(
                512, n_class, 1, 1, 0, False, initialW)
        if path:
            chainer.serializers.load_npz(path, self)
    @property
    def n_class(self):
        # Derived from the final conv so it always matches the loaded weights.
        return self.head_conv2.out_channels
    def __call__(self, x):
        """Return per-class score maps resized to the input resolution."""
        _, res5 = self.extractor(x)
        h = self.ppm(res5)
        h = self.head_conv1(h)
        h = self.head_conv2(h)
        h = F.resize_images(h, x.shape[2:])
        return h
    def _tile_predict(self, img):
        """Score one image, tiling it into overlapping crops when it is
        larger than ``self.input_size``; each crop is also scored on its
        horizontal mirror and the two are averaged."""
        if self.mean is not None:
            img = img - self.mean
        ori_H, ori_W = img.shape[1:]
        long_size = max(ori_H, ori_W)
        if long_size > max(self.input_size):
            # Overlapping crops: stride is 2/3 of the crop size.
            stride_rate = 2 / 3
            stride = (int(ceil(self.input_size[0] * stride_rate)),
                      int(ceil(self.input_size[1] * stride_rate)))
            imgs, param = convolution_crop(
                img, self.input_size, stride, return_param=True)
            # counts tracks how many crop scores cover each pixel.
            counts = self.xp.zeros((1, ori_H, ori_W), dtype=np.float32)
            preds = self.xp.zeros((1, self.n_class, ori_H, ori_W),
                                  dtype=np.float32)
            N = len(param['y_slices'])
            for i in range(N):
                img_i = imgs[i:i+1]
                y_slice = param['y_slices'][i]
                x_slice = param['x_slices'][i]
                crop_y_slice = param['crop_y_slices'][i]
                crop_x_slice = param['crop_x_slices'][i]
                scores_i = self._predict(img_i)
                # Flip horizontally flipped score maps again
                flipped_scores_i = self._predict(
                    img_i[:, :, :, ::-1])[:, :, :, ::-1]
                preds[0, :, y_slice, x_slice] +=\
                    scores_i[0, :, crop_y_slice, crop_x_slice]
                preds[0, :, y_slice, x_slice] +=\
                    flipped_scores_i[0, :, crop_y_slice, crop_x_slice]
                counts[0, y_slice, x_slice] += 2
            # Average over all (plain + flipped) crops covering each pixel.
            scores = preds / counts[:, None]
        else:
            # Small image: pad it into one input-sized canvas instead.
            img, param = transforms.resize_contain(
                img, self.input_size, return_param=True)
            preds1 = self._predict(img[np.newaxis])
            preds2 = self._predict(img[np.newaxis, :, :, ::-1])
            preds = (preds1 + preds2[:, :, :, ::-1]) / 2
            # Crop the padding back off using the resize_contain offsets.
            y_start = param['y_offset']
            y_end = y_start + param['scaled_size'][0]
            x_start = param['x_offset']
            x_end = x_start + param['scaled_size'][1]
            scores = preds[:, :, y_start:y_end, x_start:x_end]
        scores = F.resize_images(scores, (ori_H, ori_W))[0].array
        return scores
    def _predict(self, imgs):
        """Forward a preprocessed batch and return softmax scores (array)."""
        xs = chainer.Variable(self.xp.asarray(imgs))
        with chainer.using_config('train', False):
            scores = F.softmax(self(xs)).array
        return scores
    def predict(self, imgs):
        """Conduct semantic segmentation from images.
        Args:
            imgs (iterable of numpy.ndarray): Arrays holding images.
                All images are in CHW and RGB format
                and the range of their values are :math:`[0, 255]`.
        Returns:
            list of numpy.ndarray:
            List of integer labels predicted from each image in the input \
            list.
        """
        labels = []
        for img in imgs:
            with chainer.using_config('train', False), \
                    chainer.function.no_backprop_mode():
                if self.scales is not None:
                    scores = _multiscale_predict(
                        self._tile_predict, img, self.scales)
                else:
                    scores = self._tile_predict(img)
            # argmax over the class axis gives the per-pixel label map.
            labels.append(chainer.backends.cuda.to_cpu(
                self.xp.argmax(scores, axis=0).astype(np.int32)))
        return labels
class PSPNetResNet101(PSPNet):
    """PSPNet with Dilated ResNet101 as the feature extractor.
    .. seealso::
        :class:`chainercv.experimental.links.model.pspnet.PSPNet`
    """
    # Extractor configuration consumed by PSPNet.__init__.
    _extractor_cls = DilatedResNet
    _extractor_kwargs = {'n_layer': 101}
    # Which extractor outputs to pick (res4 is unused by __call__ here).
    _extractor_pick = ('res4', 'res5')
    # Downloadable pretrained weights for the full model.
    _models = {
        'cityscapes': {
            'param': {'n_class': 19, 'input_size': (713, 713)},
            'url': 'https://github.com/yuyu2172/share-weights/releases/'
            'download/0.0.6/pspnet_resnet101_cityscapes_convert_2018_05_22.npz'
        },
    }
class PSPNetResNet50(PSPNet):
    """PSPNet with Dilated ResNet50 as the feature extractor.
    .. seealso::
        :class:`chainercv.experimental.links.model.pspnet.PSPNet`
    """
    # Extractor configuration consumed by PSPNet.__init__.
    _extractor_cls = DilatedResNet
    _extractor_kwargs = {'n_layer': 50}
    _extractor_pick = ('res4', 'res5')
    # No pretrained weights are published for this variant.
    _models = {
    }
def _multiscale_predict(predict_method, img, scales):
    """Average the score maps obtained by predicting ``img`` at each scale.

    ``predict_method`` is called on a (possibly resized) CHW image and must
    return a (C, H, W) score map matching that image's spatial size; every
    map is resized back to the original resolution before averaging.
    """
    orig_H, orig_W = img.shape[1:]
    original = img
    collected = []
    for scale in scales:
        scaled = original.copy()
        if scale != 1.0:
            new_size = (int(orig_H * scale), int(orig_W * scale))
            scaled = transforms.resize(scaled, new_size)
        # This method should return scores
        score = predict_method(scaled)[None]
        assert score.shape[2:] == scaled.shape[1:]
        if scale != 1.0:
            score = F.resize_images(score, (orig_H, orig_W)).array
        collected.append(score)
    xp = chainer.backends.cuda.get_array_module(collected[0])
    stacked = xp.stack(collected)
    return stacked.mean(0)[0]  # (C, H, W)
| [
"chainercv.links.model.resnet.ResBlock",
"math.ceil",
"chainer.functions.concat",
"chainer.functions.average_pooling_2d",
"chainercv.transforms.resize_contain",
"chainer.functions.max_pooling_2d",
"numpy.array",
"chainer.backends.cuda.get_array_module",
"chainercv.links.Conv2DBNActiv",
"chainer.in... | [((492, 546), 'numpy.array', 'np.array', (['(123.68, 116.779, 103.939)'], {'dtype': 'np.float32'}), '((123.68, 116.779, 103.939), dtype=np.float32)\n', (500, 546), True, 'import numpy as np\n'), ((11780, 11829), 'chainer.backends.cuda.get_array_module', 'chainer.backends.cuda.get_array_module', (['scores[0]'], {}), '(scores[0])\n', (11818, 11829), False, 'import chainer\n'), ((1626, 1646), 'chainer.functions.concat', 'F.concat', (['ys'], {'axis': '(1)'}), '(ys, axis=1)\n', (1634, 1646), True, 'import chainer.functions as F\n'), ((2395, 2470), 'chainercv.utils.prepare_pretrained_model', 'utils.prepare_pretrained_model', (['{}', 'pretrained_model', 'self._models[n_layer]'], {}), '({}, pretrained_model, self._models[n_layer])\n', (2425, 2470), False, 'from chainercv import utils\n'), ((5256, 5411), 'chainercv.utils.prepare_pretrained_model', 'utils.prepare_pretrained_model', (["{'n_class': n_class, 'input_size': input_size}", 'pretrained_model', 'self._models'], {'default': "{'input_size': (713, 713)}"}), "({'n_class': n_class, 'input_size':\n input_size}, pretrained_model, self._models, default={'input_size': (\n 713, 713)})\n", (5286, 5411), False, 'from chainercv import utils\n'), ((6897, 6928), 'chainer.functions.resize_images', 'F.resize_images', (['h', 'x.shape[2:]'], {}), '(h, x.shape[2:])\n', (6912, 6928), True, 'import chainer.functions as F\n'), ((806, 877), 'chainercv.links.Conv2DBNActiv', 'Conv2DBNActiv', (['in_channels', 'out_channels', '(1)', '(1)', '(0)', '(1)'], {'initialW': 'initialW'}), '(in_channels, out_channels, 1, 1, 0, 1, initialW=initialW)\n', (819, 877), False, 'from chainercv.links import Conv2DBNActiv\n'), ((908, 979), 'chainercv.links.Conv2DBNActiv', 'Conv2DBNActiv', (['in_channels', 'out_channels', '(1)', '(1)', '(0)', '(1)'], {'initialW': 'initialW'}), '(in_channels, out_channels, 1, 1, 0, 1, initialW=initialW)\n', (921, 979), False, 'from chainercv.links import Conv2DBNActiv\n'), ((1010, 1081), 
'chainercv.links.Conv2DBNActiv', 'Conv2DBNActiv', (['in_channels', 'out_channels', '(1)', '(1)', '(0)', '(1)'], {'initialW': 'initialW'}), '(in_channels, out_channels, 1, 1, 0, 1, initialW=initialW)\n', (1023, 1081), False, 'from chainercv.links import Conv2DBNActiv\n'), ((1112, 1183), 'chainercv.links.Conv2DBNActiv', 'Conv2DBNActiv', (['in_channels', 'out_channels', '(1)', '(1)', '(0)', '(1)'], {'initialW': 'initialW'}), '(in_channels, out_channels, 1, 1, 0, 1, initialW=initialW)\n', (1125, 1183), False, 'from chainercv.links import Conv2DBNActiv\n'), ((1241, 1259), 'numpy.array', 'np.array', (['pyramids'], {}), '(pyramids)\n', (1249, 1259), True, 'import numpy as np\n'), ((1289, 1307), 'numpy.array', 'np.array', (['pyramids'], {}), '(pyramids)\n', (1297, 1307), True, 'import numpy as np\n'), ((1484, 1521), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', (['x', 'ksize', 'ksize'], {}), '(x, ksize, ksize)\n', (1504, 1521), True, 'import chainer.functions as F\n'), ((1559, 1585), 'chainer.functions.resize_images', 'F.resize_images', (['y', '(H, W)'], {}), '(y, (H, W))\n', (1574, 1585), True, 'import chainer.functions as F\n'), ((2614, 2668), 'chainercv.links.Conv2DBNActiv', 'Conv2DBNActiv', (['None', '(64)', '(3)', '(2)', '(1)', '(1)'], {'initialW': 'initialW'}), '(None, 64, 3, 2, 1, 1, initialW=initialW)\n', (2627, 2668), False, 'from chainercv.links import Conv2DBNActiv\n'), ((2713, 2765), 'chainercv.links.Conv2DBNActiv', 'Conv2DBNActiv', (['(64)', '(64)', '(3)', '(1)', '(1)', '(1)'], {'initialW': 'initialW'}), '(64, 64, 3, 1, 1, 1, initialW=initialW)\n', (2726, 2765), False, 'from chainercv.links import Conv2DBNActiv\n'), ((2810, 2863), 'chainercv.links.Conv2DBNActiv', 'Conv2DBNActiv', (['(64)', '(128)', '(3)', '(1)', '(1)', '(1)'], {'initialW': 'initialW'}), '(64, 128, 3, 1, 1, 1, initialW=initialW)\n', (2823, 2863), False, 'from chainercv.links import Conv2DBNActiv\n'), ((3003, 3082), 'chainercv.links.model.resnet.ResBlock', 'ResBlock', 
(['n_block[0]', '(128)', '(64)', '(256)', '(1)', '(1)'], {'initialW': 'initialW', 'stride_first': '(False)'}), '(n_block[0], 128, 64, 256, 1, 1, initialW=initialW, stride_first=False)\n', (3011, 3082), False, 'from chainercv.links.model.resnet import ResBlock\n'), ((3140, 3225), 'chainercv.links.model.resnet.ResBlock', 'ResBlock', (['n_block[1]', '(256)', '(128)', '(512)', '(2)', '(1)'], {'initialW': 'initialW', 'stride_first': '(False)'}), '(n_block[1], 256, 128, 512, 2, 1, initialW=initialW, stride_first=False\n )\n', (3148, 3225), False, 'from chainercv.links.model.resnet import ResBlock\n'), ((3278, 3364), 'chainercv.links.model.resnet.ResBlock', 'ResBlock', (['n_block[2]', '(512)', '(256)', '(1024)', '(1)', '(2)'], {'initialW': 'initialW', 'stride_first': '(False)'}), '(n_block[2], 512, 256, 1024, 1, 2, initialW=initialW, stride_first=\n False)\n', (3286, 3364), False, 'from chainercv.links.model.resnet import ResBlock\n'), ((3417, 3504), 'chainercv.links.model.resnet.ResBlock', 'ResBlock', (['n_block[3]', '(1024)', '(512)', '(2048)', '(1)', '(4)'], {'initialW': 'initialW', 'stride_first': '(False)'}), '(n_block[3], 1024, 512, 2048, 1, 4, initialW=initialW, stride_first\n =False)\n', (3425, 3504), False, 'from chainercv.links.model.resnet import ResBlock\n'), ((3563, 3622), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['path', 'self'], {'ignore_names': 'None'}), '(path, self, ignore_names=None)\n', (3591, 3622), False, 'import chainer\n'), ((6405, 6457), 'chainercv.links.Conv2DBNActiv', 'Conv2DBNActiv', (['(4096)', '(512)', '(3)', '(1)', '(1)'], {'initialW': 'initialW'}), '(4096, 512, 3, 1, 1, initialW=initialW)\n', (6418, 6457), False, 'from chainercv.links import Conv2DBNActiv\n'), ((6505, 6560), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(512)', 'n_class', '(1)', '(1)', '(0)', '(False)', 'initialW'], {}), '(512, n_class, 1, 1, 0, False, initialW)\n', (6520, 6560), True, 'import chainer.links as L\n'), ((6608, 6648), 
'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['path', 'self'], {}), '(path, self)\n', (6636, 6648), False, 'import chainer\n'), ((7363, 7428), 'chainercv.experimental.links.model.pspnet.transforms.convolution_crop', 'convolution_crop', (['img', 'self.input_size', 'stride'], {'return_param': '(True)'}), '(img, self.input_size, stride, return_param=True)\n', (7379, 7428), False, 'from chainercv.experimental.links.model.pspnet.transforms import convolution_crop\n'), ((8538, 8604), 'chainercv.transforms.resize_contain', 'transforms.resize_contain', (['img', 'self.input_size'], {'return_param': '(True)'}), '(img, self.input_size, return_param=True)\n', (8563, 8604), False, 'from chainercv import transforms\n'), ((9232, 9268), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (9252, 9268), False, 'import chainer\n'), ((2916, 2961), 'chainer.functions.max_pooling_2d', 'F.max_pooling_2d', (['x'], {'ksize': '(3)', 'stride': '(2)', 'pad': '(1)'}), '(x, ksize=3, stride=2, pad=1)\n', (2932, 2961), True, 'import chainer.functions as F\n'), ((5757, 5785), 'chainer.initializers.constant.Zero', 'initializers.constant.Zero', ([], {}), '()\n', (5783, 5785), False, 'from chainer import initializers\n'), ((9064, 9103), 'chainer.functions.resize_images', 'F.resize_images', (['scores', '(ori_H, ori_W)'], {}), '(scores, (ori_H, ori_W))\n', (9079, 9103), True, 'import chainer.functions as F\n'), ((9851, 9887), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (9871, 9887), False, 'import chainer\n'), ((9911, 9946), 'chainer.function.no_backprop_mode', 'chainer.function.no_backprop_mode', ([], {}), '()\n', (9944, 9946), False, 'import chainer\n'), ((11703, 11739), 'chainer.functions.resize_images', 'F.resize_images', (['y', '(orig_H, orig_W)'], {}), '(y, (orig_H, orig_W))\n', (11718, 11739), True, 'import chainer.functions as F\n'), ((7228, 7266), 'math.ceil', 
'ceil', (['(self.input_size[0] * stride_rate)'], {}), '(self.input_size[0] * stride_rate)\n', (7232, 7266), False, 'from math import ceil\n'), ((7295, 7333), 'math.ceil', 'ceil', (['(self.input_size[1] * stride_rate)'], {}), '(self.input_size[1] * stride_rate)\n', (7299, 7333), False, 'from math import ceil\n')] |
"""Tests for timeseries anomalies detection and imputation."""
from typing import Tuple
import numpy as np
import pytest
import pudl.analysis.timeseries_cleaning
def simulate_series(
    n: int = 10,
    periods: int = 20,
    frequency: int = 24,
    amplitude_range: Tuple[float, float] = (0.0, 1.0),
    offset_range: Tuple[float, float] = (1.0, 2.0),
    shift_range: Tuple[int, int] = (-3, 3),
    seed=None,
) -> np.ndarray:
    """Generate synthetic multivariate series from sin functions.
    Args:
        n: Number of variables.
        periods: Number of periods.
        frequency: Number of values in each period.
        amplitude_range: Range of amplitudes.
        offset_range: Range of offsets.
        shift_range: Range of phase shifts (by number of values).
        seed: Seed for the random generator (any value accepted by
            :func:`numpy.random.default_rng`).
    Returns:
        Multivariate series with shape (`periods * frequency`, `n`).
    """
    rng = np.random.default_rng(seed=seed)
    # Time axis in radians: one full 2*pi cycle per `frequency` samples.
    t = np.arange(periods * frequency) * (2 * np.pi / frequency)
    amplitudes = rng.uniform(*amplitude_range, size=n)
    offsets = rng.uniform(*offset_range, size=n)
    shifts = rng.integers(*shift_range, size=n)
    # Each variable: a phase-shifted, scaled sine riding on its own offset.
    columns = []
    for amplitude, offset, shift in zip(amplitudes, offsets, shifts):
        columns.append(offset + np.roll(amplitude * np.sin(t), shift))
    return np.column_stack(columns)
def simulate_anomalies(
    x: np.ndarray,
    n: int = 100,
    sigma: float = 1,
    seed=None,
) -> Tuple[np.ndarray, np.ndarray]:
    """Simulate anomalies in series.
    Args:
        x: Multivariate series with shape (m observations, n variables).
        n: Total number of anomalies to simulate.
        sigma: Standard deviation of the anomalous deviations from `x`.
        seed: Seed for the random generator.
    Returns:
        Values and flat indices in `x` of the simulated anomalies.
    """
    generator = np.random.default_rng(seed=seed)
    # Pick `n` distinct flat positions, then perturb each with Gaussian noise.
    flat_indices = generator.choice(x.size, size=n, replace=False)
    deviations = generator.normal(scale=sigma, size=n)
    anomalous_values = x.flat[flat_indices] + deviations
    return anomalous_values, flat_indices
@pytest.mark.parametrize(
    "series_seed,anomalies_seed",
    [
        (16662093832, 741013840),
        (7088438834, 382046123),
        (11357816575, 18413484987),
        (5150844305, 5634704703),
        (5248964137, 8991153078),
        (2654087352, 8105685070),
        (18949329570, 5605034834),
        (16844944928, 11661181582),
        (5473292783, 5189943010),
        (7173817266, 19937484751),
    ],
)
def test_flags_and_imputes_anomalies(series_seed, anomalies_seed) -> None:
    """Flags and imputes anomalies within modest thresholds of success."""
    series = simulate_series(seed=series_seed)
    # Corrupt the clean series with simulated anomalies.
    anomaly_values, anomaly_indices = simulate_anomalies(series, seed=anomalies_seed)
    series.flat[anomaly_indices] = anomaly_values
    # Flag anomalies
    ts = pudl.analysis.timeseries_cleaning.Timeseries(series)
    ts.flag_ruggles()
    flagged = np.flatnonzero(~np.equal(ts.flags, None))
    # Flag summary table has the right flag count
    assert ts.summarize_flags()["count"].sum() == flagged.size
    # Flagged values are 90%+ inserted anomalous values
    assert np.isin(flagged, anomaly_indices).sum() > 0.9 * flagged.size
    # Add additional null values alongside nulled anomalies
    mask = ts.simulate_nulls()
    for method in ("tubal", "tnn"):
        # Impute null values with one vs. many iterations.
        one_iter = ts.impute(mask=mask, method=method, rho0=1, maxiter=1)
        many_iter = ts.impute(mask=mask, method=method, rho0=1, maxiter=10)
        # Deviations between original and imputed values
        fit_one = ts.summarize_imputed(one_iter, mask)
        fit_many = ts.summarize_imputed(many_iter, mask)
        # Mean MAPE (mean absolute percent error) is converging
        assert fit_many["mape"].mean() < fit_one["mape"].mean()
| [
"numpy.random.default_rng",
"numpy.equal",
"numpy.isin",
"pytest.mark.parametrize",
"numpy.sin",
"numpy.arange"
] | [((2014, 2354), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""series_seed,anomalies_seed"""', '[(16662093832, 741013840), (7088438834, 382046123), (11357816575, \n 18413484987), (5150844305, 5634704703), (5248964137, 8991153078), (\n 2654087352, 8105685070), (18949329570, 5605034834), (16844944928, \n 11661181582), (5473292783, 5189943010), (7173817266, 19937484751)]'], {}), "('series_seed,anomalies_seed', [(16662093832, \n 741013840), (7088438834, 382046123), (11357816575, 18413484987), (\n 5150844305, 5634704703), (5248964137, 8991153078), (2654087352, \n 8105685070), (18949329570, 5605034834), (16844944928, 11661181582), (\n 5473292783, 5189943010), (7173817266, 19937484751)])\n", (2037, 2354), False, 'import pytest\n'), ((885, 917), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'seed'}), '(seed=seed)\n', (906, 917), True, 'import numpy as np\n'), ((1832, 1864), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': 'seed'}), '(seed=seed)\n', (1853, 1864), True, 'import numpy as np\n'), ((926, 956), 'numpy.arange', 'np.arange', (['(periods * frequency)'], {}), '(periods * frequency)\n', (935, 956), True, 'import numpy as np\n'), ((2875, 2898), 'numpy.equal', 'np.equal', (['s.flags', 'None'], {}), '(s.flags, None)\n', (2883, 2898), True, 'import numpy as np\n'), ((3084, 3114), 'numpy.isin', 'np.isin', (['flag_indices', 'indices'], {}), '(flag_indices, indices)\n', (3091, 3114), True, 'import numpy as np\n'), ((1214, 1223), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (1220, 1223), True, 'import numpy as np\n')] |
# coding:utf-8
# Unit test for Dense class
# Created : 1, 30, 2018
# Revised : 1, 30, 2018
# All rights reserved
#------------------------------------------------------------------------------------------------
__author__ = 'dawei.leng'
import os, sys
os.environ['THEANO_FLAGS'] = "floatX=float32, mode=FAST_RUN, warn_float64='raise'"
import theano
from theano import tensor
from dandelion.module import *
from dandelion.activation import *
from lasagne.layers import InputLayer, DenseLayer, get_output
import lasagne.nonlinearities as LACT
import dandelion
dandelion_path = os.path.split(dandelion.__file__)[0]
print('dandelion path = %s\n' % dandelion_path)
class build_model_D(Module):
    """Dandelion model under test: one Dense layer followed by ReLU."""
    def __init__(self, in_dim=3, out_dim=3):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.dense = Dense(input_dims=self.in_dim, output_dim=self.out_dim)
        # Expose forward() under the conventional predict() name as well.
        self.predict = self.forward

    def forward(self, x):
        # Dense affine transform, then ReLU activation.
        x = self.dense.forward(x)
        x = relu(x)
        return x
def build_model_L(in_dim=3, out_dim=3):
    """Build the Lasagne reference model: input -> Dense with ReLU."""
    x_var = tensor.fmatrix('x')
    layer_in = InputLayer(shape=(None, in_dim), input_var=x_var, name='input0')
    layer_out = DenseLayer(layer_in, num_units=out_dim, nonlinearity=LACT.rectify, name='dense0')
    return layer_out
def test_case_0(in_dim=1, out_dim=1):
    """Check dandelion's Dense against lasagne's DenseLayer with shared weights."""
    import numpy as np
    from lasagne_ext.utils import get_layer_by_name

    model_D = build_model_D(in_dim=in_dim, out_dim=out_dim)
    model_L = build_model_L(in_dim=in_dim, out_dim=out_dim)

    # Share one random parameter set between both implementations.
    W = np.random.rand(in_dim, out_dim).astype(np.float32)
    b = np.random.rand(out_dim).astype(np.float32)
    model_D.dense.W.set_value(W)
    model_D.dense.b.set_value(b)
    dense0 = get_layer_by_name(model_L, 'dense0')
    dense0.W.set_value(W)
    dense0.b.set_value(b)

    X = get_layer_by_name(model_L, 'input0').input_var
    fn_D = theano.function([X], model_D.forward(X), no_default_updates=True)
    fn_L = theano.function([X], get_output(model_L), no_default_updates=True)

    # Both compiled functions must agree on random inputs.
    for i in range(20):
        x = np.random.rand(16, in_dim).astype(np.float32)
        diff = np.sum(np.abs(fn_D(x) - fn_L(x)))
        print('i=%d, diff=%0.6f' % (i, diff))
        if diff > 1e-4:
            raise ValueError('diff is too big')
if __name__ == '__main__':
    # Smoke test with a 3-input, 2-output Dense layer.
    test_case_0(3, 2)
    print('Test passed')
| [
"numpy.abs",
"lasagne_ext.utils.get_layer_by_name",
"theano.function",
"numpy.random.rand",
"lasagne.layers.InputLayer",
"os.path.split",
"lasagne.layers.get_output",
"theano.tensor.fmatrix",
"lasagne.layers.DenseLayer"
] | [((585, 618), 'os.path.split', 'os.path.split', (['dandelion.__file__'], {}), '(dandelion.__file__)\n', (598, 618), False, 'import os, sys\n'), ((1099, 1118), 'theano.tensor.fmatrix', 'tensor.fmatrix', (['"""x"""'], {}), "('x')\n", (1113, 1118), False, 'from theano import tensor\n'), ((1132, 1200), 'lasagne.layers.InputLayer', 'InputLayer', ([], {'shape': '(None, in_dim)', 'input_var': 'input_var', 'name': '"""input0"""'}), "(shape=(None, in_dim), input_var=input_var, name='input0')\n", (1142, 1200), False, 'from lasagne.layers import InputLayer, DenseLayer, get_output\n'), ((1214, 1293), 'lasagne.layers.DenseLayer', 'DenseLayer', (['input0'], {'num_units': 'out_dim', 'nonlinearity': 'LACT.rectify', 'name': '"""dense0"""'}), "(input0, num_units=out_dim, nonlinearity=LACT.rectify, name='dense0')\n", (1224, 1293), False, 'from lasagne.layers import InputLayer, DenseLayer, get_output\n'), ((1932, 1951), 'lasagne.layers.get_output', 'get_output', (['model_L'], {}), '(model_L)\n', (1942, 1951), False, 'from lasagne.layers import InputLayer, DenseLayer, get_output\n'), ((1964, 2014), 'theano.function', 'theano.function', (['[X]', 'y_D'], {'no_default_updates': '(True)'}), '([X], y_D, no_default_updates=True)\n', (1979, 2014), False, 'import theano\n'), ((2026, 2076), 'theano.function', 'theano.function', (['[X]', 'y_L'], {'no_default_updates': '(True)'}), '([X], y_L, no_default_updates=True)\n', (2041, 2076), False, 'import theano\n'), ((1846, 1882), 'lasagne_ext.utils.get_layer_by_name', 'get_layer_by_name', (['model_L', '"""input0"""'], {}), "(model_L, 'input0')\n", (1863, 1882), False, 'from lasagne_ext.utils import get_layer_by_name\n'), ((1557, 1588), 'numpy.random.rand', 'np.random.rand', (['in_dim', 'out_dim'], {}), '(in_dim, out_dim)\n', (1571, 1588), True, 'import numpy as np\n'), ((1616, 1639), 'numpy.random.rand', 'np.random.rand', (['out_dim'], {}), '(out_dim)\n', (1630, 1639), True, 'import numpy as np\n'), ((2226, 2243), 'numpy.abs', 'np.abs', (['(y_D - 
y_L)'], {}), '(y_D - y_L)\n', (2232, 2243), True, 'import numpy as np\n'), ((1729, 1765), 'lasagne_ext.utils.get_layer_by_name', 'get_layer_by_name', (['model_L', '"""dense0"""'], {}), "(model_L, 'dense0')\n", (1746, 1765), False, 'from lasagne_ext.utils import get_layer_by_name\n'), ((1785, 1821), 'lasagne_ext.utils.get_layer_by_name', 'get_layer_by_name', (['model_L', '"""dense0"""'], {}), "(model_L, 'dense0')\n", (1802, 1821), False, 'from lasagne_ext.utils import get_layer_by_name\n'), ((2114, 2140), 'numpy.random.rand', 'np.random.rand', (['(16)', 'in_dim'], {}), '(16, in_dim)\n', (2128, 2140), True, 'import numpy as np\n')] |
import numpy as np
from collections.abc import Iterable
import dash_vtk
import dash_html_components as html
from dash_vtk.utils import to_mesh_state
class MeshViewType:
    """Rendering-mode constants passed as the ``representation`` property of a
    dash_vtk ``GeometryRepresentation`` (see ``to_dash_vtk_mesh``)."""
    POINTS = 0
    WIREFRAME = 1
    SURFACE = 2
class ScalarMode:
    """Scalar-mode constants (apparently mirroring VTK's ``vtkMapper`` scalar
    modes). NOTE(review): not referenced anywhere in this module's visible
    code -- confirm external usage before removing."""
    DEFAULT = 0
    USE_POINT_DATA = 1
    USE_CELL_DATA = 2
    USE_POINT_FIELD_DATA = 3
    USE_CELL_FIELD_DATA = 4
    USE_FIELD_DATA = 5
class GetArray:
    """Array-lookup mode constants (by index vs. by name). NOTE(review): not
    referenced in this module's visible code -- confirm external usage."""
    BY_ID = 0
    BY_NAME = 1
def to_dash_vtk_mesh(
    mesh,
    representation=MeshViewType.SURFACE,
    opacity=1.0,
    color_attribute=None,
    showCubeAxes=False,
    colorMapPreset="erdc_rainbow_bright",
    color_range=None,
    custom_fields: dict = None,
    threshold: dict = None,
):
    """Convert a project mesh into a dash_vtk ``GeometryRepresentation``.

    Parameters
    ----------
    mesh
        Object exposing ``to_vtk_mesh()``; the returned VTK mesh must support
        ``threshold(...)`` and item assignment (assumption from usage below --
        confirm against the project's mesh class).
    representation : int
        One of the ``MeshViewType`` constants (POINTS/WIREFRAME/SURFACE).
    opacity : float
        Opacity of the rendered geometry, in [0, 1].
    color_attribute : str or None
        Field name used to color the mesh. When ``None`` the mesh is converted
        without scalar data and the color range is pinned to ``[0, 1]``.
    showCubeAxes : bool
        Whether to draw the axes grid around the geometry.
    colorMapPreset : str
        Name of the dash_vtk color-map preset.
    color_range : [min, max] or None
        Scalar range for the color map. When ``None`` and ``color_attribute``
        is given, it is derived from the attribute's nan-min/nan-max
        (best effort: a missing attribute key is silently ignored).
    custom_fields : dict or None
        Extra ``{name: array}`` fields assigned onto the VTK mesh before
        conversion (e.g. a synthetic per-cell "set_id" field).
    threshold : dict or None
        Keys ``value``, ``scalars``, ``invert``, ``continuous``, forwarded to
        the VTK ``threshold`` filter to keep only a sub-range of cells.

    Returns
    -------
    dash_vtk.GeometryRepresentation
    """
    vtk_mesh = mesh.to_vtk_mesh()
    if threshold is not None:
        vtk_mesh = vtk_mesh.threshold(
            value=threshold["value"],
            scalars=threshold["scalars"],
            invert=threshold["invert"],
            continuous=threshold["continuous"],
        )
    if custom_fields is not None:
        # Attach additional fields so they are available for coloring.
        for key, values in custom_fields.items():
            vtk_mesh[key] = values
    if color_attribute is None:
        mesh_state = to_mesh_state(vtk_mesh)
        color_range = [0, 1]
    else:
        mesh_state = to_mesh_state(vtk_mesh, field_to_keep=color_attribute)
        if color_range is None:
            try:
                color_range = [
                    np.nanmin(vtk_mesh[color_attribute]),
                    np.nanmax(vtk_mesh[color_attribute]),
                ]
            except KeyError:
                # Attribute missing from the mesh: leave color_range as None
                # and let dash_vtk use its default range (deliberate
                # best-effort behavior).
                pass
    return dash_vtk.GeometryRepresentation(
        showCubeAxes=showCubeAxes,
        colorMapPreset=colorMapPreset,
        colorDataRange=color_range,
        children=[dash_vtk.Mesh(state=mesh_state)],
        property={
            "edgeVisibility": True,
            "opacity": opacity,
            "representation": representation,
        },
    )
def vtk_view(
    mesh,
    color_attribute=None,
    sets=None,
    show_cube_axes=False,
    show_layers_in_range=None,
    bg_color: list = None,
):
    """Build a dash_vtk ``View`` of *mesh* plus optional overlays.

    The primary mesh is drawn as a faded wireframe whenever overlays (cell
    ``sets`` or a layer-id range) will be rendered on top of it; otherwise it
    is drawn as an opaque colored surface.

    Camera / screenshot notes (dash_vtk.View options):
    cameraParallelProjection (bool, default False), cameraPosition
    (default [0, 0, 1]), cameraViewUp (default [0, 1, 0]).
    Saving renders: https://discourse.vtk.org/t/save-window-rendering-results-to-image/3772/2
    https://kitware.github.io/vtk-js/api/Common_Core_ImageHelper.html
    https://github.com/Kitware/vtk-js/issues/1598
    """
    if bg_color is None:
        bg_color = [0.2, 0.3, 0.4]

    # Overlays demote the primary mesh to a translucent, uncolored wireframe.
    overlays_present = bool(sets) or bool(show_layers_in_range)
    if overlays_present:
        base_opacity = 0.25
        base_representation = MeshViewType.WIREFRAME
        base_color_attribute = None
    else:
        base_opacity = 1.0
        base_representation = MeshViewType.SURFACE
        base_color_attribute = color_attribute

    children = [
        to_dash_vtk_mesh(
            mesh,
            opacity=base_opacity,
            representation=base_representation,
            color_attribute=base_color_attribute,
            showCubeAxes=show_cube_axes,
        )
    ]

    if show_layers_in_range:
        # Opaque surface restricted to the requested layer-id range.
        children.append(
            to_dash_vtk_mesh(
                mesh,
                opacity=1.0,
                representation=MeshViewType.SURFACE,
                color_attribute=color_attribute,
                threshold={
                    "value": show_layers_in_range,
                    "scalars": "cell_layer_id",
                    "invert": True,
                    "continuous": True,
                },
            )
        )

    if isinstance(sets, Iterable):
        total_sets = len(sets)
        for set_id, set_mesh in enumerate(sets):
            # Tag every primary cell of this set with a 1-based set id so the
            # overlay can be colored per set.
            cell_count = len(set_mesh.primary_cells)
            tags = np.array([set_id + 1] * cell_count, dtype=int)
            children.append(
                to_dash_vtk_mesh(
                    set_mesh,
                    color_attribute="set_id",
                    custom_fields={"set_id": tags},
                    color_range=[1, total_sets],
                    opacity=1.0,
                    representation=MeshViewType.SURFACE,
                    showCubeAxes=False,
                )
            )

    return dash_vtk.View(
        children=children,
        background=bg_color,
    )
def get_layout(
    mesh,
    sets=None,
    color_with_attribute="Material Id",
    show_cube_axes=False,
    show_layers_in_range: tuple = None,
    bg_color: list = None,
    height: str = "calc(100vh - 0px)",
    width: str = "100%",
):
    """Return an ``html.Div`` of the given CSS size wrapping a ``vtk_view``.

    Parameters
    ----------
    mesh
        Project mesh object, forwarded to ``vtk_view``.
    sets
        Optional iterable of sub-meshes rendered as colored overlays.
    color_with_attribute : str
        Field name used for coloring. Any spelling of "material id"
        (case-insensitive, ignoring spaces/underscores/hyphens) is
        canonicalized to ``"Material Id"``.
    show_cube_axes : bool
        Whether to draw the axes grid.
    show_layers_in_range : tuple or None
        Layer-id range rendered opaquely on top of the wireframe base mesh.
    bg_color : list or None
        ``[r, g, b]`` background color, forwarded to ``vtk_view``.
        Bug fix: previously this parameter was accepted but never forwarded,
        so it was silently ignored.
    height, width : str
        CSS sizes of the wrapping Div.
    """
    if isinstance(color_with_attribute, str):
        # Canonicalize any spelling of "material id" to the exact field name.
        normalized = color_with_attribute.lower().strip()
        for sep in (" ", "_", "-"):
            normalized = normalized.replace(sep, "")
        if normalized == "materialid":
            color_with_attribute = "Material Id"
    return html.Div(
        style={"width": width, "height": height},
        children=[
            vtk_view(
                mesh,
                sets=sets,
                color_attribute=color_with_attribute,
                show_cube_axes=show_cube_axes,
                show_layers_in_range=show_layers_in_range,
                bg_color=bg_color,  # was dropped before this fix
            ),
        ],
    )
| [
"dash_vtk.utils.to_mesh_state",
"numpy.array",
"dash_vtk.Mesh",
"numpy.nanmax",
"dash_vtk.View",
"numpy.nanmin"
] | [((4438, 4491), 'dash_vtk.View', 'dash_vtk.View', ([], {'children': 'children', 'background': 'bg_color'}), '(children=children, background=bg_color)\n', (4451, 4491), False, 'import dash_vtk\n'), ((1226, 1249), 'dash_vtk.utils.to_mesh_state', 'to_mesh_state', (['vtk_mesh'], {}), '(vtk_mesh)\n', (1239, 1249), False, 'from dash_vtk.utils import to_mesh_state\n'), ((1310, 1364), 'dash_vtk.utils.to_mesh_state', 'to_mesh_state', (['vtk_mesh'], {'field_to_keep': 'color_attribute'}), '(vtk_mesh, field_to_keep=color_attribute)\n', (1323, 1364), False, 'from dash_vtk.utils import to_mesh_state\n'), ((1804, 1835), 'dash_vtk.Mesh', 'dash_vtk.Mesh', ([], {'state': 'mesh_state'}), '(state=mesh_state)\n', (1817, 1835), False, 'import dash_vtk\n'), ((1467, 1503), 'numpy.nanmin', 'np.nanmin', (['vtk_mesh[color_attribute]'], {}), '(vtk_mesh[color_attribute])\n', (1476, 1503), True, 'import numpy as np\n'), ((1525, 1561), 'numpy.nanmax', 'np.nanmax', (['vtk_mesh[color_attribute]'], {}), '(vtk_mesh[color_attribute])\n', (1534, 1561), True, 'import numpy as np\n'), ((3625, 3670), 'numpy.array', 'np.array', (['([set_id + 1] * num_cells)'], {'dtype': 'int'}), '([set_id + 1] * num_cells, dtype=int)\n', (3633, 3670), True, 'import numpy as np\n')] |
"""
Copyright (C) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import ngraph
from .model import Model
from .utils import Detection, resize_image, resize_image_letterbox, load_labels
# Default anchor priors per YOLO variant, stored flattened as consecutive
# (width, height) pairs and consumed in _parse_yolo_region via
# anchors[2 * n] / anchors[2 * n + 1]. Units are presumably pixels at the
# network input resolution (standard YOLO configs) -- confirm if retraining.
ANCHORS = {
    'YOLOV3': [10.0, 13.0, 16.0, 30.0, 33.0, 23.0,
               30.0, 61.0, 62.0, 45.0, 59.0, 119.0,
               116.0, 90.0, 156.0, 198.0, 373.0, 326.0],
    'YOLOV4': [12.0, 16.0, 19.0, 36.0, 40.0, 28.0,
               36.0, 75.0, 76.0, 55.0, 72.0, 146.0,
               142.0, 110.0, 192.0, 243.0, 459.0, 401.0],
    'YOLOV4-TINY': [10.0, 14.0, 23.0, 27.0, 37.0, 58.0,
                    81.0, 82.0, 135.0, 169.0, 344.0, 319.0]
}
class YOLO(Model):
    """YOLOv3-family detector built on the base ``Model`` wrapper.

    Reads the region-layer parameters from the network graph, resizes input
    frames to the network resolution, decodes the raw region outputs into
    ``Detection`` boxes, and applies greedy per-class IoU suppression.
    """
    class Params:
        """Parameters of one YOLO region (output) layer, taken from the
        layer's attributes with sample defaults."""
        # Magic numbers are copied from yolo samples
        def __init__(self, param, sides):
            self.num = param.get('num', 3)
            self.coords = param.get('coord', 4)
            self.classes = param.get('classes', 80)
            self.sides = sides
            self.anchors = param.get('anchors', ANCHORS['YOLOV3'])
            self.isYoloV3 = False
            mask = param.get('mask', None)
            if mask:
                # A mask selects which anchor pairs this output layer uses.
                self.num = len(mask)
                masked_anchors = []
                for idx in mask:
                    masked_anchors += [self.anchors[idx * 2], self.anchors[idx * 2 + 1]]
                self.anchors = masked_anchors
                self.isYoloV3 = True # Weak way to determine but the only one.
    def __init__(self, ie, model_path, labels=None, keep_aspect_ratio=False, threshold=0.5, iou_threshold=0.5):
        """Load the network and cache preprocessing/decoding parameters.

        :param ie: inference-engine instance forwarded to the base ``Model``.
        :param model_path: path to the model IR.
        :param labels: list/tuple of class names, or a path/handle accepted by
            ``load_labels``, or None.
        :param keep_aspect_ratio: if True, letterbox-resize inputs (and undo
            the letterbox in ``postprocess``).
        :param threshold: confidence threshold for keeping detections.
        :param iou_threshold: IoU threshold for suppression in ``_filter``.
        """
        super().__init__(ie, model_path)
        self.is_tiny = self.net.name.lower().find('tiny') != -1 # Weak way to distinguish between YOLOv4 and YOLOv4-tiny
        if isinstance(labels, (list, tuple)):
            self.labels = labels
        else:
            self.labels = load_labels(labels) if labels else None
        self.threshold = threshold
        self.iou_threshold = iou_threshold
        self.keep_aspect_ratio = keep_aspect_ratio
        self.resize_image = resize_image_letterbox if self.keep_aspect_ratio else resize_image
        assert len(self.net.input_info) == 1, "Expected 1 input blob"
        self.image_blob_name = next(iter(self.net.input_info))
        # Detect the input layout from the channel dimension: NCHW if dim 1
        # is 3 channels, otherwise NHWC.
        if self.net.input_info[self.image_blob_name].input_data.shape[1] == 3:
            self.n, self.c, self.h, self.w = self.net.input_info[self.image_blob_name].input_data.shape
            self.nchw_shape = True
        else:
            self.n, self.h, self.w, self.c = self.net.input_info[self.image_blob_name].input_data.shape
            self.nchw_shape = False
        self.yolo_layer_params = self.

_get_output_info() if False else self._get_output_info()
    def _get_output_info(self):
        """Map each output layer name to ``(shape, Params)``, reading region
        attributes from the nGraph function of the network."""
        def get_parent(node):
            return node.inputs()[0].get_source_output().get_node()
        ng_func = ngraph.function_from_cnn(self.net)
        output_info = {}
        for node in ng_func.get_ordered_ops():
            layer_name = node.get_friendly_name()
            if layer_name not in self.net.outputs:
                continue
            # The region layer's input (parent) carries the blob shape.
            shape = list(get_parent(node).shape)
            yolo_params = self.Params(node._get_attributes(), shape[2:4])
            output_info[layer_name] = (shape, yolo_params)
        return output_info
    def preprocess(self, inputs):
        """Resize a frame to the network resolution.

        Returns ``(dict_inputs, meta)`` where meta records the original and
        resized shapes needed by ``postprocess``.
        """
        image = inputs
        resized_image = self.resize_image(image, (self.w, self.h))
        meta = {'original_shape': image.shape,
                'resized_shape': resized_image.shape}
        if self.nchw_shape:
            resized_image = resized_image.transpose((2, 0, 1)) # Change data layout from HWC to CHW
            resized_image = resized_image.reshape((self.n, self.c, self.h, self.w))
        else:
            resized_image = resized_image.reshape((self.n, self.h, self.w, self.c))
        dict_inputs = {self.image_blob_name: resized_image}
        return dict_inputs, meta
    @staticmethod
    def _parse_yolo_region(predictions, input_size, params, threshold, multiple_labels=True):
        """Decode one region layer's raw output into ``Detection`` objects
        with coordinates normalized to [0, 1]."""
        # ------------------------------------------ Extracting layer parameters ---------------------------------------
        objects = []
        size_normalizer = input_size if params.isYoloV3 else params.sides
        bbox_size = params.coords + 1 + params.classes
        # ------------------------------------------- Parsing YOLO Region output ---------------------------------------
        for row, col, n in np.ndindex(params.sides[0], params.sides[1], params.num):
            # Raw values for one candidate bounding box.
            bbox = predictions[0, n * bbox_size:(n + 1) * bbox_size, row, col]
            x, y, width, height, object_probability = bbox[:5]
            class_probabilities = bbox[5:]
            if object_probability < threshold:
                continue
            # Cell offsets -> normalized center coordinates.
            x = (col + x) / params.sides[1]
            y = (row + y) / params.sides[0]
            # np.exp can overflow for large raw values; skip such boxes.
            try:
                width = np.exp(width)
                height = np.exp(height)
            except OverflowError:
                continue
            # Depending on the topology, sizes are normalized by the feature
            # map (pre-YOLOv3) or by the input shape (YOLOv3).
            width = width * params.anchors[2 * n] / size_normalizer[0]
            height = height * params.anchors[2 * n + 1] / size_normalizer[1]
            if multiple_labels:
                # One detection per class above threshold.
                for class_id, class_probability in enumerate(class_probabilities):
                    confidence = object_probability * class_probability
                    if confidence > threshold:
                        objects.append(Detection(x - width / 2, y - height / 2, x + width / 2, y + height / 2,
                                                 confidence, class_id))
            else:
                # Single best class only.
                class_id = np.argmax(class_probabilities)
                confidence = class_probabilities[class_id] * object_probability
                if confidence < threshold:
                    continue
                objects.append(Detection(x - width / 2, y - height / 2, x + width / 2, y + height / 2,
                                         confidence.item(), class_id.item()))
        return objects
    @staticmethod
    def _filter(detections, iou_threshold):
        """Greedy same-class IoU suppression: zero out the score of any box
        overlapping a higher-scored box of the same class, then drop them."""
        def iou(box_1, box_2):
            width_of_overlap_area = min(box_1.xmax, box_2.xmax) - max(box_1.xmin, box_2.xmin)
            height_of_overlap_area = min(box_1.ymax, box_2.ymax) - max(box_1.ymin, box_2.ymin)
            if width_of_overlap_area < 0 or height_of_overlap_area < 0:
                area_of_overlap = 0
            else:
                area_of_overlap = width_of_overlap_area * height_of_overlap_area
            box_1_area = (box_1.ymax - box_1.ymin) * (box_1.xmax - box_1.xmin)
            box_2_area = (box_2.ymax - box_2.ymin) * (box_2.xmax - box_2.xmin)
            area_of_union = box_1_area + box_2_area - area_of_overlap
            if area_of_union == 0:
                return 0
            return area_of_overlap / area_of_union
        detections = sorted(detections, key=lambda obj: obj.score, reverse=True)
        for i in range(len(detections)):
            if detections[i].score == 0:
                continue
            for j in range(i + 1, len(detections)):
                # We perform IOU only on objects of same class
                if detections[i].id != detections[j].id:
                    continue
                if iou(detections[i], detections[j]) > iou_threshold:
                    detections[j].score = 0
        return [det for det in detections if det.score > 0]
    @staticmethod
    def _resize_detections(detections, original_shape):
        """Scale normalized detections to image pixels; ``original_shape`` is
        ``(width, height)`` (callers pass ``shape[1::-1]``)."""
        for detection in detections:
            detection.xmin *= original_shape[0]
            detection.xmax *= original_shape[0]
            detection.ymin *= original_shape[1]
            detection.ymax *= original_shape[1]
        return detections
    @staticmethod
    def _resize_detections_letterbox(detections, original_shape, resized_shape):
        """Undo the letterbox padding/scale, then map detections to pixels of
        the original image; both shapes are ``(width, height)``."""
        scales = [x / y for x, y in zip(resized_shape, original_shape)]
        scale = min(scales)
        scales = (scale / scales[0], scale / scales[1])
        offset = [0.5 * (1 - x) for x in scales]
        for detection in detections:
            detection.xmin = ((detection.xmin - offset[0]) / scales[0]) * original_shape[0]
            detection.xmax = ((detection.xmax - offset[0]) / scales[0]) * original_shape[0]
            detection.ymin = ((detection.ymin - offset[1]) / scales[1]) * original_shape[1]
            detection.ymax = ((detection.ymax - offset[1]) / scales[1]) * original_shape[1]
        return detections
    def postprocess(self, outputs, meta):
        """Decode all region outputs, suppress duplicates, and map boxes back
        to original-image pixel coordinates."""
        detections = []
        for layer_name in self.yolo_layer_params.keys():
            out_blob = outputs[layer_name]
            layer_params = self.yolo_layer_params[layer_name]
            # Restore the region layer's 4D shape before decoding.
            out_blob.shape = layer_params[0]
            detections += self._parse_yolo_region(out_blob, meta['resized_shape'], layer_params[1], self.threshold)
        detections = self._filter(detections, self.iou_threshold)
        if self.keep_aspect_ratio:
            detections = self._resize_detections_letterbox(detections, meta['original_shape'][1::-1],
                                                           meta['resized_shape'][1::-1])
        else:
            detections = self._resize_detections(detections, meta['original_shape'][1::-1])
        return detections
class YoloV4(YOLO):
    """YOLOv4 variant: output layers are plain convolutions (no RegionYolo
    attributes), so region parameters are inferred from blob shapes and the
    decoder applies sigmoid activations itself."""
    class Params:
        """Parameters of one YOLOv4 output layer, built from explicit values
        rather than layer attributes."""
        def __init__(self, classes, num, sides, anchors, mask):
            self.num = num
            self.coords = 4
            self.classes = classes
            self.sides = sides
            # Keep only the anchor (w, h) pairs selected by this layer's mask.
            masked_anchors = []
            for idx in mask:
                masked_anchors += [anchors[idx * 2], anchors[idx * 2 + 1]]
            self.anchors = masked_anchors
    def __init__(self, ie, model_path, labels=None, keep_aspect_ratio=False, threshold=0.5, iou_threshold=0.5,
                 anchors=None, masks=None):
        """Same as ``YOLO.__init__`` plus optional explicit ``anchors`` and
        ``masks``; when omitted they default per variant (tiny vs. full) in
        ``_get_output_info``."""
        self.anchors = anchors
        self.masks = masks
        super().__init__(ie, model_path, labels, keep_aspect_ratio, threshold, iou_threshold)
    def _get_output_info(self):
        """Map each output layer name to ``(shape, Params)``, deriving the
        class count from the blob's channel dimension."""
        if not self.anchors:
            self.anchors = ANCHORS['YOLOV4-TINY'] if self.is_tiny else ANCHORS['YOLOV4']
        if not self.masks:
            self.masks = [1, 2, 3, 3, 4, 5] if self.is_tiny else [0, 1, 2, 3, 4, 5, 6, 7, 8]
        # Sort outputs by spatial size (descending) so masks line up with the
        # finest-to-coarsest feature maps.
        outputs = sorted(self.net.outputs.items(), key=lambda x: x[1].shape[2], reverse=True)
        output_info = {}
        num = 3
        for i, (name, layer) in enumerate(outputs):
            shape = layer.shape
            # Channels = num * (4 coords + 1 objectness + classes).
            classes = shape[1] // num - 5
            if shape[1] % num != 0:
                raise RuntimeError("The output blob {} has wrong 2nd dimension".format(name))
            yolo_params = self.Params(classes, num, shape[2:4], self.anchors, self.masks[i*num : (i+1)*num])
            output_info[name] = (shape, yolo_params)
        return output_info
    @staticmethod
    def _parse_yolo_region(predictions, input_size, params, threshold, multiple_labels=True):
        """Decode one raw (pre-activation) YOLOv4 output layer into
        ``Detection`` objects; applies sigmoid to x/y, objectness and class
        scores, unlike the YOLOv3 path where the IR layer already does so."""
        def sigmoid(x):
            return 1. / (1. + np.exp(-x))
        # ------------------------------------------ Extracting layer parameters ---------------------------------------
        objects = []
        bbox_size = params.coords + 1 + params.classes
        # ------------------------------------------- Parsing YOLO Region output ---------------------------------------
        for row, col, n in np.ndindex(params.sides[0], params.sides[1], params.num):
            # Raw values for one candidate bounding box.
            bbox = predictions[0, n * bbox_size:(n + 1) * bbox_size, row, col]
            x, y = sigmoid(bbox[:2])
            width, height = bbox[2:4]
            object_probability = sigmoid(bbox[4])
            class_probabilities = sigmoid(bbox[5:])
            if object_probability < threshold:
                continue
            # Cell offsets -> normalized center coordinates.
            x = (col + x) / params.sides[1]
            y = (row + y) / params.sides[0]
            # np.exp can overflow for large raw values; skip such boxes.
            try:
                width = np.exp(width)
                height = np.exp(height)
            except OverflowError:
                continue
            width = width * params.anchors[2 * n] / input_size[0]
            height = height * params.anchors[2 * n + 1] / input_size[1]
            if multiple_labels:
                # One detection per class above threshold.
                for class_id, class_probability in enumerate(class_probabilities):
                    confidence = object_probability * class_probability
                    if confidence > threshold:
                        objects.append(Detection(x - width / 2, y - height / 2, x + width / 2, y + height / 2,
                                                 confidence, class_id))
            else:
                # Single best class only.
                class_id = np.argmax(class_probabilities)
                confidence = class_probabilities[class_id] * object_probability
                if confidence < threshold:
                    continue
                objects.append(Detection(x - width / 2, y - height / 2, x + width / 2, y + height / 2,
                                         confidence.item(), class_id.item()))
        return objects
| [
"numpy.exp",
"numpy.argmax",
"ngraph.function_from_cnn",
"numpy.ndindex"
] | [((3345, 3379), 'ngraph.function_from_cnn', 'ngraph.function_from_cnn', (['self.net'], {}), '(self.net)\n', (3369, 3379), False, 'import ngraph\n'), ((4952, 5008), 'numpy.ndindex', 'np.ndindex', (['params.sides[0]', 'params.sides[1]', 'params.num'], {}), '(params.sides[0], params.sides[1], params.num)\n', (4962, 5008), True, 'import numpy as np\n'), ((12191, 12247), 'numpy.ndindex', 'np.ndindex', (['params.sides[0]', 'params.sides[1]', 'params.num'], {}), '(params.sides[0], params.sides[1], params.num)\n', (12201, 12247), True, 'import numpy as np\n'), ((5595, 5608), 'numpy.exp', 'np.exp', (['width'], {}), '(width)\n', (5601, 5608), True, 'import numpy as np\n'), ((5634, 5648), 'numpy.exp', 'np.exp', (['height'], {}), '(height)\n', (5640, 5648), True, 'import numpy as np\n'), ((6438, 6468), 'numpy.argmax', 'np.argmax', (['class_probabilities'], {}), '(class_probabilities)\n', (6447, 6468), True, 'import numpy as np\n'), ((12906, 12919), 'numpy.exp', 'np.exp', (['width'], {}), '(width)\n', (12912, 12919), True, 'import numpy as np\n'), ((12945, 12959), 'numpy.exp', 'np.exp', (['height'], {}), '(height)\n', (12951, 12959), True, 'import numpy as np\n'), ((13620, 13650), 'numpy.argmax', 'np.argmax', (['class_probabilities'], {}), '(class_probabilities)\n', (13629, 13650), True, 'import numpy as np\n'), ((11834, 11844), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (11840, 11844), True, 'import numpy as np\n')] |
import numpy as np
import pydart2 as pydart
import QPsolver
import IKsolve_one
import momentum_con
import motionPlan
from scipy import optimize
import yulTrajectoryOpt
from fltk import *
from PyCommon.modules.GUI import hpSimpleViewer as hsv
from PyCommon.modules.Renderer import ysRenderer as yr
from PyCommon.modules.Simulator import hpDartQpSimulator as hqp
# Module-level debug-draw buffers (vectors/forces and their origins collected
# during simulation). NOTE(review): presumably consumed by a viewer/render
# callback outside this excerpt -- confirm before removing any of them.
render_vector = []
render_vector_origin = []
push_force = []
push_force_origin = []
blade_force = []
blade_force_origin = []
rd_footCenter = []
class State(object):
    """One pose keyframe of the motion state machine.

    Holds a label (``name``), the time the state is held (``dt``), two
    feedback coefficients (``c_d``, ``c_v``) and the target joint-angle
    vector (``angles``), all stored verbatim as attributes.
    """
    def __init__(self, name, dt, c_d, c_v, angles):
        # Store every constructor argument under the attribute of the
        # same name, unmodified.
        for attr_name, attr_value in (
            ("name", name),
            ("dt", dt),
            ("c_d", c_d),
            ("c_v", c_v),
            ("angles", angles),
        ):
            setattr(self, attr_name, attr_value)
class MyWorld(pydart.World):
    def __init__(self, ):
        """Build the skating world.

        Loads the skeleton scene, resolves the DOF index groups used to write
        pose targets, defines the sequence of pose keyframes (``State`` list)
        and creates the QP tracking controller plus the momentum controller.

        NOTE(review): skeleton roles are assumed from usage in this file --
        skeletons[0] carries the ground body, skeletons[2] is the controlled
        character and skeletons[3] is the reference/target skeleton whose
        pose is set to the current state's angles; confirm against the .skel
        scene file.
        """
        # 1 kHz simulation timestep.
        pydart.World.__init__(self, 1.0 / 1000.0, './data/skel/cart_pole_blade_3dof.skel')
        # pydart.World.__init__(self, 1.0 / 1000.0, './data/skel/cart_pole_blade.skel')
        # pydart.World.__init__(self, 1.0 / 2000.0, './data/skel/cart_pole.skel')
        self.force = None
        self.duration = 0
        # Low friction to approximate ice under the skate blades.
        self.skeletons[0].body('ground').set_friction_coeff(0.02)
        skel = self.skeletons[2]
        # print("mass: ", skel.m, "kg")
        # print('[Joint]')
        # for joint in skel.joints:
        #     print("\t" + str(joint))
        #     print("\t\tparent = " + str(joint.parent_bodynode))
        #     print("\t\tchild = " + str(joint.child_bodynode))
        #     print("\t\tdofs = " + str(joint.dofs))
        # skel.joint("j_abdomen").set_position_upper_limit(10, 0.0)
        # skel.joint("j_heel_left").set_position_upper_limit(0, 0.0)
        # skel.joint("j_heel_left").set_position_lower_limit(0, -0.0)
        # DOF index groups used below to address joint subsets of the target
        # poses by name.
        pelvis_x = skel.dof_indices((["j_pelvis_rot_x"]))
        pelvis = skel.dof_indices((["j_pelvis_rot_y", "j_pelvis_rot_z"]))
        upper_body = skel.dof_indices(["j_abdomen_x", "j_abdomen_y", "j_abdomen_z"])
        spine = skel.dof_indices(["j_spine_x", "j_spine_y", "j_spine_z"])
        right_leg = skel.dof_indices(["j_thigh_right_x", "j_thigh_right_y", "j_thigh_right_z", "j_shin_right_z"])
        left_leg = skel.dof_indices(["j_thigh_left_x", "j_thigh_left_y", "j_thigh_left_z", "j_shin_left_z"])
        knee = skel.dof_indices(["j_shin_left_x", "j_shin_right_x"])
        arms = skel.dof_indices(["j_bicep_left_x", "j_bicep_right_x"])
        foot = skel.dof_indices(["j_heel_left_x", "j_heel_left_y", "j_heel_left_z", "j_heel_right_x", "j_heel_right_y", "j_heel_right_z"])
        leg_y = skel.dof_indices(["j_thigh_right_y", "j_thigh_left_y"])
        # blade = skel.dof_indices(["j_heel_right_2"])
        # #----------------------------------
        # # pushing side to side new (180718)
        # #----------------------------------
        #
        # Keyframe poses: each s*q vector is a full-DOF target pose; State
        # bundles it with a hold duration and feedback coefficients.
        s0q = np.zeros(skel.ndofs)
        # s0q[pelvis] = 0., -0.
        # s0q[upper_body] = 0.0, -0.5
        s0q[right_leg] = -0., -0., -0.0, -0.0
        s0q[left_leg] = 0., 0., 0.0, -0.0
        # s0q[leg_y] = -0.785, 0.785
        s0q[arms] = 1.5, -1.5
        # s0q[foot] = 0.1, 0.0, 0.1, -0.0
        state0 = State("state0", 0.2, 0.0, 0.2, s0q)
        # s01q = np.zeros(skel.ndofs)
        # # s01q[pelvis] = 0., -0.3
        # s01q[upper_body] = 0.0, 0., -0.2
        # s01q[left_leg] = 0.1, 0.3, 0.3, -0.3
        # # s01q[right_leg] = -0.0, -0.785, -0.66, -0.0
        # s01q[right_leg] = -0.2, -0.9, 0.2, -0.5
        # s01q[arms] = 1.5, -1.5
        # # s01q[blade] = -0.3
        # s01q[foot] = -0.0, 0.0, 0., 0.0, 0.0, 0.
        # state01 = State("state01", 0.5, 2.2, 0.0, s01q)
        s01q = np.zeros(skel.ndofs)
        # s01q[pelvis] = 0., -0.3
        s01q[upper_body] = 0.0, 0., -0.5
        s01q[spine] = 0.0, 0., 0.5
        s01q[left_leg] = -0., 0., 0.3, -0.5
        # s01q[right_leg] = -0.0, -0.785, -0.66, -0.0
        s01q[right_leg] = -0., -0., 0.3, -0.5
        s01q[arms] = 1.5, -1.5
        # s01q[blade] = -0.3
        s01q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
        state01 = State("state01", 0.5, 2.2, 0.0, s01q)
        # s011q = np.zeros(skel.ndofs)
        # # s01q[pelvis] = 0., -0.3
        # s011q[upper_body] = 0.0, 0., -0.5
        # s011q[spine] = 0.0, 0., 0.5
        # s011q[left_leg] = -0.1, 0., 0.3, -0.5
        # # s01q[right_leg] = -0.0, -0.785, -0.66, -0.0
        # s011q[right_leg] = -0., -0.785, 0.0, -0.5
        # s011q[arms] = 1.5, -1.5
        # # s01q[blade] = -0.3
        # s011q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
        # state011 = State("state011", 0.5, 2.2, 0.0, s011q)
        #
        # s1q = np.zeros(skel.ndofs)
        # # s1q[pelvis] = 0., -0.1
        # s1q[upper_body] = 0.0, 0., -0.5
        # s1q[spine] = 0.0, 0., 0.5
        # s1q[left_leg] = -0.1, 0., 0.3, -0.5
        # # s1q[right_leg] = -0.0, -0.785, -0.66, -0.0
        # s1q[right_leg] = -0., -0.785, 0., -0.5
        # s1q[arms] = 1.5, -1.5
        # # s1q[blade] = -0.3
        # s1q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
        # state1 = State("state1", 0.5, 2.2, 0.0, s1q)
        #
        # s2q = np.zeros(skel.ndofs)
        # # s2q[pelvis] = -0.3, -0.0
        # s2q[upper_body] = -0., 0, -0.
        # s2q[left_leg] = 0., 0., 0., -0.17
        # s2q[right_leg] = -0.4, -0.785, -0.2, -0.17
        # s2q[arms] = 1.5, -1.5
        # s2q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, -0.
        # state2 = State("state2", 0.5, 0.0, 0.2, s2q)
        #
        # s3q = np.zeros(skel.ndofs)
        # # s3q[upper_body] = 0.0, 0., 0.3
        # s3q[left_leg] = -0.1, -0., 0., -0.
        # s3q[right_leg] = 0.0, 0., 0.5, -1.5
        # s3q[arms] = 1.5, -1.5
        # # s3q[foot] = -0.0, 0.0, 0.3, 0.0, 0.0, -0.
        # state3 = State("state3", 1.0, 0.0, 0.2, s3q)
        # #s1q[pelvis] = -0.3", 0.2, 2.2, 0.0, s3q)
        # self.state_list = [state0, state01, state011, state1, state2, state3]
        s011q = np.zeros(skel.ndofs)
        # s01q[pelvis] = 0., -0.3
        s011q[upper_body] = 0.0, 0., -0.5
        s011q[spine] = 0.0, 0., 0.5
        s011q[left_leg] = -0., 0., 0.3, -0.5
        # s01q[right_leg] = -0.0, -0.785, -0.66, -0.0
        s011q[right_leg] = -0., -0., 0.3, -0.5
        s011q[arms] = 1.5, -1.5
        # s01q[blade] = -0.3
        s011q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
        state011 = State("state011", 0.5, 2.2, 0.0, s011q)
        s1q = np.zeros(skel.ndofs)
        # s1q[pelvis] = 0., -0.1
        s1q[upper_body] = 0., 0., -0.2
        s1q[spine] = 0.0, 0., 0.2
        s1q[left_leg] = -0.1, 0., 0.3, -0.5
        # s1q[right_leg] = -0.0, -0.785, -0.66, -0.0
        s1q[right_leg] = 0.1, -0., 0.9, -1.2
        s1q[arms] = 1.5, -1.5
        # s1q[blade] = -0.3
        s1q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
        state1 = State("state1", 0.8, 2.2, 0.0, s1q)
        s2q = np.zeros(skel.ndofs)
        # s2q[pelvis] = -0.3, -0.0
        s2q[upper_body] = -0., 0, -0.3
        s2q[left_leg] = 0., 0., 0.3, -0.5
        s2q[right_leg] = -0., -0.2, 0.3, -0.2
        s2q[arms] = 1.5, -1.5
        s2q[foot] = -0.0, 0.0, 0.2, 0.0, -0., 0.2
        # state2 = State("state2", 0.25, 0.0, 0.2, s2q)
        state2 = State("state2", 0.3, 0.0, 0.2, s2q)
        # s02q = np.zeros(skel.ndofs)
        # # s02q[pelvis] = -0.3, -0.0
        # s02q[upper_body] = -0., 0, -0.3
        # s02q[left_leg] = 0., 0., 0.2, -0.5
        # s02q[right_leg] = -0., -0., -0.2, -0.2
        # s02q[arms] = 1.5, -1.5
        # s02q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.2
        # # state02 = State("state02", 0.25, 0.0, 0.2, s02q)
        # state02 = State("state02", 0.3, 0.0, 0.2, s02q)
        s3q = np.zeros(skel.ndofs)
        s3q[upper_body] = 0.0, 0., -0.3
        s3q[spine] = 0.0, 0., 0.3
        s3q[left_leg] = 0.1, -0., 0.9, -1.2
        s3q[right_leg] = -0., -0., 0.3, -0.3
        s3q[arms] = 1.5, -1.5
        s3q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.
        state3 = State("state3", 0.8, 0.0, 0.2, s3q)
        #s1q[pelvis] = -0.3", 0.2, 2.2, 0.0, s3q)
        s03q = np.zeros(skel.ndofs)
        s03q[upper_body] = 0.0, 0., -0.3
        # s03q[spine] = 0.0, 0., 0.3
        s03q[left_leg] = 0.1, 0.3, 0.7, -0.3
        s03q[right_leg] = -0., 0., 0.3, -0.3
        s03q[arms] = 1.5, -1.5
        s03q[foot] = -0.0, 0.0, 0.2, 0.0, 0.0, 0.
        state03 = State("state03", 0.5, 0.0, 0.2, s03q)
        # s1q[pelvis] = -0.3", 0.2, 2.2, 0.0, s3q)
        s4q = np.zeros(skel.ndofs)
        s4q[arms] = 1.5, -1.5
        # s4q[upper_body] = 0., 0., 0.
        # s4q[left_leg] = 0.1, -0., 0., -0.
        # s4q[right_leg] = 0., -0., 0.5, -0.5
        # s4q[knee] = 0., -0.2
        state4 = State("state4", 0.5, 0.0, 0.2, s4q)
        s04q = np.zeros(skel.ndofs)
        s04q[arms] = 1.5, -1.5
        # s04q[left_leg] = 0.2, -0., 0., -0.
        state04 = State("state04", 10.0, 0.0, 0.2, s04q)
        # The state machine cycles through this list (see step()); the
        # stroke states are repeated to produce two push cycles.
        self.state_list = [state0, state01, state011, state1, state2, state3, state03, state1, state2, state3, state03]
        # self.state_list = [state0, state01, state011, state1, state2, state3, state03, state4, state04]
        # self.state_list = [state0, state1]
        state_num = len(self.state_list)
        self.state_num = state_num
        # print("state_num: ", state_num)
        self.curr_state = self.state_list[0]
        self.elapsedTime = 0.0
        self.curr_state_index = 0
        # print("backup angle: ", backup_q)
        # print("cur angle: ", self.curr_state.angles)
        # QP tracking controller plus momentum controller; the reference
        # skeleton (skeletons[3]) is posed at the current target.
        self.controller = QPsolver.Controller(skel, self.skeletons[3], self.dt, self.curr_state.name)
        self.mo_con = momentum_con.momentum_control(self.skeletons[2], self.skeletons[3], self.time_step())
        self.skeletons[3].set_positions(self.curr_state.angles)
        # self.skeletons[3].set_positions(np.zeros(skel.ndofs))
        # self.ik = IKsolve_one.IKsolver(self.skeletons[2], self.dt)
        # merged_target = self.curr_state.angles
        # self.ik.update_target(self.curr_state.name)
        # merged_target = np.zeros(skel.ndofs)
        # merged_target[:6] = self.curr_state.angles[:6]
        # merged_target[6:18] = self.ik.solve()
        # merged_target[18:] = self.curr_state.angles[18:]
        # print("ik res: ", self.ik.solve())
        # print("merged_target: ", merged_target)
        # self.controller.target = merged_target
        self.controller.target = self.curr_state.angles
        # self.controller.target = skel.q
        # skel.set_controller(self.controller)
        print('create controller OK')
        # Per-step contact bookkeeping filled in step().
        self.contact_force = []
        self.contactPositionLocals = []
        self.bodyIDs = []
        # print("dof: ", skel.ndofs)
def step(self):
# print("self.curr_state: ", self.curr_state.name)
# if self.curr_state.name == "state2" or self.curr_state.name == "state3":
# if self.curr_state.name == "state1":
# if self.time() > 1.0 and self.time() < 2.0:
# self.force = np.array([20.0, 0.0, 0.0])
# else:
# self.force = None
# print("left foot pos:", self.skeletons[2].body('h_blade_left').to_world([0.0, 0.0, 0.0]))
# self.force = np.array([20.0, 0.0, 0.0])
# self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
# if self.curr_state.name == "state1":
# self.force = np.array([10.0, 0.0, 0.0])
# else:
# self.force = None
self.controller.cur_state = self.curr_state.name
if self.force is not None:
self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
# self.skeletons[2].body('h_spine').add_ext_force(self.force)
# if self.curr_state.name == "state2":
# self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
# if self.curr_state.name == "state3":
# self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
# if self.force is not None and self.duration >= 0:
# self.duration -= 1
# self.skeletons[2].body('h_spine').add_ext_force(self.force)
#a = self.skeletons[2].get_positions()
self.skeletons[3].set_positions(self.curr_state.angles)
# self.skeletons[3].set_positions(np.zeros(skel.ndofs))
if self.curr_state.dt < self.time() - self.elapsedTime:
# print("change the state!!!", self.curr_state_index)
self.curr_state_index = self.curr_state_index + 1
self.curr_state_index = self.curr_state_index % self.state_num
self.elapsedTime = self.time()
self.curr_state = self.state_list[self.curr_state_index]
# print("state_", self.curr_state_index)
# print(self.curr_state.angles)
# self.controller.target = skel.q
# self.controller.target = self.curr_state.angles
# print("Current state name: ", self.curr_state.name)
# if self.curr_state.name == "state2":
# self.ik.update_target(self.curr_state.name)
# merged_target = np.zeros(skel.ndofs)
# merged_target[:12] = self.curr_state.angles[:12]
# merged_target[12:18] = self.ik.solve()
# merged_target[18:] = self.curr_state.angles[18:]
# # print("ik res: ", self.ik.solve())
# # print("merged_target: ", merged_target)
# self.controller.target = merged_target
# # self.controller.target = self.curr_state.angles
# if self.curr_state.name == "state2":
# self.ik.update_target(self.curr_state.name)
# merged_target = np.zeros(skel.ndofs)
# merged_target[:6] = self.curr_state.angles[:6]
# merged_target[6:12] = self.ik.solve()
# merged_target[12:] = self.curr_state.angles[12:]
# # print("ik res: ", self.ik.solve())
# # print("merged_target: ", merged_target)
# # self.controller.target = merged_target
# self.controller.target = self.curr_state.angles
# else:
# # self.controller.target = self.curr_state.angles
# self.ik.update_target(self.curr_state.name)
# merged_target = np.zeros(skel.ndofs)
# merged_target[:6] = self.curr_state.angles[:6]
# merged_target[6:18] = self.ik.solve()
# merged_target[18:] = self.curr_state.angles[18:]
# # print("ik res: ", self.ik.solve())
# # print("merged_target: ", merged_target)
# # self.controller.target = merged_target
# self.controller.target = self.curr_state.angles
self.controller.target = self.curr_state.angles
# self.controller.target = self.curr_state.angles
# print(self.curr_state.angles)
contact_list = self.mo_con.check_contact()
# gain_value = 25.0
gain_value = 50.0
# if self.mo_con.contact_num == 0:
# ndofs = skel.num_dofs()
# h = self.time_step()
# Kp = np.diagflat([0.0] * 6 + [gain_value] * (ndofs - 6))
# Kd = np.diagflat([0.0] * 6 + [2.*(gain_value**.5)] * (ndofs - 6))
# invM = np.linalg.inv(skel.M + Kd * h)
# p = -Kp.dot(skel.q - self.curr_state.angles + skel.dq * h)
# d = -Kd.dot(skel.dq)
# qddot = invM.dot(-skel.c + p + d + skel.constraint_forces())
# des_accel = p + d + qddot
# else:
# # print("contact num: ", self.mo_con.contact_num )
# self.mo_con.target = self.curr_state.angles
# des_accel = self.mo_con.compute(contact_list)
ndofs = skel.num_dofs()
h = self.time_step()
Kp = np.diagflat([0.0] * 6 + [gain_value] * (ndofs - 6))
Kd = np.diagflat([0.0] * 6 + [2. * (gain_value ** .5)] * (ndofs - 6))
invM = np.linalg.inv(skel.M + Kd * h)
p = -Kp.dot(skel.q - self.curr_state.angles + skel.dq * h)
d = -Kd.dot(skel.dq)
qddot = invM.dot(-skel.c + p + d + skel.constraint_forces())
des_accel = p + d + qddot
ddc = np.zeros(6)
# if self.curr_state.name == "state3":
# # print("com control : state3!!", skel.body('h_blade_left').to_world([0., 0.98, 0.]), skel.com())
# # ddc[0:3] = 400. * (skel.body('h_blade_left').to_world([0., 0.98, 0.]) - skel.com()) - 10. * skel.dC
# ddc[0:3] = 400. * (np.array([0.52, 0., -0.09]) - skel.com()) - 10. * skel.dC
# print(skel.body('h_blade_left').to_world([0., 0, 0.]), skel.com())
# HP QP solve
_ddq, _tau, _bodyIDs, _contactPositions, _contactPositionLocals, _contactForces = hqp.calc_QP(
skel, des_accel, ddc, 1./self.time_step())
del self.contact_force[:]
del self.bodyIDs[:]
del self.contactPositionLocals[:]
self.bodyIDs = _bodyIDs
for i in range(len(_bodyIDs)):
skel.body(_bodyIDs[i]).add_ext_force(_contactForces[i], _contactPositionLocals[i])
self.contact_force.append(_contactForces[i])
self.contactPositionLocals.append(_contactPositionLocals[i])
# dartModel.applyPenaltyForce(_bodyIDs, _contactPositionLocals, _contactForces)
#Jacobian transpose control
jaco_r = skel.body("h_blade_right").linear_jacobian()
jaco_l = skel.body("h_blade_left").linear_jacobian()
jaco_hip_r = skel.body("h_thigh_right").linear_jacobian()
if self.curr_state.name == "state011":
force_r = 10. * np.array([-1.0, -8., 1.0])
force_l = 10. * np.array([1.0, -.0, -1.0])
t_r = self.add_JTC_force(jaco_r, force_r)
t_l = self.add_JTC_force(jaco_l, force_l)
_tau += t_r + t_l
if self.curr_state.name == "state1":
force_r = 10. * np.array([1.0, -.0, .0])
force_l = 10. * np.array([-.0, 0., 1.0])
t_r = self.add_JTC_force(jaco_r, force_r)
t_l = self.add_JTC_force(jaco_l, force_l)
_tau += t_r + t_l
if self.curr_state.name == "state2":
force_r = 10. * np.array([1.0, -1., 1.0])
force_l = 10. * np.array([-1.0, -0., -1.0])
t_r = self.add_JTC_force(jaco_r, force_r)
t_l = self.add_JTC_force(jaco_l, force_l)
_tau += t_r + t_l
if self.curr_state.name == "state3":
force_r = 10. * np.array([1.0, -1., 1.0])
t_r = self.add_JTC_force(jaco_r, force_r)
force_hip_r = 3. * np.array([-.0, 0., -1.0])
t_hip_r = self.add_JTC_force(jaco_hip_r, force_hip_r)
_tau += t_r + t_hip_r
if self.curr_state.name == "state03":
force_r = 10. * np.array([-1.0, -8., 1.0])
force_l = 10. * np.array([1.0, -8.0, -5.0])
t_r = self.add_JTC_force(jaco_r, force_r)
t_l = self.add_JTC_force(jaco_l, force_l)
_tau += t_r + t_l
# if self.curr_state.name == "state04":
# # jaco = skel.body("h_thigh_left").linear_jacobian()
# # jaco_t = jaco.transpose()
# # _force = 30. * np.array([.0, 0., 1.0])
# # my_tau = np.dot(jaco_t, _force)
# #
# # jaco1 = skel.body("h_thigh_right").linear_jacobian()
# # jaco_t1 = jaco1.transpose()
# # force1 = 20. * np.array([.0, 0., 1.0])
# # my_tau1 = np.dot(jaco_t1, force1)
#
# jaco = skel.body("h_blade_left").linear_jacobian()
# jaco_t = jaco.transpose()
# _force = 20. * np.array([.0, -8., -1.0])
# my_tau = np.dot(jaco_t, _force)
#
# _tau += my_tau #+ my_tau1
# if self.curr_state.name == "state1":
# my_jaco2 = skel.body("h_blade_left").linear_jacobian()
# my_jaco_t2 = my_jaco2.transpose()
# my_force2 = 50. * np.array([1.0, 0.0, .0])
# my_tau2 = np.dot(my_jaco_t2, my_force2)
#
# my_jaco = skel.body("h_blade_right").linear_jacobian()
# my_jaco_t = my_jaco.transpose()
# my_force = 10. * np.array([-1.0, -10., 2.0])
# # my_force = 50. * np.array([-1.0, 0., .0])
# my_tau = np.dot(my_jaco_t, my_force)
# _tau += my_tau
#
# if self.curr_state.name == "state2":
# my_jaco2 = skel.body("h_blade_left").linear_jacobian()
# my_jaco_t2 = my_jaco2.transpose()
# my_force2 = 50. * np.array([1.0, 0.0, -.0])
# my_tau2 = np.dot(my_jaco_t2, my_force2)
#
# my_jaco = skel.body("h_blade_right").linear_jacobian()
# my_jaco_t = my_jaco.transpose()
# my_force = 10. * np.array([-.0, .0, 1.0])
# # my_force = 50. * np.array([-1.0, 0., .0])
# my_tau = np.dot(my_jaco_t, my_force)
#
# _tau = _tau + my_tau2 + my_tau
# if self.curr_state.name == "state3":
# my_jaco = skel.body("h_blade_right").linear_jacobian()
# my_jaco_t = my_jaco.transpose()
# my_force = 10. * np.array([0., 1.0, 0.0])
# my_tau = np.dot(my_jaco_t, my_force)
#
# _tau += my_tau
skel.set_forces(_tau)
'''
del self.contact_force[:]
if len(self.controller.sol_lambda) != 0:
f_vec = self.controller.V_c.dot(self.controller.sol_lambda)
# print("f", f_vec)
f_vec = np.asarray(f_vec)
# print("contact num ?? : ", self.controller.contact_num)
# self.contact_force = np.zeros(self.controller.contact_num)
for ii in range(self.controller.contact_num):
self.contact_force.append(np.array([f_vec[3*ii], f_vec[3*ii+1], f_vec[3*ii+2]]))
# self.contact_force[ii] = np.array([f_vec[3*ii], f_vec[3*ii+1], f_vec[3*ii+2]])
# print("contact_force:", ii, self.contact_force[ii])
# print("contact_force:\n", self.contact_force)
for ii in range(self.controller.contact_num):
self.skeletons[2].body(self.controller.contact_list[2 * ii])\
.add_ext_force(self.contact_force[ii], self.controller.contact_list[2 * ii+1])
'''
super(MyWorld, self).step()
# skel.set_positions(q)
def add_JTC_force(self, jaco, force):
    """Map a Cartesian force into joint torques via the Jacobian transpose.

    jaco  -- linear Jacobian of a body node (rows: Cartesian axes, cols: dofs)
    force -- Cartesian force vector applied at that body

    Returns the joint-space torque vector J^T * f.
    """
    transposed = jaco.transpose()
    return np.dot(transposed, force)
def on_key_press(self, key):
    """Keyboard handler: '1'/'2' push the character along +x/-x.

    Sets self.force (world-frame push vector) and self.duration (how many
    simulation steps the push stays active); any other key is ignored.
    """
    # key -> (push force vector, number of active steps)
    pushes = {
        '1': (np.array([100.0, 0.0, 0.0]), 1000),
        '2': (np.array([-100.0, 0.0, 0.0]), 100),
    }
    if key in pushes:
        self.force, self.duration = pushes[key]
        print('push backward: f = %s' % self.force)
def render_with_ri(self, ri):
# Debug rendering via the pydart render interface `ri`:
# draws the external push arrow, contact-force lines, two marker
# spheres (ground-projected COM and IK foot target) and world axes.
# if self.force is not None and self.duration >= 0:
if self.force is not None:
# if self.curr_state.name == "state2":
# p0 = self.skeletons[2].body('h_heel_right').C
# p1 = p0 + 0.01 * self.force
# ri.set_color(1.0, 0.0, 0.0)
# ri.render_arrow(p0, p1, r_base=0.03, head_width=0.1, head_len=0.1)
# if self.curr_state.name == "state3":
# p0 = self.skeletons[2].body('h_heel_left').C
# p1 = p0 + 0.01 * self.force
# ri.set_color(1.0, 0.0, 0.0)
# ri.render_arrow(p0, p1, r_base=0.03, head_width=0.1, head_len=0.1)
# Red arrow from the spine body in the push direction (scaled for display).
p0 = self.skeletons[2].body('h_spine').C
p1 = p0 + 0.05 * self.force
ri.set_color(1.0, 0.0, 0.0)
ri.render_arrow(p0, p1, r_base=0.03, head_width=0.1, head_len=0.1)
# render contact force --yul
contact_force = self.contact_force
if len(contact_force) != 0:
# print(len(contact_force), len(self.controller.contact_list))
# print("contact_force.size?", contact_force.size, len(contact_force))
ri.set_color(1.0, 0.0, 0.0)
for ii in range(len(contact_force)):
# contact_list is flat (body_id, offset) pairs, hence the 2*ii indexing;
# the length check guards against a stale force list.
if 2 * len(contact_force) == len(self.controller.contact_list):
body = self.skeletons[2].body(self.controller.contact_list[2*ii])
contact_offset = self.controller.contact_list[2*ii+1]
# print("contact force : ", contact_force[ii])
# Forces divided by 100 purely for readable arrow length.
ri.render_line(body.to_world(contact_offset), contact_force[ii]/100.)
# Red sphere: character COM projected to the ground plane (y = -0.99).
ri.set_color(1, 0, 0)
ri.render_sphere(np.array([self.skeletons[2].C[0], -0.99, self.skeletons[2].C[2]]), 0.05)
# Magenta sphere: IK foot target, shifted slightly in -z for visibility.
ri.set_color(1, 0, 1)
ri.render_sphere(self.ik.target_foot + np.array([0.0, 0.0, -0.1]), 0.05)
# COP = self.skeletons[2].body('h_heel_right').to_world([0.05, 0, 0])
# ri.set_color(0, 0, 1)
# ri.render_sphere(COP, 0.05)
# ground rendering
# ri.render_chessboard(5)
# render axes
ri.render_axes(np.array([0, 0, 0]), 0.5)
#Height
# ri.render_sphere(np.array([0.0, 1.56-0.92, 0.0]), 0.01)
def render_with_ys(self):
# Refresh the module-level render buffers consumed by the yr renderers
# (render_vector*, push_force*, blade_force*, rd_footCenter): clears all
# of them, then republishes the ground-projected COM, the current push
# arrow and the per-contact force vectors for this frame.
# render contact force --yul
contact_force = self.contact_force
del render_vector[:]
del render_vector_origin[:]
del push_force[:]
del push_force_origin[:]
del blade_force[:]
del blade_force_origin[:]
del rd_footCenter[:]
# COM marker dropped slightly above the ground plane (y = -0.99).
com = self.skeletons[2].C
com[1] = -0.99 +0.05
# com = self.skeletons[2].body('h_blade_left').to_world(np.array([0.1040 + 0.0216, +0.80354016 - 0.85354016, -0.054]))
rd_footCenter.append(com)
# if self.curr_state.name == "state3":
# blade_force.append(np.array([1.0, -1.0, 1.0]))
# blade_force_origin.append(self.skeletons[2].body('h_heel_right').to_world())
# if self.curr_state.name == "state1" or self.curr_state.name == "state11" :
# blade_force.append(np.array([1.0, 0.0, 0.0]))
# blade_force_origin.append(self.skeletons[2].body('h_heel_left').to_world())
# # blade_force.append(-self.controller.blade_direction_L)
# # blade_force_origin.append(self.skeletons[2].body('h_heel_right').to_world())
# if self.curr_state.name == "state12":
# blade_force.append(np.array([-0.7, 1.0, 0.0]))
# blade_force_origin.append(self.skeletons[2].body('h_heel_right').to_world())
# if self.curr_state.name == "state011":
# blade_force.append(np.array([1.0, -7., 1.0]))
# blade_force_origin.append(self.skeletons[2].body('h_heel_right').to_world())
#
# if self.curr_state.name == "state2":
# blade_force.append(np.array([-0., -7.0, -1.0]))
# blade_force_origin.append(self.skeletons[2].body('h_heel_left').to_world())
# if self.curr_state.name == "state2":
# # blade_force.append(np.array([-1.0, 0., 1.0]))
# # blade_force_origin.append(self.skeletons[2].body('h_heel_right').to_world())
# blade_force.append(np.array([1., .0, -1.0]))
# blade_force_origin.append(self.skeletons[2].body('h_heel_left').to_world())
# Push arrow anchored at the pelvis; force scaled down for display.
if self.force is not None:
push_force.append(self.force*0.05)
push_force_origin.append(self.skeletons[2].body('h_pelvis').to_world())
if len(self.bodyIDs) != 0:
print(len(self.bodyIDs))
for ii in range(len(self.contact_force)):
# Body id 4 appears to be the left blade; everything else is drawn on
# the right blade -- TODO confirm against the skeleton definition.
if self.bodyIDs[ii] == 4:
body = self.skeletons[2].body('h_blade_left')
else:
body = self.skeletons[2].body('h_blade_right')
# Forces divided by 100 purely for readable vector length.
render_vector.append(contact_force[ii] / 100.)
render_vector_origin.append(body.to_world(self.contactPositionLocals[ii]))
# render_vector_origin.append(body.to_world(contact_offset))
# if len(contact_force) != 0:
# # print(len(contact_force), len(self.controller.contact_list))
# # print("contact_force.size?", contact_force.size, len(contact_force))
# # ri.set_color(1.0, 0.0, 0.0)
# for ii in range(len(contact_force)):
# if 2 * len(contact_force) == len(self.controller.contact_list):
# body = self.skeletons[2].body(self.controller.contact_list[2*ii])
# contact_offset = self.controller.contact_list[2*ii+1]
# # print("contact force : ", contact_force[ii])
# # ri.render_line(body.to_world(contact_offset), contact_force[ii]/100.)
# render_vector.append(contact_force[ii]/100.)
# render_vector_origin.append(body.to_world(contact_offset))
if __name__ == '__main__':
# Entry point: build the pydart world and skeleton, pose the arms,
# wire up the hpSimpleViewer renderers, and run the simulation loop.
print('Example: Skating -- pushing side to side')
pydart.init()
print('pydart initialization OK')
world = MyWorld()
print('MyWorld OK')
# Separate world holding only the ground mesh, stepped at 1 kHz.
ground = pydart.World(1. / 1000., './data/skel/ground.skel')
skel = world.skeletons[2]
q = skel.q
# q["j_abdomen_1"] = -0.2
# q["j_abdomen_2"] = -0.2
# q["j_thigh_right_x", "j_thigh_right_y", "j_thigh_right_z", "j_shin_right_z"] = -0., -0., 0.9, -1.5
# q["j_heel_left_1", "j_heel_left_2", "j_heel_right_1", "j_heel_right_2"] = 0., 0.1, 0., 0.
# q["j_thigh_right_y", "j_thigh_left_y"] = -0.785, 0.785
# q["j_shin_right", "j_shin_left"] = 0., 0.
# q["j_thigh_right_x", "j_thigh_right_y", "j_thigh_right_z"] = -0.1, -0.5, 0.2
# q["j_thigh_left_x", "j_thigh_left_y"] = 0.2, 0.5
# q["j_thigh_left_z", "j_shin_left"] = 0.2, -0.2
# q["j_thigh_right_z", "j_shin_right"] = 0.2, -0.2
# q["j_heel_left_1"] = 0.2
# q["j_heel_right_1"] = 0.2
#
# # both arm T-pose
q["j_bicep_left_x", "j_bicep_left_y", "j_bicep_left_z"] = 1.5, 0.0, 0.0
q["j_bicep_right_x", "j_bicep_right_y", "j_bicep_right_z"] = -1.5, 0.0, 0.0
skel.set_positions(q)
print('skeleton position OK')
# print('[Joint]')
# for joint in skel.joints:
# print("\t" + str(joint))
# print("\t\tparent = " + str(joint.parent_bodynode))
# print("\t\tchild = " + str(joint.child_bodynode))
# print("\t\tdofs = " + str(joint.dofs))
# pydart.gui.viewer.launch_pyqt5(world)
# Viewer setup: each renderer reads one of the module-level buffers that
# render_with_ys() refills every frame.
viewer = hsv.hpSimpleViewer(viewForceWnd=False)
viewer.setMaxFrame(1000)
viewer.doc.addRenderer('controlModel', yr.DartRenderer(world, (255,255,255), yr.POLYGON_FILL))
viewer.doc.addRenderer('ground', yr.DartRenderer(ground, (255, 255, 255), yr.POLYGON_FILL), visible=False)
viewer.doc.addRenderer('contactForce', yr.VectorsRenderer(render_vector, render_vector_origin, (255, 0, 0)))
viewer.doc.addRenderer('pushForce', yr.WideArrowRenderer(push_force, push_force_origin, (0, 255,0)))
viewer.doc.addRenderer('rd_footCenter', yr.PointsRenderer(rd_footCenter))
viewer.startTimer(1/25.)
viewer.motionViewWnd.glWindow.pOnPlaneshadow = (0., -0.99+0.0251, 0.)
viewer.doc.addRenderer('bladeForce', yr.WideArrowRenderer(blade_force, blade_force_origin, (0, 0, 255)))
viewer.motionViewWnd.glWindow.planeHeight = -0.98 + 0.0251
def simulateCallback(frame):
# 10 physics sub-steps per rendered frame, then refresh render buffers.
for i in range(10):
world.step()
world.render_with_ys()
viewer.setSimulateCallback(simulateCallback)
viewer.show()
Fl.run()
| [
"PyCommon.modules.GUI.hpSimpleViewer.hpSimpleViewer",
"PyCommon.modules.Renderer.ysRenderer.VectorsRenderer",
"pydart2.init",
"pydart2.World.__init__",
"numpy.zeros",
"numpy.diagflat",
"numpy.linalg.inv",
"numpy.dot",
"PyCommon.modules.Renderer.ysRenderer.PointsRenderer",
"numpy.array",
"pydart2... | [((28790, 28803), 'pydart2.init', 'pydart.init', ([], {}), '()\n', (28801, 28803), True, 'import pydart2 as pydart\n'), ((28903, 28956), 'pydart2.World', 'pydart.World', (['(1.0 / 1000.0)', '"""./data/skel/ground.skel"""'], {}), "(1.0 / 1000.0, './data/skel/ground.skel')\n", (28915, 28956), True, 'import pydart2 as pydart\n'), ((30250, 30288), 'PyCommon.modules.GUI.hpSimpleViewer.hpSimpleViewer', 'hsv.hpSimpleViewer', ([], {'viewForceWnd': '(False)'}), '(viewForceWnd=False)\n', (30268, 30288), True, 'from PyCommon.modules.GUI import hpSimpleViewer as hsv\n'), ((768, 854), 'pydart2.World.__init__', 'pydart.World.__init__', (['self', '(1.0 / 1000.0)', '"""./data/skel/cart_pole_blade_3dof.skel"""'], {}), "(self, 1.0 / 1000.0,\n './data/skel/cart_pole_blade_3dof.skel')\n", (789, 854), True, 'import pydart2 as pydart\n'), ((2792, 2812), 'numpy.zeros', 'np.zeros', (['skel.ndofs'], {}), '(skel.ndofs)\n', (2800, 2812), True, 'import numpy as np\n'), ((3593, 3613), 'numpy.zeros', 'np.zeros', (['skel.ndofs'], {}), '(skel.ndofs)\n', (3601, 3613), True, 'import numpy as np\n'), ((5840, 5860), 'numpy.zeros', 'np.zeros', (['skel.ndofs'], {}), '(skel.ndofs)\n', (5848, 5860), True, 'import numpy as np\n'), ((6306, 6326), 'numpy.zeros', 'np.zeros', (['skel.ndofs'], {}), '(skel.ndofs)\n', (6314, 6326), True, 'import numpy as np\n'), ((6751, 6771), 'numpy.zeros', 'np.zeros', (['skel.ndofs'], {}), '(skel.ndofs)\n', (6759, 6771), True, 'import numpy as np\n'), ((7556, 7576), 'numpy.zeros', 'np.zeros', (['skel.ndofs'], {}), '(skel.ndofs)\n', (7564, 7576), True, 'import numpy as np\n'), ((7938, 7958), 'numpy.zeros', 'np.zeros', (['skel.ndofs'], {}), '(skel.ndofs)\n', (7946, 7958), True, 'import numpy as np\n'), ((8330, 8350), 'numpy.zeros', 'np.zeros', (['skel.ndofs'], {}), '(skel.ndofs)\n', (8338, 8350), True, 'import numpy as np\n'), ((8610, 8630), 'numpy.zeros', 'np.zeros', (['skel.ndofs'], {}), '(skel.ndofs)\n', (8618, 8630), True, 'import numpy as np\n'), ((9393, 
9468), 'QPsolver.Controller', 'QPsolver.Controller', (['skel', 'self.skeletons[3]', 'self.dt', 'self.curr_state.name'], {}), '(skel, self.skeletons[3], self.dt, self.curr_state.name)\n', (9412, 9468), False, 'import QPsolver\n'), ((15563, 15614), 'numpy.diagflat', 'np.diagflat', (['([0.0] * 6 + [gain_value] * (ndofs - 6))'], {}), '([0.0] * 6 + [gain_value] * (ndofs - 6))\n', (15574, 15614), True, 'import numpy as np\n'), ((15628, 15692), 'numpy.diagflat', 'np.diagflat', (['([0.0] * 6 + [2.0 * gain_value ** 0.5] * (ndofs - 6))'], {}), '([0.0] * 6 + [2.0 * gain_value ** 0.5] * (ndofs - 6))\n', (15639, 15692), True, 'import numpy as np\n'), ((15708, 15738), 'numpy.linalg.inv', 'np.linalg.inv', (['(skel.M + Kd * h)'], {}), '(skel.M + Kd * h)\n', (15721, 15738), True, 'import numpy as np\n'), ((15953, 15964), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (15961, 15964), True, 'import numpy as np\n'), ((22308, 22329), 'numpy.dot', 'np.dot', (['jaco_t', 'force'], {}), '(jaco_t, force)\n', (22314, 22329), True, 'import numpy as np\n'), ((30361, 30417), 'PyCommon.modules.Renderer.ysRenderer.DartRenderer', 'yr.DartRenderer', (['world', '(255, 255, 255)', 'yr.POLYGON_FILL'], {}), '(world, (255, 255, 255), yr.POLYGON_FILL)\n', (30376, 30417), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((30454, 30511), 'PyCommon.modules.Renderer.ysRenderer.DartRenderer', 'yr.DartRenderer', (['ground', '(255, 255, 255)', 'yr.POLYGON_FILL'], {}), '(ground, (255, 255, 255), yr.POLYGON_FILL)\n', (30469, 30511), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((30571, 30639), 'PyCommon.modules.Renderer.ysRenderer.VectorsRenderer', 'yr.VectorsRenderer', (['render_vector', 'render_vector_origin', '(255, 0, 0)'], {}), '(render_vector, render_vector_origin, (255, 0, 0))\n', (30589, 30639), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((30681, 30745), 'PyCommon.modules.Renderer.ysRenderer.WideArrowRenderer', 'yr.WideArrowRenderer', 
(['push_force', 'push_force_origin', '(0, 255, 0)'], {}), '(push_force, push_force_origin, (0, 255, 0))\n', (30701, 30745), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((30790, 30822), 'PyCommon.modules.Renderer.ysRenderer.PointsRenderer', 'yr.PointsRenderer', (['rd_footCenter'], {}), '(rd_footCenter)\n', (30807, 30822), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((30970, 31036), 'PyCommon.modules.Renderer.ysRenderer.WideArrowRenderer', 'yr.WideArrowRenderer', (['blade_force', 'blade_force_origin', '(0, 0, 255)'], {}), '(blade_force, blade_force_origin, (0, 0, 255))\n', (30990, 31036), True, 'from PyCommon.modules.Renderer import ysRenderer as yr\n'), ((22431, 22458), 'numpy.array', 'np.array', (['[100.0, 0.0, 0.0]'], {}), '([100.0, 0.0, 0.0])\n', (22439, 22458), True, 'import numpy as np\n'), ((24458, 24523), 'numpy.array', 'np.array', (['[self.skeletons[2].C[0], -0.99, self.skeletons[2].C[2]]'], {}), '([self.skeletons[2].C[0], -0.99, self.skeletons[2].C[2]])\n', (24466, 24523), True, 'import numpy as np\n'), ((24898, 24917), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (24906, 24917), True, 'import numpy as np\n'), ((17384, 17411), 'numpy.array', 'np.array', (['[-1.0, -8.0, 1.0]'], {}), '([-1.0, -8.0, 1.0])\n', (17392, 17411), True, 'import numpy as np\n'), ((17439, 17466), 'numpy.array', 'np.array', (['[1.0, -0.0, -1.0]'], {}), '([1.0, -0.0, -1.0])\n', (17447, 17466), True, 'import numpy as np\n'), ((17678, 17704), 'numpy.array', 'np.array', (['[1.0, -0.0, 0.0]'], {}), '([1.0, -0.0, 0.0])\n', (17686, 17704), True, 'import numpy as np\n'), ((17731, 17757), 'numpy.array', 'np.array', (['[-0.0, 0.0, 1.0]'], {}), '([-0.0, 0.0, 1.0])\n', (17739, 17757), True, 'import numpy as np\n'), ((17968, 17994), 'numpy.array', 'np.array', (['[1.0, -1.0, 1.0]'], {}), '([1.0, -1.0, 1.0])\n', (17976, 17994), True, 'import numpy as np\n'), ((18022, 18050), 'numpy.array', 'np.array', (['[-1.0, -0.0, -1.0]'], {}), 
'([-1.0, -0.0, -1.0])\n', (18030, 18050), True, 'import numpy as np\n'), ((18262, 18288), 'numpy.array', 'np.array', (['[1.0, -1.0, 1.0]'], {}), '([1.0, -1.0, 1.0])\n', (18270, 18288), True, 'import numpy as np\n'), ((18373, 18400), 'numpy.array', 'np.array', (['[-0.0, 0.0, -1.0]'], {}), '([-0.0, 0.0, -1.0])\n', (18381, 18400), True, 'import numpy as np\n'), ((18575, 18602), 'numpy.array', 'np.array', (['[-1.0, -8.0, 1.0]'], {}), '([-1.0, -8.0, 1.0])\n', (18583, 18602), True, 'import numpy as np\n'), ((18630, 18657), 'numpy.array', 'np.array', (['[1.0, -8.0, -5.0]'], {}), '([1.0, -8.0, -5.0])\n', (18638, 18657), True, 'import numpy as np\n'), ((22598, 22626), 'numpy.array', 'np.array', (['[-100.0, 0.0, 0.0]'], {}), '([-100.0, 0.0, 0.0])\n', (22606, 22626), True, 'import numpy as np\n'), ((24608, 24634), 'numpy.array', 'np.array', (['[0.0, 0.0, -0.1]'], {}), '([0.0, 0.0, -0.1])\n', (24616, 24634), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 12:13:26 2019
@author: DiPu
"""
import pandas as pd
import numpy as np
# Load day-wise box-office collections: column 0 is the day number,
# columns 1-2 are the two movies' takings.
box_office = pd.read_csv("Bahubali2_vs_Dangal.csv")
day_numbers = box_office.iloc[:, 0].values.reshape(9, 1)
collections = box_office.iloc[:, 1:3].values

# Train the model now.
from sklearn.linear_model import LinearRegression

regression = LinearRegression()
regression.fit(day_numbers, collections)

# Predict both movies' collection for day 10 and report the winner.
day_ten = np.array([10]).reshape(1, -1)
predicted = regression.predict(day_ten)
if predicted[0, 0] > predicted[0, 1]:
    print("Bahubali 2 will collect more")
else:
    print("Dangal will collect more")
| [
"numpy.array",
"sklearn.linear_model.LinearRegression",
"pandas.read_csv"
] | [((128, 166), 'pandas.read_csv', 'pd.read_csv', (['"""Bahubali2_vs_Dangal.csv"""'], {}), "('Bahubali2_vs_Dangal.csv')\n", (139, 166), True, 'import pandas as pd\n'), ((362, 380), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (378, 380), False, 'from sklearn.linear_model import LinearRegression\n'), ((422, 436), 'numpy.array', 'np.array', (['[10]'], {}), '([10])\n', (430, 436), True, 'import numpy as np\n')] |
import numpy
from numpy.random import normal
import matplotlib.pyplot as plt
def matlab_hist(v):
    """Show a 50-bin count histogram of *v* via Matplotlib's built-in hist.

    Mimics MATLAB's hist() display: raw bin counts, not a density.
    """
    # `normed` was deprecated and then removed from Matplotlib; `density`
    # is its replacement (False keeps raw counts, matching the original intent).
    plt.hist(v, bins=50, density=False)
    plt.show()
def numpy_hist(v):
    """Histogram *v* with numpy and plot counts against bin centers."""
    # `normed` was removed from numpy.histogram (NumPy 1.24); `density`
    # replaces it (False keeps raw counts, matching the original intent).
    (n, bins) = numpy.histogram(v, bins=50, density=False)
    # Plot each bin's count at the midpoint of its two edges.
    plt.plot(.5*(bins[1:]+bins[:-1]), n)
    plt.show()
if __name__ == "__main__":
    # Demo: draw 10k Gaussian samples and show both histogram variants.
    mean, spread = 100, 0.5
    samples = normal(mean, spread, 10000)
    matlab_hist(samples)
    numpy_hist(samples)
"numpy.random.normal",
"numpy.histogram",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
] | [((102, 136), 'matplotlib.pyplot.hist', 'plt.hist', (['v'], {'bins': '(50)', 'normed': '(False)'}), '(v, bins=50, normed=False)\n', (110, 136), True, 'import matplotlib.pyplot as plt\n'), ((141, 151), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (149, 151), True, 'import matplotlib.pyplot as plt\n'), ((187, 228), 'numpy.histogram', 'numpy.histogram', (['v'], {'bins': '(50)', 'normed': '(False)'}), '(v, bins=50, normed=False)\n', (202, 228), False, 'import numpy\n'), ((233, 274), 'matplotlib.pyplot.plot', 'plt.plot', (['(0.5 * (bins[1:] + bins[:-1]))', 'n'], {}), '(0.5 * (bins[1:] + bins[:-1]), n)\n', (241, 274), True, 'import matplotlib.pyplot as plt\n'), ((274, 284), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (282, 284), True, 'import matplotlib.pyplot as plt\n'), ((346, 370), 'numpy.random.normal', 'normal', (['mu', 'sigma', '(10000)'], {}), '(mu, sigma, 10000)\n', (352, 370), False, 'from numpy.random import normal\n')] |
import nltk
import numpy as np
import pyjsonrpc
from features import Feature
from stst.data import dict_utils
from stst.libs.kernel import vector_kernel as vk
class Embedding(object):
    """Thin JSON-RPC client for a local word-embedding server.

    Each lookup method forwards a single word to the matching RPC endpoint
    and returns the server's response unchanged (callers unpack it as a
    (status, vector) pair -- presumably; confirm against the server API).
    """

    SERVER_URL = "http://localhost:8084"

    def __init__(self):
        self.http_client = pyjsonrpc.HttpClient(url=self.SERVER_URL)

    def get_word2vec(self, word):
        """Look up *word* on the server's word2vec endpoint."""
        return self.http_client.word2vec(word)

    def get_glove(self, word):
        """Look up *word* on the server's GloVe endpoint."""
        return self.http_client.glove(word)

    def get_paragram(self, word):
        """Look up *word* on the server's paragram endpoint."""
        return self.http_client.paragram(word)

    def get_glove300(self, word):
        """Look up *word* on the server's 300-d GloVe endpoint."""
        return self.http_client.glove300(word)
def pooling(word_sa, emb_type, dim, pooling_types='avg', convey='idf'):
    """Pool the word embeddings of one sentence into a single vector.

    Args:
        word_sa: list of word tokens for one sentence.
        emb_type: embedding source -- 'word2vec', 'glove', 'paragram'
            or 'glove300'.
        dim: embedding dimensionality; used for the all-zero fallback
            when the sentence is empty.
        pooling_types: 'avg', 'min' or 'max' pooling across words.
        convey: per-word weighting scheme, 'idf' or 'tfidf'.

    Returns:
        A (dim,)-shaped vector (zeros when word_sa is empty).

    Raises:
        NotImplementedError: for an unknown pooling_types, emb_type or
            convey value.
    """
    idf_weight = dict_utils.DictLoader().load_dict('idf')
    embedding = Embedding()
    vdist = nltk.FreqDist(word_sa)  # term frequencies, used by the 'tfidf' weighting
    poolers = {'avg': np.average, 'min': np.amin, 'max': np.amax}
    if pooling_types not in poolers:
        print(pooling_types)
        raise NotImplementedError
    function = poolers[pooling_types]
    lookups = {
        'word2vec': embedding.get_word2vec,
        'glove': embedding.get_glove,
        'paragram': embedding.get_paragram,
        'glove300': embedding.get_glove300,
    }
    if emb_type not in lookups:
        # An unknown emb_type used to crash later with UnboundLocalError;
        # fail up front with the same explicit error the other args use.
        raise NotImplementedError
    lookup = lookups[emb_type]
    vec = []
    for word in word_sa:
        st, w2v = lookup(word)
        if convey == 'idf':
            w = idf_weight.get(word, 10.0)  # 10.0: default idf for OOV words
        elif convey == 'tfidf':
            w = vdist[word] * idf_weight.get(word, 10.0)
        else:
            raise NotImplementedError
        vec.append(w * np.array(w2v))
    if len(vec) == 0:
        return np.zeros((dim,))
    return function(vec, axis=0)
def minavgmaxpooling(word_sa, emb_type, dim, convey='idf'):
    """Concatenate avg-, min- and max-pooled sentence vectors.

    Returns a flat vector of length 3 * dim.
    """
    pooled = [pooling(word_sa, emb_type, dim, mode, convey)
              for mode in ('avg', 'min', 'max')]
    return np.reshape(pooled, [-1])
class MinAvgMaxEmbeddingFeature(Feature):
    """Sentence-pair feature: kernel similarities between the min/avg/max
    pooled embedding vectors of the two sentences.
    """

    def __init__(self, emb_type, dim, lower=True, **kwargs):
        """
        Args:
            emb_type: embedding source ('word2vec', 'glove', ...).
            dim: embedding dimensionality.
            lower: lowercase tokens before embedding lookup.
        """
        super(MinAvgMaxEmbeddingFeature, self).__init__(**kwargs)
        self.lower = lower
        # BUG FIX: the original wrote `if 'emb_type' is None:`, comparing a
        # string literal to None -- always False, so the guard never fired.
        if emb_type is None:
            print('please init with emb_type and dimension!')
            exit()
        self.emb_type = emb_type
        self.dim = dim
        self.feature_name = self.feature_name + '-%s' % (emb_type)

    def extract(self, train_instance):
        """Return (kernel feature vector, [emb_type, lower]) for one pair."""
        lower = self.lower
        emb_type = self.emb_type
        dim = self.dim
        word_sa, word_sb = train_instance.get_word(type='word', stopwords=True, lower=lower)
        pooling_vec_sa = minavgmaxpooling(word_sa, emb_type, dim)
        pooling_vec_sb = minavgmaxpooling(word_sb, emb_type, dim)
        all_feats, all_names = vk.get_all_kernel(pooling_vec_sa, pooling_vec_sb)
        features = all_feats
        infos = [emb_type, lower]
        return features, infos
class MinAvgMaxPoolingFeature(Feature):
    # Placeholder: the constructor accepts embedding-file configuration
    # (name, dimensionality, file path, binary flag, lowercasing) but the
    # feature is not implemented yet.
    def __init__(self, emb_name, dim, emb_file, binary=False, lower=True, **kwargs):
        # TODO: load the embedding file and initialize the parent Feature.
        pass
| [
"numpy.reshape",
"nltk.FreqDist",
"stst.data.dict_utils.DictLoader",
"numpy.array",
"numpy.zeros",
"stst.libs.kernel.vector_kernel.get_all_kernel",
"pyjsonrpc.HttpClient"
] | [((951, 973), 'nltk.FreqDist', 'nltk.FreqDist', (['word_sa'], {}), '(word_sa)\n', (964, 973), False, 'import nltk\n'), ((2262, 2284), 'numpy.reshape', 'np.reshape', (['vecs', '[-1]'], {}), '(vecs, [-1])\n', (2272, 2284), True, 'import numpy as np\n'), ((238, 287), 'pyjsonrpc.HttpClient', 'pyjsonrpc.HttpClient', ([], {'url': '"""http://localhost:8084"""'}), "(url='http://localhost:8084')\n", (258, 287), False, 'import pyjsonrpc\n'), ((1953, 1969), 'numpy.zeros', 'np.zeros', (['(dim,)'], {}), '((dim,))\n', (1961, 1969), True, 'import numpy as np\n'), ((3118, 3167), 'stst.libs.kernel.vector_kernel.get_all_kernel', 'vk.get_all_kernel', (['pooling_vec_sa', 'pooling_vec_sb'], {}), '(pooling_vec_sa, pooling_vec_sb)\n', (3135, 3167), True, 'from stst.libs.kernel import vector_kernel as vk\n'), ((869, 892), 'stst.data.dict_utils.DictLoader', 'dict_utils.DictLoader', ([], {}), '()\n', (890, 892), False, 'from stst.data import dict_utils\n'), ((1878, 1891), 'numpy.array', 'np.array', (['w2v'], {}), '(w2v)\n', (1886, 1891), True, 'import numpy as np\n')] |
import os
import numpy as np
from scipy.special import logit, expit
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from genEM3.data.wkwdata import WkwData, DataSplit
from genEM3.model.autoencoder2d import Encoder_4_sampling_bn_1px_deep_convonly_skip, AE_Encoder_Classifier, Classifier3Layered
from genEM3.inference.inference import Predictor
# Inference script: load a trained encoder+classifier, run it over WKW
# electron-microscopy data and write per-voxel probabilities back to WKW.
# Avoid file-descriptor exhaustion with many DataLoader workers.
torch.multiprocessing.set_sharing_strategy('file_system')
run_root = os.path.dirname(os.path.abspath(__file__))
cache_HDD_root = os.path.join(run_root, '.cache/')
datasources_json_path = os.path.join(run_root, 'datasources_predict.json')
state_dict_path = os.path.join(run_root, '../../training/ae_classify_v09_3layer_unfreeze_latent_debris_clean_transform_add_clean2_wiggle/.log/run_w_pr/epoch_700/model_state_dict')
device = 'cpu'
output_wkw_root = '/tmpscratch/webknossos/Connectomics_Department/2018-11-13_scMS109_1to7199_v01_l4_06_24_fixed_mag8_artifact_pred'
output_label = 'probs_sparse'
# Patch geometry: 140x140 single-slice input, one scalar prediction out.
batch_size = 128
input_shape = (140, 140, 1)
output_shape = (1, 1, 1)
num_workers = 12
# Encoder hyperparameters -- must match the trained checkpoint.
kernel_size = 3
stride = 1
n_fmaps = 16
n_latent = 2048
input_size = 140
output_size = input_size
model = AE_Encoder_Classifier(
Encoder_4_sampling_bn_1px_deep_convonly_skip(input_size, kernel_size, stride, n_latent=n_latent),
Classifier3Layered(n_latent=n_latent))
datasources = WkwData.datasources_from_json(datasources_json_path)
dataset = WkwData(
input_shape=input_shape,
target_shape=output_shape,
data_sources=datasources,
stride=(35, 35, 1),
cache_HDD=False,
cache_RAM=True,
cache_HDD_root=cache_HDD_root
)
prediction_loader = torch.utils.data.DataLoader(
dataset=dataset, batch_size=batch_size, num_workers=num_workers)
# map_location keeps the load on CPU regardless of the saving device.
checkpoint = torch.load(state_dict_path, map_location=lambda storage, loc: storage)
state_dict = checkpoint['model_state_dict']
model.load_state_dict(state_dict)
# Model emits log-probabilities; exponentiate class-1 to get P(artifact).
output_prob_fn = lambda x: np.exp(x[:, 1, 0, 0])
# output_dtype = np.uint8
output_dtype = np.float32
# output_dtype_fn = lambda x: (logit(x) + 16) * 256 / 32
# Identity conversions: probabilities are written out as raw float32.
output_dtype_fn = lambda x: x
# output_dtype_fni = lambda x: expit(x / 256 * 32 - 16)
output_dtype_fni = lambda x: x
predictor = Predictor(
model=model,
dataloader=prediction_loader,
output_prob_fn=output_prob_fn,
output_dtype_fn=output_dtype_fn,
output_dtype=output_dtype,
output_label=output_label,
output_wkw_root=output_wkw_root,
output_wkw_compress=False,
device=device,
interpolate=None)
predictor.predict()
print('done')
| [
"genEM3.data.wkwdata.WkwData",
"genEM3.inference.inference.Predictor",
"torch.load",
"os.path.join",
"torch.multiprocessing.set_sharing_strategy",
"numpy.exp",
"genEM3.data.wkwdata.WkwData.datasources_from_json",
"torch.utils.data.DataLoader",
"os.path.abspath",
"genEM3.model.autoencoder2d.Classif... | [((409, 466), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (451, 466), False, 'import torch\n'), ((539, 572), 'os.path.join', 'os.path.join', (['run_root', '""".cache/"""'], {}), "(run_root, '.cache/')\n", (551, 572), False, 'import os\n'), ((597, 647), 'os.path.join', 'os.path.join', (['run_root', '"""datasources_predict.json"""'], {}), "(run_root, 'datasources_predict.json')\n", (609, 647), False, 'import os\n'), ((666, 836), 'os.path.join', 'os.path.join', (['run_root', '"""../../training/ae_classify_v09_3layer_unfreeze_latent_debris_clean_transform_add_clean2_wiggle/.log/run_w_pr/epoch_700/model_state_dict"""'], {}), "(run_root,\n '../../training/ae_classify_v09_3layer_unfreeze_latent_debris_clean_transform_add_clean2_wiggle/.log/run_w_pr/epoch_700/model_state_dict'\n )\n", (678, 836), False, 'import os\n'), ((1384, 1436), 'genEM3.data.wkwdata.WkwData.datasources_from_json', 'WkwData.datasources_from_json', (['datasources_json_path'], {}), '(datasources_json_path)\n', (1413, 1436), False, 'from genEM3.data.wkwdata import WkwData, DataSplit\n'), ((1447, 1625), 'genEM3.data.wkwdata.WkwData', 'WkwData', ([], {'input_shape': 'input_shape', 'target_shape': 'output_shape', 'data_sources': 'datasources', 'stride': '(35, 35, 1)', 'cache_HDD': '(False)', 'cache_RAM': '(True)', 'cache_HDD_root': 'cache_HDD_root'}), '(input_shape=input_shape, target_shape=output_shape, data_sources=\n datasources, stride=(35, 35, 1), cache_HDD=False, cache_RAM=True,\n cache_HDD_root=cache_HDD_root)\n', (1454, 1625), False, 'from genEM3.data.wkwdata import WkwData, DataSplit\n'), ((1668, 1764), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(dataset=dataset, batch_size=batch_size,\n num_workers=num_workers)\n', (1695, 1764), False, 'import torch\n'), 
((1780, 1850), 'torch.load', 'torch.load', (['state_dict_path'], {'map_location': '(lambda storage, loc: storage)'}), '(state_dict_path, map_location=lambda storage, loc: storage)\n', (1790, 1850), False, 'import torch\n'), ((2218, 2501), 'genEM3.inference.inference.Predictor', 'Predictor', ([], {'model': 'model', 'dataloader': 'prediction_loader', 'output_prob_fn': 'output_prob_fn', 'output_dtype_fn': 'output_dtype_fn', 'output_dtype': 'output_dtype', 'output_label': 'output_label', 'output_wkw_root': 'output_wkw_root', 'output_wkw_compress': '(False)', 'device': 'device', 'interpolate': 'None'}), '(model=model, dataloader=prediction_loader, output_prob_fn=\n output_prob_fn, output_dtype_fn=output_dtype_fn, output_dtype=\n output_dtype, output_label=output_label, output_wkw_root=\n output_wkw_root, output_wkw_compress=False, device=device, interpolate=None\n )\n', (2227, 2501), False, 'from genEM3.inference.inference import Predictor\n'), ((495, 520), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (510, 520), False, 'import os\n'), ((1228, 1328), 'genEM3.model.autoencoder2d.Encoder_4_sampling_bn_1px_deep_convonly_skip', 'Encoder_4_sampling_bn_1px_deep_convonly_skip', (['input_size', 'kernel_size', 'stride'], {'n_latent': 'n_latent'}), '(input_size, kernel_size,\n stride, n_latent=n_latent)\n', (1272, 1328), False, 'from genEM3.model.autoencoder2d import Encoder_4_sampling_bn_1px_deep_convonly_skip, AE_Encoder_Classifier, Classifier3Layered\n'), ((1330, 1367), 'genEM3.model.autoencoder2d.Classifier3Layered', 'Classifier3Layered', ([], {'n_latent': 'n_latent'}), '(n_latent=n_latent)\n', (1348, 1367), False, 'from genEM3.model.autoencoder2d import Encoder_4_sampling_bn_1px_deep_convonly_skip, AE_Encoder_Classifier, Classifier3Layered\n'), ((1957, 1978), 'numpy.exp', 'np.exp', (['x[:, 1, 0, 0]'], {}), '(x[:, 1, 0, 0])\n', (1963, 1978), True, 'import numpy as np\n')] |
from src.classification.knn_classify import KNNClassify
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
def test_knn_sklearn():
'''Compare knn predictions to sklearn.'''
n = 100
d = 5
neighbor_counts = [1, 3, 5, 7]
trials = 5
for k in neighbor_counts:
for _ in range(trials):
X = np.random.rand(n, d)
y = np.random.randint(0, d, n)
model = KNNClassify(X, y, standardized=False, k = k)
train_features = model.train_features
test_features = model.test_features
train_output = model.train_output
test_output = model.test_output
neigh = KNeighborsClassifier(n_neighbors=k)
neigh.fit(train_features, train_output)
assert np.allclose(neigh.predict(test_features), model.predict_class())
| [
"sklearn.neighbors.KNeighborsClassifier",
"numpy.random.randint",
"src.classification.knn_classify.KNNClassify",
"numpy.random.rand"
] | [((348, 368), 'numpy.random.rand', 'np.random.rand', (['n', 'd'], {}), '(n, d)\n', (362, 368), True, 'import numpy as np\n'), ((385, 411), 'numpy.random.randint', 'np.random.randint', (['(0)', 'd', 'n'], {}), '(0, d, n)\n', (402, 411), True, 'import numpy as np\n'), ((432, 474), 'src.classification.knn_classify.KNNClassify', 'KNNClassify', (['X', 'y'], {'standardized': '(False)', 'k': 'k'}), '(X, y, standardized=False, k=k)\n', (443, 474), False, 'from src.classification.knn_classify import KNNClassify\n'), ((704, 739), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'k'}), '(n_neighbors=k)\n', (724, 739), False, 'from sklearn.neighbors import KNeighborsClassifier\n')] |
"""This file can be used to create a new python file that will return
the dictionary of Levelsymmetric quadratures."""
import numpy as np
import sys
def createdict():
"""Create a dictionary based on the quadrature files stored in data/"""
orders = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
D = dict()
for order in orders:
xyzw = np.loadtxt("data/" + str(order) + "_levelsym.txt", delimiter=",")
xyzw[:, 3] = xyzw[:, 3] / sum(xyzw[:, 3]) * 4 * np.pi
D[order] = xyzw
return D
def writedict():
"""Dump a dictionary to a python file and add a function definition
first. That way, we can read the dictionary later from that file without
using the files in data/"""
d = createdict()
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(precision=15)
with open("writtendict.py", "w") as f:
mystring = (
"from numpy import array\n"
"def levelsymmetricdictionary():\n"
"\treturn (" + str(d) + ")"
)
print(mystring, file=f)
| [
"numpy.set_printoptions"
] | [((742, 784), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (761, 784), True, 'import numpy as np\n'), ((789, 822), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(15)'}), '(precision=15)\n', (808, 822), True, 'import numpy as np\n')] |
import numpy as np
data = np.loadtxt('GSRM_plate_outlines.gmt',dtype=str)
data = np.flip(data,1)
# Locate the starting position of each plate
bnds_index, = np.where(data[:,1] == '>')
n = len(bnds_index)
# Separate the boundaries of each plate and write it in a file
for i in range(n):
vi = bnds_index[i]
j1 = vi+1
if i == n-1:
np.savetxt(data[vi][0], data[j1:-1], fmt='%10s %9s',header=data[vi][0],comments='')
else:
j2 = bnds_index[i+1]
np.savetxt(data[vi][0], data[j1:j2], fmt='%10s %9s',header=data[vi][0],comments='') | [
"numpy.where",
"numpy.flip",
"numpy.loadtxt",
"numpy.savetxt"
] | [((27, 75), 'numpy.loadtxt', 'np.loadtxt', (['"""GSRM_plate_outlines.gmt"""'], {'dtype': 'str'}), "('GSRM_plate_outlines.gmt', dtype=str)\n", (37, 75), True, 'import numpy as np\n'), ((82, 98), 'numpy.flip', 'np.flip', (['data', '(1)'], {}), '(data, 1)\n', (89, 98), True, 'import numpy as np\n'), ((157, 184), 'numpy.where', 'np.where', (["(data[:, 1] == '>')"], {}), "(data[:, 1] == '>')\n", (165, 184), True, 'import numpy as np\n'), ((350, 439), 'numpy.savetxt', 'np.savetxt', (['data[vi][0]', 'data[j1:-1]'], {'fmt': '"""%10s %9s"""', 'header': 'data[vi][0]', 'comments': '""""""'}), "(data[vi][0], data[j1:-1], fmt='%10s %9s', header=data[vi][0],\n comments='')\n", (360, 439), True, 'import numpy as np\n'), ((481, 570), 'numpy.savetxt', 'np.savetxt', (['data[vi][0]', 'data[j1:j2]'], {'fmt': '"""%10s %9s"""', 'header': 'data[vi][0]', 'comments': '""""""'}), "(data[vi][0], data[j1:j2], fmt='%10s %9s', header=data[vi][0],\n comments='')\n", (491, 570), True, 'import numpy as np\n')] |
#%%
#%%
import os
import time
import shutil
import numpy as np
import tensorflow as tf
from PIL import Image
import random
import matplotlib.pyplot as plt
import cv2
from cv2 import cv2
scal = 224
sampleModel = tf.keras.applications.ResNet50V2(weights='imagenet',
include_top=False,
input_shape=(scal, scal, 3))
sampleModel.trianable = False
for l in sampleModel.layers:
print(l.name)
if l.name == 'conv4_block5_out':
print(l)
#%%
c=[]
name=['conv2_block2_out','conv3_block3_out','conv4_block5_out','conv5_block3_out']
i=0
for l in sampleModel.layers:
if l.name == name[i]:
i+=1
print(l.name)
c.append(l.output)
if i == 4:
break
print(c)
model = tf.keras.models.Model(inputs=sampleModel.input, outputs=c)
tf.keras.utils.plot_model(model, to_file='rennetRpn.png', show_shapes=True, show_layer_names=True)
#%%
model.outputs
#%%
sampleModel.layers['conv4_block5_out']
#%%
img = cv2.imread('hua.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img/255.0
img = cv2.resize(img,(224,224))
plt.imshow(img)
o = sampleModel(np.expand_dims(img,0))
# %%
probs = tf.nn.softmax(o)
probs=probs.numpy()
np.max(probs)
# %%
np.argmax(probs)
# %%
probs
# %%
print('Predicted:', tf.keras.applications.resnet_v2.decode_predictions(o, top=3)[0])
# %%
img.shape
# %%
w = sampleModel.get_weights()
w[0]
# %%
for l in sampleModel.layers:
print(l.name)
# %%
# %%
| [
"matplotlib.pyplot.imshow",
"cv2.cv2.imread",
"tensorflow.keras.applications.resnet_v2.decode_predictions",
"numpy.argmax",
"tensorflow.keras.applications.ResNet50V2",
"tensorflow.keras.utils.plot_model",
"numpy.max",
"cv2.cv2.resize",
"tensorflow.nn.softmax",
"numpy.expand_dims",
"tensorflow.ke... | [((212, 316), 'tensorflow.keras.applications.ResNet50V2', 'tf.keras.applications.ResNet50V2', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_shape': '(scal, scal, 3)'}), "(weights='imagenet', include_top=False,\n input_shape=(scal, scal, 3))\n", (244, 316), True, 'import tensorflow as tf\n'), ((797, 855), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'sampleModel.input', 'outputs': 'c'}), '(inputs=sampleModel.input, outputs=c)\n', (818, 855), True, 'import tensorflow as tf\n'), ((856, 958), 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['model'], {'to_file': '"""rennetRpn.png"""', 'show_shapes': '(True)', 'show_layer_names': '(True)'}), "(model, to_file='rennetRpn.png', show_shapes=True,\n show_layer_names=True)\n", (881, 958), True, 'import tensorflow as tf\n'), ((1028, 1049), 'cv2.cv2.imread', 'cv2.imread', (['"""hua.jpg"""'], {}), "('hua.jpg')\n", (1038, 1049), False, 'from cv2 import cv2\n'), ((1056, 1092), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1068, 1092), False, 'from cv2 import cv2\n'), ((1115, 1142), 'cv2.cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (1125, 1142), False, 'from cv2 import cv2\n'), ((1141, 1156), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1151, 1156), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1225), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['o'], {}), '(o)\n', (1222, 1225), True, 'import tensorflow as tf\n'), ((1246, 1259), 'numpy.max', 'np.max', (['probs'], {}), '(probs)\n', (1252, 1259), True, 'import numpy as np\n'), ((1265, 1281), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (1274, 1281), True, 'import numpy as np\n'), ((1173, 1195), 'numpy.expand_dims', 'np.expand_dims', (['img', '(0)'], {}), '(img, 0)\n', (1187, 1195), True, 'import numpy as np\n'), ((1318, 1378), 
'tensorflow.keras.applications.resnet_v2.decode_predictions', 'tf.keras.applications.resnet_v2.decode_predictions', (['o'], {'top': '(3)'}), '(o, top=3)\n', (1368, 1378), True, 'import tensorflow as tf\n')] |
import sys
import numpy as np
import hdf5storage as h5
np.set_printoptions(threshold=sys.maxsize)
uv_path = 'uvmat/101_6-pp_Page_605-S0H0001.mat'
uv = h5.loadmat(uv_path)['uv']
print(type(uv)) # np.ndarray
print(uv.shape)
first = uv[:, :, 0]
second = uv[:, :, 1]
third = uv[:, :, 2]
# print(first)
# print(second)
# print(third)
print(np.unique(first)) # [0, 1] mask
print(np.unique(second)) # 0.0 ~ 1.0 x
print(np.unique(third)) # 0.0 ~ 1.0 y
| [
"hdf5storage.loadmat",
"numpy.unique",
"numpy.set_printoptions"
] | [((55, 97), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (74, 97), True, 'import numpy as np\n'), ((152, 171), 'hdf5storage.loadmat', 'h5.loadmat', (['uv_path'], {}), '(uv_path)\n', (162, 171), True, 'import hdf5storage as h5\n'), ((340, 356), 'numpy.unique', 'np.unique', (['first'], {}), '(first)\n', (349, 356), True, 'import numpy as np\n'), ((381, 398), 'numpy.unique', 'np.unique', (['second'], {}), '(second)\n', (390, 398), True, 'import numpy as np\n'), ((422, 438), 'numpy.unique', 'np.unique', (['third'], {}), '(third)\n', (431, 438), True, 'import numpy as np\n')] |
import os, torch, random, cv2, math, glob
import numpy as np
from torch.utils import data
from torchvision import transforms as T
from PIL import Image
from torch.nn import functional as F
from collections import defaultdict
import random
import copy
from torch.utils.data.sampler import Sampler
class IdentityCameraSampler(Sampler):
def __init__(self, data_source, batch_size, num_instances,cams_of_dataset=None,len_of_real_data=None):
if batch_size < num_instances:
raise ValueError('batch_size={} must be no less '
'than num_instances={}'.format(batch_size, num_instances))
self.data_source = data_source
self.batch_size = batch_size
self.num_instances = num_instances
self.num_pids_per_batch = self.batch_size // self.num_instances # approximate
self.num_cams_per_batch = 8
self.index_dic = defaultdict(list)
self.cam_index_dic = dict()
self.num_pids_per_cam = self.num_pids_per_batch//self.num_cams_per_batch
for index, (_, pid, camid) in enumerate(self.data_source):
self.index_dic[pid].append(index)
if camid not in self.cam_index_dic.keys():
self.cam_index_dic[camid]=defaultdict(list)
self.cam_index_dic[camid][pid].append(index)
self.pids = list(self.index_dic.keys())
self.cams_of_dataset=cams_of_dataset
self.len_of_real_data = len_of_real_data
def __iter__(self):
final_idxs = []
length = 2*self.len_of_real_data if self.len_of_real_data is not None else len(self.data_source)
# F setting
#length = len(self.data_source)
while(len(final_idxs) < length):
if self.cams_of_dataset is not None:
# C setting
#c_rnd = np.random.choice(list(self.cam_index_dic.keys()),size=1)[0]
#for cams_of_data in self.cams_of_dataset:
# if c_rnd in cams_of_data:
# cams = np.random.choice(list(cams_of_data),size=self.num_cams_per_batch,replace=True)
# break
# D setting
c_rnd = np.random.choice([i for i in range(len(self.cams_of_dataset))],size=1)[0]
cams = np.random.choice(list(self.cams_of_dataset[c_rnd]),size=self.num_cams_per_batch,replace=True)
else:
cams = np.random.choice(list(self.cam_index_dic.keys()),size=self.num_cams_per_batch,replace=True)
for c in cams:
pids = np.random.choice(list(self.cam_index_dic[c].keys()),size=self.num_pids_per_cam, replace=True)
for p in pids:
idxs =np.random.choice(self.cam_index_dic[c][p],size=self.num_instances,replace=True)
random.shuffle(idxs)
final_idxs.extend(idxs)
self.length=len(final_idxs)
return iter(final_idxs)
def __len__(self):
return self.length
class RandomErasing(object):
def __init__(self, EPSILON=0.5, mean=[0.485, 0.456, 0.406]):
self.EPSILON = EPSILON
self.mean = mean
def __call__(self, img):
if random.uniform(0, 1) > self.EPSILON:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(0.02, 0.2) * area
aspect_ratio = random.uniform(0.3, 3)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size()[2] and h <= img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
return img
return img
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transform = T.Compose([
T.Resize((256,128)),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalizer,
RandomErasing(EPSILON=0.5)
])
test_transform = T.Compose([
T.Resize((256,128)),
T.ToTensor(),
normalizer ])
class imgdataset_withsource(data.Dataset):
def __init__(self, data_source):
self.data_source = data_source
self.transform = train_transform
def __getitem__(self,index):
im_path, pid, cam = self.data_source[index]
image = Image.open(im_path).convert('RGB')
image = self.transform(image)
return image,pid, cam
def __len__(self):
return len(self.data_source)
class imgdataset(data.Dataset):
def __init__(self, dataset_dir, txt_path, transformer = 'train'):
self.mode = transformer
self.transform = train_transform if transformer == 'train' else test_transform
with open(txt_path) as f:
line = f.readlines()
self.img_list = [os.path.join(dataset_dir, i.split()[0]) for i in line]
self.label_list = [int(i.split()[1]) for i in line]
self.cam_list = [int(i.split()[2]) for i in line]
if self.mode=='test':
self.frame_list = [int(i.split()[3]) for i in line]
#self.cam_list = [int(i.split('c')[1][0]) for i in line]
self.cams = np.unique(self.cam_list)
self.pids = np.unique(self.label_list)
pid2label = {pid:ind for ind,pid in enumerate(self.pids)}
labels = []
for l in self.label_list:
labels.append(pid2label[l])
self.label_list = labels
self.data_source = []
for i in range(len(self.label_list)):
self.data_source.append((self.img_list[i],self.label_list[i],self.cam_list[i]))
def __getitem__(self, index):
im_path = self.img_list[index]
image = Image.open(im_path).convert('RGB')
image = self.transform(image)
if self.mode=='train':
return image, self.label_list[index], self.cam_list[index]
elif self.mode=='test':
return image, self.label_list[index], self.cam_list[index], self.frame_list[index]
def __len__(self):
return len(self.label_list)
class imgdataset_cam(data.Dataset):
def __init__(self, dataset_dir, txt_path,camid, transformer = 'train'):
self.mode = transformer
self.transform = train_transform if transformer == 'train' else test_transform
with open(txt_path) as f:
line = f.readlines()
self.img_list = np.array([os.path.join(dataset_dir, i.split()[0]) for i in line])
self.label_list = np.array([int(i.split()[1]) for i in line])
self.cam_list = np.array([int(i.split()[2]) for i in line])
self.query_list = np.array([True if 'query' in i else False for i in line])
if self.mode=='test':
self.frame_list =np.array([int(i.split()[3]) for i in line])
select = self.cam_list==camid
self.img_list = self.img_list[select]
self.label_list = self.label_list[select]
self.cam_list = self.cam_list[select]
self.frame_list = self.frame_list[select]
self.query_list = self.query_list[select]
#self.cam_list = [int(i.split('c')[1][0]) for i in line]
self.cams = np.unique(self.cam_list)
def __getitem__(self, index):
im_path = self.img_list[index]
image = Image.open(im_path).convert('RGB')
image = self.transform(image)
if self.mode=='train':
return image, self.label_list[index], self.cam_list[index]
elif self.mode=='test':
return image, self.label_list[index], self.cam_list[index], self.frame_list[index], self.query_list[index]
def __len__(self):
return len(self.label_list)
class imgdataset_camtrans(data.Dataset):
def __init__(self, dataset_dir, txt_path, transformer = 'train', num_cam=8, K=4):
self.num_cam = num_cam
self.mode = transformer
self.transform = train_transform if transformer == 'train' else test_transform
self.K = K
with open(txt_path) as f:
line = f.readlines()
self.img_list = [os.path.join(dataset_dir, i.split()[0]) for i in line]
self.label_list = [int(i.split()[1]) for i in line]
#self.cam_list = [int(i.split('c')[1][0]) for i in line]
self.cam_list = [int(i.split()[2]) for i in line]
def __getitem__(self, index):
im_path = self.img_list[index]
camid = self.cam_list[index]
cams = torch.randperm(self.num_cam) + 1
imgs = []
cam_labels = []
index_labels = []
for sel_cam in cams[0:self.K]:
if sel_cam != camid:
if 'msmt' in im_path:
im_path_cam = im_path[:-4]+'_fake_'+str(sel_cam.numpy())+'.jpg'
else:
im_path_cam = im_path[:-4] + '_fake_' + str(camid) + 'to' + str(sel_cam.numpy()) + '.jpg'
else:
im_path_cam = im_path
#print('im_path', camid, sel_cam,im_path_cam)
image = Image.open(im_path_cam).convert('RGB')
image = self.transform(image)
imgs.append(image.numpy())
#imgs.append(image)
cam_labels.append(sel_cam)
index_labels.append(index)
imgs = np.array(imgs, np.float32)
imgs = torch.from_numpy(imgs).float()
cam_labels = np.array(cam_labels)
cam_labels = torch.from_numpy(cam_labels)
index_labels = np.array(index_labels)
index_labels = torch.from_numpy(index_labels)
return imgs, self.label_list[index], index_labels, cam_labels
def __len__(self):
return len(self.label_list)
class NormalCollateFn:
def __call__(self, batch):
img_tensor = [x[0] for x in batch]
pids = np.array([x[1] for x in batch])
camids = np.array([x[2] for x in batch])
return torch.stack(img_tensor, dim=0), torch.from_numpy(pids), torch.from_numpy(np.array(camids))
| [
"random.uniform",
"PIL.Image.open",
"numpy.unique",
"torch.randperm",
"random.shuffle",
"numpy.random.choice",
"torch.stack",
"torchvision.transforms.RandomHorizontalFlip",
"math.sqrt",
"torch.from_numpy",
"numpy.array",
"collections.defaultdict",
"torchvision.transforms.Normalize",
"torch... | [((3988, 4054), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3999, 4054), True, 'from torchvision import transforms as T\n'), ((900, 917), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (911, 917), False, 'from collections import defaultdict\n'), ((4089, 4109), 'torchvision.transforms.Resize', 'T.Resize', (['(256, 128)'], {}), '((256, 128))\n', (4097, 4109), True, 'from torchvision import transforms as T\n'), ((4114, 4138), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {}), '()\n', (4136, 4138), True, 'from torchvision import transforms as T\n'), ((4144, 4156), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (4154, 4156), True, 'from torchvision import transforms as T\n'), ((4248, 4268), 'torchvision.transforms.Resize', 'T.Resize', (['(256, 128)'], {}), '((256, 128))\n', (4256, 4268), True, 'from torchvision import transforms as T\n'), ((4273, 4285), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (4283, 4285), True, 'from torchvision import transforms as T\n'), ((5429, 5453), 'numpy.unique', 'np.unique', (['self.cam_list'], {}), '(self.cam_list)\n', (5438, 5453), True, 'import numpy as np\n'), ((5474, 5500), 'numpy.unique', 'np.unique', (['self.label_list'], {}), '(self.label_list)\n', (5483, 5500), True, 'import numpy as np\n'), ((7489, 7513), 'numpy.unique', 'np.unique', (['self.cam_list'], {}), '(self.cam_list)\n', (7498, 7513), True, 'import numpy as np\n'), ((9626, 9652), 'numpy.array', 'np.array', (['imgs', 'np.float32'], {}), '(imgs, np.float32)\n', (9634, 9652), True, 'import numpy as np\n'), ((9720, 9740), 'numpy.array', 'np.array', (['cam_labels'], {}), '(cam_labels)\n', (9728, 9740), True, 'import numpy as np\n'), ((9762, 9790), 'torch.from_numpy', 'torch.from_numpy', (['cam_labels'], {}), '(cam_labels)\n', 
(9778, 9790), False, 'import os, torch, random, cv2, math, glob\n'), ((9814, 9836), 'numpy.array', 'np.array', (['index_labels'], {}), '(index_labels)\n', (9822, 9836), True, 'import numpy as np\n'), ((9860, 9890), 'torch.from_numpy', 'torch.from_numpy', (['index_labels'], {}), '(index_labels)\n', (9876, 9890), False, 'import os, torch, random, cv2, math, glob\n'), ((10135, 10166), 'numpy.array', 'np.array', (['[x[1] for x in batch]'], {}), '([x[1] for x in batch])\n', (10143, 10166), True, 'import numpy as np\n'), ((10184, 10215), 'numpy.array', 'np.array', (['[x[2] for x in batch]'], {}), '([x[2] for x in batch])\n', (10192, 10215), True, 'import numpy as np\n'), ((3184, 3204), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3198, 3204), False, 'import random\n'), ((3416, 3438), 'random.uniform', 'random.uniform', (['(0.3)', '(3)'], {}), '(0.3, 3)\n', (3430, 3438), False, 'import random\n'), ((6927, 6986), 'numpy.array', 'np.array', (["[(True if 'query' in i else False) for i in line]"], {}), "([(True if 'query' in i else False) for i in line])\n", (6935, 6986), True, 'import numpy as np\n'), ((8791, 8819), 'torch.randperm', 'torch.randperm', (['self.num_cam'], {}), '(self.num_cam)\n', (8805, 8819), False, 'import os, torch, random, cv2, math, glob\n'), ((10231, 10261), 'torch.stack', 'torch.stack', (['img_tensor'], {'dim': '(0)'}), '(img_tensor, dim=0)\n', (10242, 10261), False, 'import os, torch, random, cv2, math, glob\n'), ((10263, 10285), 'torch.from_numpy', 'torch.from_numpy', (['pids'], {}), '(pids)\n', (10279, 10285), False, 'import os, torch, random, cv2, math, glob\n'), ((1245, 1262), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1256, 1262), False, 'from collections import defaultdict\n'), ((3356, 3381), 'random.uniform', 'random.uniform', (['(0.02)', '(0.2)'], {}), '(0.02, 0.2)\n', (3370, 3381), False, 'import random\n'), ((4575, 4594), 'PIL.Image.open', 'Image.open', (['im_path'], {}), '(im_path)\n', 
(4585, 4594), False, 'from PIL import Image\n'), ((5969, 5988), 'PIL.Image.open', 'Image.open', (['im_path'], {}), '(im_path)\n', (5979, 5988), False, 'from PIL import Image\n'), ((7604, 7623), 'PIL.Image.open', 'Image.open', (['im_path'], {}), '(im_path)\n', (7614, 7623), False, 'from PIL import Image\n'), ((9668, 9690), 'torch.from_numpy', 'torch.from_numpy', (['imgs'], {}), '(imgs)\n', (9684, 9690), False, 'import os, torch, random, cv2, math, glob\n'), ((10304, 10320), 'numpy.array', 'np.array', (['camids'], {}), '(camids)\n', (10312, 10320), True, 'import numpy as np\n'), ((2706, 2792), 'numpy.random.choice', 'np.random.choice', (['self.cam_index_dic[c][p]'], {'size': 'self.num_instances', 'replace': '(True)'}), '(self.cam_index_dic[c][p], size=self.num_instances, replace\n =True)\n', (2722, 2792), True, 'import numpy as np\n'), ((2806, 2826), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (2820, 2826), False, 'import random\n'), ((3466, 3503), 'math.sqrt', 'math.sqrt', (['(target_area * aspect_ratio)'], {}), '(target_area * aspect_ratio)\n', (3475, 3503), False, 'import os, torch, random, cv2, math, glob\n'), ((3532, 3569), 'math.sqrt', 'math.sqrt', (['(target_area / aspect_ratio)'], {}), '(target_area / aspect_ratio)\n', (3541, 3569), False, 'import os, torch, random, cv2, math, glob\n'), ((9380, 9403), 'PIL.Image.open', 'Image.open', (['im_path_cam'], {}), '(im_path_cam)\n', (9390, 9403), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
"""
Module to provide a simulator to render a laparoscopic view comprising
models of anatomy along with a laparoscopic ultrasound probe.
"""
import numpy as np
import vtk
import sksurgerycore.transforms.matrix as cmu
import sksurgeryvtk.widgets.vtk_rendering_generator as rg
import sksurgeryvtk.utils.matrix_utils as vmu
class VTKLUSSimulator(rg.VTKRenderingGenerator):
"""
Class derived from VTKRenderingGenerator to provide additional
functions to set up the position of anatomy and LUS probe with
respect to a stationary camera, placed at the world origin,
and pointing along +ve z axis, as per OpenCV conventions.
Note: The mesh representing the LUS probe body must be called 'probe',
and there must be at least one other mesh called 'liver'. Any other
meshes, e.g. gallbladder, arterties etc., will have the same transform
applied as the liver surface.
:param models_json_file: JSON file describing VTK models, in SNAPPY format
:param background_image_file: RGB image to render in background
:param camera_intrinsics_file: [3x3] matrix in text file, in numpy format
:param liver2camera_reference_file: [4x4] matrix in text file, numpy format
:param probe2camera_reference_file: [4x4] matrix in text file, numpy format
"""
def __init__(self,
models_json_file,
background_image_file,
camera_intrinsics_file,
liver2camera_reference_file,
probe2camera_reference_file,
camera_to_world=None,
left_to_right=None,
clipping_range=(1, 1000)
):
super().__init__(models_json_file,
background_image_file,
camera_intrinsics_file,
camera_to_world=camera_to_world,
left_to_right=left_to_right,
zbuffer=False,
gaussian_sigma=0,
gaussian_window_size=11,
clipping_range=clipping_range
)
self.reference_l2c = np.loadtxt(liver2camera_reference_file)
self.reference_p2c = np.loadtxt(probe2camera_reference_file)
self.cyl = vtk.vtkCylinderSource()
self.cyl.SetResolution(88)
self.cyl.SetRadius(5)
self.cyl.SetHeight(1000)
self.cyl.SetCenter((0, self.cyl.GetHeight() / 2.0, 0))
self.cyl.Update()
self.cyl_matrix = vtk.vtkMatrix4x4()
self.cyl_matrix.Identity()
self.cyl_trans = vtk.vtkTransform()
self.cyl_trans.SetMatrix(self.cyl_matrix)
self.cyl_transform_filter = vtk.vtkTransformPolyDataFilter()
self.cyl_transform_filter.SetInputData(self.cyl.GetOutput())
self.cyl_transform_filter.SetTransform(self.cyl_trans)
self.cyl_mapper = vtk.vtkPolyDataMapper()
self.cyl_mapper.SetInputConnection(
self.cyl_transform_filter.GetOutputPort())
self.cyl_mapper.Update()
self.cyl_actor = vtk.vtkActor()
self.cyl_actor.SetMapper(self.cyl_mapper)
probe_model = self.model_loader.get_surface_model('probe')
probe_colour = probe_model.get_colour()
self.cyl_actor.GetProperty().SetColor(probe_colour)
if probe_model.get_no_shading():
self.cyl_actor.GetProperty().SetAmbient(1)
self.cyl_actor.GetProperty().SetDiffuse(0)
self.cyl_actor.GetProperty().SetSpecular(0)
self.overlay.add_vtk_actor(self.cyl_actor)
self.set_clipping_range(clipping_range[0], clipping_range[1])
self.setup_camera_extrinsics(camera_to_world, left_to_right)
def set_pose(self,
anatomy_pose_params,
probe_pose_params,
angle_of_handle,
anatomy_location=None
):
"""
This is the main method to call to setup the pose of all anatomy and
for the LUS probe, and the handle.
You can then call get_image() to get the rendered image,
or call get_masks() to get a set of rendered masks,
and the relevant pose parameters for ML purposes.
The liver2camera and probe2camera are returned as 4x4 matrices.
This is because there are multiple different parameterisations
that the user might be working in. e.g. Euler angles, Rodrigues etc.
:param anatomy_pose_params: [rx, ry, rz, tx, ty, tz] in deg/mm
:param probe_pose_params: [rx, ry, rz, tx, ty, tz] in deg/mm
:param angle_of_handle: angle in deg
:param anatomy_location: [1x3] location of random point on liver surface
:return: [liver2camera4x4, probe2camera4x4, angle, anatomy_location1x3]
"""
# The 'anatomy_location' picks a point on the surface and moves
# the LUS probe to have it's centroid based there. This is in effect
# updating the so-called 'reference' position of the probe.
# Subsequent offsets in [rx, ry, rz, tx, ty, tz] are from this new posn.
p2c = self.reference_p2c
if anatomy_location is not None:
picked = np.zeros((4, 1))
picked[0][0] = anatomy_location[0]
picked[1][0] = anatomy_location[1]
picked[2][0] = anatomy_location[2]
picked[3][0] = 1
picked_point = self.reference_l2c @ picked
# This p2c then becomes the 'reference_probe2camera'.
p2c[0][3] = picked_point[0]
p2c[1][3] = picked_point[1]
p2c[2][3] = picked_point[2]
# Compute the transformation for the anatomy.
# We assume that the anatomy has been normalised (zero-centred).
rotation_tx = vmu.create_matrix_from_list([anatomy_pose_params[0],
anatomy_pose_params[1],
anatomy_pose_params[2],
0, 0, 0],
is_in_radians=False)
translation_tx = vmu.create_matrix_from_list([0, 0, 0,
anatomy_pose_params[3],
anatomy_pose_params[4],
anatomy_pose_params[5]],
is_in_radians=False)
anatomy_tx = translation_tx @ self.reference_l2c @ rotation_tx
full_anatomy_tx_vtk = \
vmu.create_vtk_matrix_from_numpy(anatomy_tx)
# Now we compute the position of the probe.
# We assume that the probe model has been normalised (zero-centred).
probe_tx = vmu.create_matrix_from_list(probe_pose_params,
is_in_radians=False)
p2l = np.linalg.inv(self.reference_l2c) @ p2c
probe_actor_tx = p2l @ probe_tx
full_probe_actor_tx = anatomy_tx @ probe_actor_tx
full_probe_actor_tx_vtk = \
vmu.create_vtk_matrix_from_numpy(full_probe_actor_tx)
# Apply the transforms to each actor.
self.set_pose_with_matrices(full_probe_actor_tx_vtk,
full_anatomy_tx_vtk,
angle_of_handle)
# Return parameters for final solution.
liver_model = self.model_loader.get_surface_model('liver')
final_l2c = \
vmu.create_numpy_matrix_from_vtk(liver_model.actor.GetMatrix())
probe_model = self.model_loader.get_surface_model('probe')
final_p2c = \
vmu.create_numpy_matrix_from_vtk(probe_model.actor.GetMatrix())
return [final_l2c, final_p2c, angle_of_handle, anatomy_location]
def set_pose_with_matrices(self, p2c, l2c, angle_of_handle):
"""
Method to apply 4x4 transformations to actors.
:param p2c: 4x4 matrix, either numpy or vtk matrix.
:param l2c: 4x4 matrix, either numpy or vtk matrix.
:param angle_of_handle: angle in deg.
:return: N/A
"""
# First we can compute the angle of the handle.
# This applies directly to the data, as it comes out
# of the vtkTransformPolyDataFilter, before the actor transformation.
probe_offset = np.eye(4)
probe_offset[0][3] = 0.007877540588378196
probe_offset[1][3] = 36.24640712738037
probe_offset[2][3] = -3.8626091003417997
r_x = \
cmu.construct_rx_matrix(angle_of_handle, is_in_radians=False)
rotation_about_x = \
cmu.construct_rigid_transformation(r_x, np.zeros((3, 1)))
self.cyl_trans.SetMatrix(
vmu.create_vtk_matrix_from_numpy(probe_offset @ rotation_about_x))
self.cyl_transform_filter.Update()
# Check p2c, l2c: if numpy, convert to vtk.
if isinstance(p2c, np.ndarray):
p2c = vmu.create_vtk_matrix_from_numpy(p2c)
if isinstance(l2c, np.ndarray):
l2c = vmu.create_vtk_matrix_from_numpy(l2c)
# This is where we apply transforms to each actor.
# Apply p2c to probe
self.cyl_actor.PokeMatrix(p2c)
probe_model = self.model_loader.get_surface_model('probe')
probe_model.actor.PokeMatrix(p2c)
# Apply l2c to organs in scene.
for model in self.model_loader.get_surface_models():
if model.get_name() != 'probe':
model.actor.PokeMatrix(l2c)
# Force re-render
self.overlay.Render()
self.repaint()
| [
"vtk.vtkTransformPolyDataFilter",
"sksurgerycore.transforms.matrix.construct_rx_matrix",
"numpy.eye",
"vtk.vtkCylinderSource",
"vtk.vtkPolyDataMapper",
"vtk.vtkTransform",
"vtk.vtkActor",
"numpy.zeros",
"numpy.linalg.inv",
"sksurgeryvtk.utils.matrix_utils.create_matrix_from_list",
"sksurgeryvtk.... | [((2183, 2222), 'numpy.loadtxt', 'np.loadtxt', (['liver2camera_reference_file'], {}), '(liver2camera_reference_file)\n', (2193, 2222), True, 'import numpy as np\n'), ((2252, 2291), 'numpy.loadtxt', 'np.loadtxt', (['probe2camera_reference_file'], {}), '(probe2camera_reference_file)\n', (2262, 2291), True, 'import numpy as np\n'), ((2312, 2335), 'vtk.vtkCylinderSource', 'vtk.vtkCylinderSource', ([], {}), '()\n', (2333, 2335), False, 'import vtk\n'), ((2550, 2568), 'vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (2566, 2568), False, 'import vtk\n'), ((2629, 2647), 'vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), '()\n', (2645, 2647), False, 'import vtk\n'), ((2734, 2766), 'vtk.vtkTransformPolyDataFilter', 'vtk.vtkTransformPolyDataFilter', ([], {}), '()\n', (2764, 2766), False, 'import vtk\n'), ((2926, 2949), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (2947, 2949), False, 'import vtk\n'), ((3107, 3121), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (3119, 3121), False, 'import vtk\n'), ((5804, 5939), 'sksurgeryvtk.utils.matrix_utils.create_matrix_from_list', 'vmu.create_matrix_from_list', (['[anatomy_pose_params[0], anatomy_pose_params[1], anatomy_pose_params[2], 0,\n 0, 0]'], {'is_in_radians': '(False)'}), '([anatomy_pose_params[0], anatomy_pose_params[1],\n anatomy_pose_params[2], 0, 0, 0], is_in_radians=False)\n', (5831, 5939), True, 'import sksurgeryvtk.utils.matrix_utils as vmu\n'), ((6164, 6299), 'sksurgeryvtk.utils.matrix_utils.create_matrix_from_list', 'vmu.create_matrix_from_list', (['[0, 0, 0, anatomy_pose_params[3], anatomy_pose_params[4],\n anatomy_pose_params[5]]'], {'is_in_radians': '(False)'}), '([0, 0, 0, anatomy_pose_params[3],\n anatomy_pose_params[4], anatomy_pose_params[5]], is_in_radians=False)\n', (6191, 6299), True, 'import sksurgeryvtk.utils.matrix_utils as vmu\n'), ((6626, 6670), 'sksurgeryvtk.utils.matrix_utils.create_vtk_matrix_from_numpy', 'vmu.create_vtk_matrix_from_numpy', 
(['anatomy_tx'], {}), '(anatomy_tx)\n', (6658, 6670), True, 'import sksurgeryvtk.utils.matrix_utils as vmu\n'), ((6820, 6887), 'sksurgeryvtk.utils.matrix_utils.create_matrix_from_list', 'vmu.create_matrix_from_list', (['probe_pose_params'], {'is_in_radians': '(False)'}), '(probe_pose_params, is_in_radians=False)\n', (6847, 6887), True, 'import sksurgeryvtk.utils.matrix_utils as vmu\n'), ((7136, 7189), 'sksurgeryvtk.utils.matrix_utils.create_vtk_matrix_from_numpy', 'vmu.create_vtk_matrix_from_numpy', (['full_probe_actor_tx'], {}), '(full_probe_actor_tx)\n', (7168, 7189), True, 'import sksurgeryvtk.utils.matrix_utils as vmu\n'), ((8412, 8421), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (8418, 8421), True, 'import numpy as np\n'), ((8596, 8657), 'sksurgerycore.transforms.matrix.construct_rx_matrix', 'cmu.construct_rx_matrix', (['angle_of_handle'], {'is_in_radians': '(False)'}), '(angle_of_handle, is_in_radians=False)\n', (8619, 8657), True, 'import sksurgerycore.transforms.matrix as cmu\n'), ((5225, 5241), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (5233, 5241), True, 'import numpy as np\n'), ((6949, 6982), 'numpy.linalg.inv', 'np.linalg.inv', (['self.reference_l2c'], {}), '(self.reference_l2c)\n', (6962, 6982), True, 'import numpy as np\n'), ((8739, 8755), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (8747, 8755), True, 'import numpy as np\n'), ((8803, 8868), 'sksurgeryvtk.utils.matrix_utils.create_vtk_matrix_from_numpy', 'vmu.create_vtk_matrix_from_numpy', (['(probe_offset @ rotation_about_x)'], {}), '(probe_offset @ rotation_about_x)\n', (8835, 8868), True, 'import sksurgeryvtk.utils.matrix_utils as vmu\n'), ((9024, 9061), 'sksurgeryvtk.utils.matrix_utils.create_vtk_matrix_from_numpy', 'vmu.create_vtk_matrix_from_numpy', (['p2c'], {}), '(p2c)\n', (9056, 9061), True, 'import sksurgeryvtk.utils.matrix_utils as vmu\n'), ((9120, 9157), 'sksurgeryvtk.utils.matrix_utils.create_vtk_matrix_from_numpy', 'vmu.create_vtk_matrix_from_numpy', 
(['l2c'], {}), '(l2c)\n', (9152, 9157), True, 'import sksurgeryvtk.utils.matrix_utils as vmu\n')] |
import numpy as np
from pysmore.libs.util import fast_sigmoid
from numba import jit
@jit(nopython=True, fastmath=True)
def get_dotproduct_loss(from_embedding, to_embedding, weight):
    """Gradients of a squared-error dot-product objective for one edge.

    The residual ``weight - from·to`` scales the *opposite* embedding to
    produce each side's update direction.

    Returns:
        (from_loss, to_loss): gradient vectors for the two endpoints.
    """
    target = float(weight)
    score = np.dot(from_embedding, to_embedding.T)
    residual = target - score
    # Each embedding is pushed along the other, scaled by the residual.
    loss_for_from = residual * to_embedding
    loss_for_to = residual * from_embedding
    return loss_for_from, loss_for_to
@jit(nopython=True, fastmath=True)
def get_loglikelihood_loss(from_embedding, to_embedding, weight):
    """Gradients of a sigmoid log-likelihood objective for one edge.

    Identical shape to ``get_dotproduct_loss`` except the raw score is
    squashed through ``fast_sigmoid`` before forming the residual.

    Returns:
        (from_loss, to_loss): gradient vectors for the two endpoints.
    """
    target = float(weight)
    score = np.dot(from_embedding, to_embedding.T)
    residual = target - fast_sigmoid(score)
    loss_for_from = residual * to_embedding
    loss_for_to = residual * from_embedding
    return loss_for_from, loss_for_to
@jit(nopython=True, fastmath=True)
def get_margin_bpr_loss(from_embedding, to_embedding_pos, to_embedding_neg, margin=8.0):
    """BPR-style gradients with a margin on the pos-minus-neg score.

    The ranking score is ``from·(pos - neg) - margin``; its sigmoid (negated)
    scales the updates. The negative item's gradient is the negation of the
    positive item's.

    Returns:
        (from_loss, to_loss_pos, to_loss_neg) gradient vectors.
    """
    pos_minus_neg = to_embedding_pos - to_embedding_neg
    score = np.dot(from_embedding, pos_minus_neg.T) - margin
    coeff = 0.0 - fast_sigmoid(score)
    loss_for_from = coeff * pos_minus_neg
    loss_for_pos = coeff * from_embedding
    # d(score)/d(neg) = -from_embedding, hence the sign flip.
    loss_for_neg = -coeff * from_embedding
    return loss_for_from, loss_for_pos, loss_for_neg
@jit(nopython=True, fastmath=True)
def get_bpr_loss(from_embedding, to_embedding_pos, to_embedding_neg):
    """Gradients of the standard BPR (Bayesian Personalized Ranking) loss.

    The ranking score is ``from·(pos - neg)``; the update coefficient is
    ``-sigmoid(score)``.

    Returns:
        (from_loss, to_loss_pos, to_loss_neg) gradient vectors.
    """
    diff_to_embedding = to_embedding_pos - to_embedding_neg
    prediction = np.dot(from_embedding, diff_to_embedding.T)
    gradient = 0.0 - fast_sigmoid(prediction)
    from_loss = gradient * diff_to_embedding
    to_loss_pos = gradient * from_embedding
    # BUGFIX: the negative item's gradient must be the NEGATION of the
    # positive item's — d(from·(pos-neg))/d(neg) = -from_embedding — matching
    # get_margin_bpr_loss above. Previously `to_loss_neg = to_loss_pos`
    # pushed the negative item in the same direction as the positive one.
    to_loss_neg = -to_loss_pos
    return from_loss, to_loss_pos, to_loss_neg
#TODO
@jit(nopython=True, fastmath=True)
def get_convolutional_loss(from_embedding, to_embedding_pos, to_embedding_negs, margin=8.0):
    """Margin-BPR-style gradients against a batch of negative samples.

    NOTE(review): marked TODO upstream and appears unfinished — do not rely
    on it as-is:
      * ``np.apply_along_axis`` is not supported by numba's nopython mode,
        so this function should fail to compile under
        ``@jit(nopython=True)`` — TODO confirm.
      * if ``to_embedding_negs`` is 2-D (one row per negative sample),
        ``gradient`` is 1-D and ``gradient * diff_to_embedding`` will not
        broadcast to (num_neg, dim) without a reshape — verify the intended
        argument shapes against the caller.
    """
    # Presumably one difference row per negative sample (pos broadcast
    # across negs) — TODO confirm shapes.
    diff_to_embedding = to_embedding_pos - to_embedding_negs
    prediction = np.dot(from_embedding, diff_to_embedding.T) - margin
    # Intended as an elementwise -sigmoid over the per-negative scores.
    gradient = np.apply_along_axis(lambda x: 0.0 - fast_sigmoid(x), axis=0, arr=prediction)
    from_loss = gradient * diff_to_embedding
    to_loss_pos = gradient * from_embedding
    # Sign flip for the negative samples, mirroring get_margin_bpr_loss.
    to_loss_negs = -gradient * from_embedding
    return from_loss, to_loss_pos, to_loss_negs
"pysmore.libs.util.fast_sigmoid",
"numpy.dot",
"numba.jit"
] | [((86, 119), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'fastmath': '(True)'}), '(nopython=True, fastmath=True)\n', (89, 119), False, 'from numba import jit\n'), ((415, 448), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'fastmath': '(True)'}), '(nopython=True, fastmath=True)\n', (418, 448), False, 'from numba import jit\n'), ((765, 798), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'fastmath': '(True)'}), '(nopython=True, fastmath=True)\n', (768, 798), False, 'from numba import jit\n'), ((1253, 1286), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'fastmath': '(True)'}), '(nopython=True, fastmath=True)\n', (1256, 1286), False, 'from numba import jit\n'), ((1704, 1737), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'fastmath': '(True)'}), '(nopython=True, fastmath=True)\n', (1707, 1737), False, 'from numba import jit\n'), ((227, 265), 'numpy.dot', 'np.dot', (['from_embedding', 'to_embedding.T'], {}), '(from_embedding, to_embedding.T)\n', (233, 265), True, 'import numpy as np\n'), ((559, 597), 'numpy.dot', 'np.dot', (['from_embedding', 'to_embedding.T'], {}), '(from_embedding, to_embedding.T)\n', (565, 597), True, 'import numpy as np\n'), ((1434, 1477), 'numpy.dot', 'np.dot', (['from_embedding', 'diff_to_embedding.T'], {}), '(from_embedding, diff_to_embedding.T)\n', (1440, 1477), True, 'import numpy as np\n'), ((622, 646), 'pysmore.libs.util.fast_sigmoid', 'fast_sigmoid', (['prediction'], {}), '(prediction)\n', (634, 646), False, 'from pysmore.libs.util import fast_sigmoid\n'), ((965, 1008), 'numpy.dot', 'np.dot', (['from_embedding', 'diff_to_embedding.T'], {}), '(from_embedding, diff_to_embedding.T)\n', (971, 1008), True, 'import numpy as np\n'), ((1039, 1063), 'pysmore.libs.util.fast_sigmoid', 'fast_sigmoid', (['prediction'], {}), '(prediction)\n', (1051, 1063), False, 'from pysmore.libs.util import fast_sigmoid\n'), ((1499, 1523), 'pysmore.libs.util.fast_sigmoid', 'fast_sigmoid', (['prediction'], {}), '(prediction)\n', (1511, 1523), False, 'from 
pysmore.libs.util import fast_sigmoid\n'), ((1909, 1952), 'numpy.dot', 'np.dot', (['from_embedding', 'diff_to_embedding.T'], {}), '(from_embedding, diff_to_embedding.T)\n', (1915, 1952), True, 'import numpy as np\n'), ((2013, 2028), 'pysmore.libs.util.fast_sigmoid', 'fast_sigmoid', (['x'], {}), '(x)\n', (2025, 2028), False, 'from pysmore.libs.util import fast_sigmoid\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.