code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import unittest
from specklepy.io.filearchive import FileArchive
from specklepy.core.alignment import FrameAlignment
from specklepy.plotting.utils import imshow
class TestAlignment(unittest.TestCase):
    """Unit tests for specklepy's FrameAlignment pad-vector handling."""

    def setUp(self):
        # Fixtures shared by all test methods.
        self.path = 'specklepy/tests/files/'
        self.files = FileArchive('synthetic/glao_600ms*.fits', file_path=self.path).files
        self.shifts = [(0, 0), (34, -20), (-14, -51)]
        self.image_shape = (512, 512)
        self.cube_shape = (10, 512, 512)

    def test_get_pad_vectors(self):
        """Deriving pad vectors from integer shifts must not raise."""
        FrameAlignment().derive_pad_vectors(self.shifts)

    def test_pad_array(self):
        """Padding a 2D image and every frame of a 3D cube must not raise."""
        aligner = FrameAlignment()
        aligner.derive_pad_vectors(self.shifts)
        image = aligner.pad_array(np.ones(self.image_shape), pad_vector_index=1, mode='same')
        imshow(image)
        pad_vectors, reference_vector = aligner.derive_pad_vectors(self.shifts)
        for index, vector in enumerate(pad_vectors):
            cube = aligner.pad_array(np.ones(self.cube_shape), pad_vector_index=index, mode='same')
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"specklepy.io.filearchive.FileArchive",
"numpy.ones",
"specklepy.core.alignment.FrameAlignment",
"specklepy.plotting.utils.imshow",
"unittest.main"
] | [((1171, 1186), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1184, 1186), False, 'import unittest\n'), ((570, 586), 'specklepy.core.alignment.FrameAlignment', 'FrameAlignment', ([], {}), '()\n', (584, 586), False, 'from specklepy.core.alignment import FrameAlignment\n'), ((688, 704), 'specklepy.core.alignment.FrameAlignment', 'FrameAlignment', ([], {}), '()\n', (702, 704), False, 'from specklepy.core.alignment import FrameAlignment\n'), ((890, 904), 'specklepy.plotting.utils.imshow', 'imshow', (['padded'], {}), '(padded)\n', (896, 904), False, 'from specklepy.plotting.utils import imshow\n'), ((311, 373), 'specklepy.io.filearchive.FileArchive', 'FileArchive', (['"""synthetic/glao_600ms*.fits"""'], {'file_path': 'self.path'}), "('synthetic/glao_600ms*.fits', file_path=self.path)\n", (322, 373), False, 'from specklepy.io.filearchive import FileArchive\n'), ((822, 847), 'numpy.ones', 'np.ones', (['self.image_shape'], {}), '(self.image_shape)\n', (829, 847), True, 'import numpy as np\n'), ((1079, 1103), 'numpy.ones', 'np.ones', (['self.cube_shape'], {}), '(self.cube_shape)\n', (1086, 1103), True, 'import numpy as np\n')] |
import numpy as np
import featureflow as ff
import zounds
from torch import nn
import torch
from torch.autograd import Variable
import argparse
import glob
import os
from pytorch_wgan2 import \
BaseGenerator, BaseCritic, CriticLayer, GeneratorLayer, FinalGeneratorLayer
from scipy.signal import resample, tukey
from random import choice
import torch.nn.functional as F
from uuid import uuid4
from functools import reduce
# Target sample rate for all audio in this experiment.
samplerate = zounds.SR11025()
# Base featureflow model that stores audio resampled to `samplerate`.
BaseModel = zounds.resampled(resample_to=samplerate, store_resampled=True)
# Dimensionality of the generator's latent input vector.
LATENT_DIM = 100
# Number of audio samples per training window.
SAMPLE_SIZE = 8192
# Per-band sample counts for the frequency decomposition (low to high).
bands = (8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096)
FIRST_FEATURE_MAP_SIZE = 64
# Geometric growth factor for feature-map sizes across layers.
FACTOR = 1.2
# Cumulative band boundaries, used to slice flattened band vectors.
stops = tuple(np.cumsum(bands))
slices = [slice(start, stop) for (start, stop) in zip((0,) + stops, stops)]
# Empirical per-band mean magnitudes used to normalize each band
# (presumably measured via get_magnitudes() — TODO confirm provenance).
SCALING = [
    0.035643139891883342,
    0.041599504468638721,
    0.043825312492803623,
    0.089081457396139319,
    0.11216649030248733,
    0.1755375826822119,
    0.3011956255933676,
    0.50373631894723525,
    0.72654767098659556,
    1.0668680716129715
]
def perceptual(x):
    """Apply an A-weighting curve to *x* in the frequency domain.

    The windowed samples are transformed with an orthonormal FFT,
    scaled by zounds.AWeighting, and transformed back, preserving the
    input's dimensions.
    """
    spectrum = np.fft.rfft(x, norm='ortho', axis=-1)
    linear_scale = zounds.LinearScale.from_sample_rate(samplerate, spectrum.shape[-1])
    dims = [x.dimensions[0], zounds.FrequencyDimension(linear_scale)]
    weighted = zounds.ArrayWithUnits(spectrum, dims)
    weighted *= zounds.AWeighting()
    restored = np.fft.irfft(weighted, norm='ortho', axis=-1)
    return zounds.ArrayWithUnits(restored, x.dimensions)
@zounds.simple_lmdb_settings('wgan', map_size=1e10, user_supplied_id=True)
class Sound(BaseModel):
    """Audio document model persisted to LMDB under the 'wgan' key space."""
    # Overlapping windows of SAMPLE_SIZE samples with a 50% hop.
    windowed = zounds.ArrayWithUnitsFeature(
        zounds.SlidingWindow,
        wscheme=zounds.SampleRate(
            frequency=samplerate.frequency * (SAMPLE_SIZE // 2),
            duration=samplerate.frequency * SAMPLE_SIZE),
        needs=BaseModel.resampled,
        store=False)
    # A-weighted version of each window (see perceptual() above).
    perceptual = zounds.ArrayWithUnitsFeature(
        perceptual,
        needs=windowed)
    # Band-wise frequency-adaptive representation of each window.
    decomposed = zounds.ArrayWithUnitsFeature(
        lambda x: FrequencyDecomposition(x, bands).as_frequency_adaptive(),
        needs=windowed)
def feature_map_size(inp, kernel, stride=1, padding=0):
    """Return the output length of a 1D convolution.

    Uses the standard formula ((inp - kernel + 2*padding) / stride) + 1.
    Note this is true division, so the result may be a float.
    """
    padded_span = inp - kernel + 2 * padding
    return padded_span / stride + 1
class FrequencyDecomposition(object):
    """Decompose audio into frequency bands via successive resampling.

    Each requested band size yields a low-resolution (low-pass) view of
    the signal; upsampling that band back to full length and
    subtracting it from a running residual leaves the higher-frequency
    content for subsequent bands.
    """

    def __init__(self, samples, sizes, window=None):
        # Optional window function forwarded to scipy's resample().
        self.window = window
        self.sizes = sorted(sizes)
        self.samples = samples
        # Work on a copy; the residual is destructively updated below.
        original = self.samples.copy()
        self.bands = []
        self.frequency_bands = []
        start_hz = 0
        # NOTE(review): iterates the caller's `sizes`, not the sorted
        # `self.sizes` — assumes callers pass sizes already ascending.
        for size in sizes:
            # extract a frequency band
            if size != self.size:
                s = self._resample(original, size)
            else:
                s = original
            self.bands.append(s)
            # Remove this band's contribution from the residual.
            original -= self._resample(s, self.size)
            # The band size determines what fraction of the Nyquist
            # range this band covers.
            stop_hz = samplerate.nyquist * (size / self.size)
            self.frequency_bands.append(zounds.FrequencyBand(start_hz, stop_hz))
            start_hz = stop_hz

    @classmethod
    def _rs(cls, samples, desired_size, window=None):
        # Fourier-domain resampling along the last (time) axis.
        axis = -1
        w = window(samples.shape[axis]) if window else None
        return resample(samples, desired_size, axis=axis, window=w)

    def _resample(self, samples, desired_size):
        # Instance variant that applies this decomposition's window.
        return self._rs(samples, desired_size, self.window)

    @classmethod
    def synthesize_block(cls, block, window=None):
        """Reassemble full-length samples from a batch of flattened bands."""
        samples = np.zeros((len(block), SAMPLE_SIZE), dtype=block.dtype)
        start = 0
        for i, band in enumerate(bands):
            stop = start + band
            b = block[:, start: stop]
            # Undo the per-band SCALING normalization applied by
            # as_frequency_adaptive(), then upsample to full length.
            samples += cls._rs(b * SCALING[i], SAMPLE_SIZE, window=window)
            start = stop
        return samples

    @property
    def size(self):
        # Number of samples per windowed example.
        return self.samples.shape[1]

    def as_frequency_adaptive(self):
        """Return the bands, normalized by SCALING, as a zounds
        FrequencyAdaptive array."""
        scale = zounds.ExplicitScale(self.frequency_bands)
        bands = [b / SCALING[i] for i, b in enumerate(self.bands)]
        return zounds.FrequencyAdaptive(
            bands, scale=scale, time_dimension=self.samples.dimensions[0])

    def synthesize_iter(self):
        """Yield (raw_samples, playable padded AudioSamples) pairs."""
        fa = self.as_frequency_adaptive()
        samples = self.__class__.synthesize_block(fa)
        for sample in samples:
            yield sample, zounds.AudioSamples(sample, samplerate) \
                .pad_with_silence(zounds.Seconds(1))
class FDDiscriminator(nn.Module):
    """Critic that scores each frequency band with its own conv stack.

    Band i is processed by i + 1 CriticLayer stages; the resulting
    per-band feature vectors are concatenated and reduced to a single
    critic score by two linear layers.
    """

    def __init__(self):
        super(FDDiscriminator, self).__init__()
        self.factor = FACTOR
        # One stack of layers per frequency band.
        self.layer_stacks = [[] for _ in bands]
        # Feature-map sizes grow geometrically by FACTOR per depth:
        # [64, 76, 91, ...].
        self.feature_map_sizes = reduce(
            lambda x, y: x + [int(x[-1] * FACTOR)],
            range(len(bands) - 1),
            [FIRST_FEATURE_MAP_SIZE])
        for i, band in enumerate(bands):
            for j in range(i + 1):
                fms = self.feature_map_sizes[j]
                first_layer = j == 0
                in_channels = 1 if first_layer else self.feature_map_sizes[
                    j - 1]
                # (kernel, stride, padding): a wider first layer, then
                # small downsampling layers.
                params = (8, 4, 0) if first_layer else (3, 2, 0)
                layer = CriticLayer(in_channels, fms, *params)
                self.layer_stacks[i].append(layer)
                # Register explicitly so nn.Module tracks the
                # parameters of layers stored in plain lists.
                self.add_module('{i}{j}'.format(**locals()), layer)
        self.l1 = nn.Linear(sum(self.feature_map_sizes), 256, bias=False)
        self.l2 = nn.Linear(256, 1, bias=False)

    def forward(self, x):
        # x holds flattened frequency-adaptive examples; slice out each
        # band by its width and run it through its own stack.
        fms = []
        start_index = 0
        for i, band in enumerate(bands):
            stop = start_index + band
            slce = x[:, start_index: stop]
            start_index = stop
            fm = slce.contiguous().view(-1, 1, band)
            # subset = self.layers[:i + 1]
            subset = self.layer_stacks[i]
            for s in subset:
                fm = s(fm)
            fms.append(fm)
        # push the band-wise frequency maps through some linear layers
        flat = torch.cat(fms, dim=1).squeeze()
        x = self.l1(flat)
        x = F.leaky_relu(x, 0.2)
        x = F.dropout(x, 0.2, self.training)
        x = self.l2(x)
        return x
class FDGenerator(nn.Module):
    """Generator that produces each frequency band with its own
    transposed-conv stack, mirroring FDDiscriminator.

    A latent vector is expanded by two linear layers into one feature
    vector per band, and each band's stack (run deepest-first) upsamples
    its segment into that band's samples.
    """

    def __init__(self):
        super(FDGenerator, self).__init__()
        # One stack of layers per frequency band.
        self.layer_stacks = [[] for _ in bands]
        self.factor = FACTOR
        # Same geometric feature-map schedule as the discriminator.
        self.feature_map_sizes = reduce(
            lambda x, y: x + [int(x[-1] * FACTOR)],
            range(len(bands) - 1),
            [FIRST_FEATURE_MAP_SIZE])
        for i, band in enumerate(bands):
            for j in range(i + 1):
                fms = self.feature_map_sizes[j]
                first_layer = j == 0
                out_channels = 1 if first_layer else self.feature_map_sizes[
                    j - 1]
                params = (8, 4, 0) if first_layer else (3, 2, 0)
                # The j == 0 layer emits the final single-channel band.
                cls = FinalGeneratorLayer if first_layer else GeneratorLayer
                layer = cls(fms, out_channels, *params)
                self.layer_stacks[i].append(layer)
                # Register so nn.Module tracks parameters in the lists.
                self.add_module('{i}{j}'.format(**locals()), layer)
        total_features = sum(self.feature_map_sizes)
        self.l1 = nn.Linear(LATENT_DIM, 256, bias=False)
        self.bn1 = nn.BatchNorm1d(256)
        self.l2 = nn.Linear(256, total_features, bias=False)
        self.bn2 = nn.BatchNorm1d(total_features)

    def forward(self, x):
        x = x.view(-1, LATENT_DIM)
        x = self.l1(x)
        x = self.bn1(x)
        x = F.leaky_relu(x, 0.2)
        x = F.dropout(x, 0.2, self.training)
        x = self.l2(x)
        x = self.bn2(x)
        x = F.leaky_relu(x, 0.2)
        x = F.dropout(x, 0.2, self.training)
        current = 0
        # NOTE: local `bands` shadows the module-level `bands` tuple.
        bands = []
        for i, fms in enumerate(self.feature_map_sizes):
            stop = current + fms
            segment = x[:, current: stop]
            current = stop
            fm = segment.contiguous().view(-1, segment.size()[1], 1)
            # subset = self.layers[-(i + 1):]
            # Run the stack in reverse so the deepest layer goes first
            # and the FinalGeneratorLayer emits the band last.
            subset = self.layer_stacks[i][::-1]
            for s in subset:
                fm = s(fm)
            bands.append(fm.squeeze())
        return torch.cat(bands, dim=1)
class Generator(BaseGenerator):
    """Plain stacked transposed-conv generator.

    Each tuple is (in_channels, out_channels, kernel, stride, padding)
    as consumed by BaseGenerator.
    """
    def __init__(self):
        super(Generator, self).__init__(
            (LATENT_DIM, 512, 4, 1, 0),
            (512, 256, 8, 4, 2),
            (256, 128, 8, 4, 2),
            (128, 128, 8, 4, 2),
            (128, 128, 8, 4, 2),
            (128, 1, 16, 8, 4))
class Generator2(BaseGenerator):
    """Alternative generator with a very wide final layer
    (kernel 126, stride 32); tuples are
    (in_channels, out_channels, kernel, stride, padding).
    """
    def __init__(self):
        super(Generator2, self).__init__(
            (LATENT_DIM, 256, 4, 1, 0),
            (256, 256, 8, 4, 2),
            (256, 128, 8, 4, 2),
            (128, 128, 8, 4, 2),
            (128, 1, 126, 32, 47))
class Critic(BaseCritic):
    """Plain stacked conv critic for SAMPLE_SIZE-length windows.

    First argument is the input length; each following tuple is
    (in_channels, out_channels, kernel, stride, padding), with a
    final (in_features, out_features) linear stage.
    """
    def __init__(self):
        super(Critic, self).__init__(
            SAMPLE_SIZE,
            (1, 64, 16, 8, 4),
            (64, 128, 8, 4, 2),
            (128, 128, 8, 4, 2),
            (128, 128, 8, 4, 2),
            (128, 256, 8, 4, 2),
            (256, 512, 4, 1, 0),
            (512, 1))
class Critic2(BaseCritic):
    """Alternative critic whose first layer mirrors Generator2's wide
    final layer (kernel 126, stride 32); layer-tuple format matches
    Critic.
    """
    def __init__(self):
        super(Critic2, self).__init__(
            SAMPLE_SIZE,
            (1, 128, 126, 32, 47),
            (128, 128, 8, 4, 2),
            (128, 256, 8, 4, 2),
            (256, 256, 8, 4, 2),
            (256, 512, 4, 1, 0),
            (512, 1))
class GanPair(nn.Module):
    """Bundle the generator and critic into a single nn.Module so both
    sets of parameters travel together (e.g. for .cuda())."""

    def __init__(self):
        super(GanPair, self).__init__()
        self.discriminator = Critic()
        self.generator = Generator()

    def forward(self, x):
        # The pair is never invoked directly; call the sub-modules.
        raise NotImplementedError()
def try_network():
    """Smoke-test the GAN pair: push a random latent batch through the
    generator and the critic on the GPU, printing output shapes."""
    latent = np.random.normal(0, 1, (64, LATENT_DIM)).astype(np.float32)
    batch = Variable(torch.from_numpy(latent)).cuda()
    pair = GanPair().cuda()
    generated = pair.generator(batch, debug=True)
    print(generated.size())
    scores = pair.discriminator(generated, debug=True)
    print(scores.size())
@zounds.simple_settings
class Gan(ff.BaseModel):
    """featureflow training pipeline for the WGAN."""
    # Raw training examples, supplied as an iterator at process() time.
    samples = ff.PickleFeature(ff.IteratorNode)
    # Reservoir of 100k shuffled examples drawn from the stream.
    shuffled = ff.PickleFeature(
        zounds.ShuffledSamples,
        nsamples=int(1e5),
        dtype=np.float32,
        needs=samples)
    # Per-example normalization before training.
    scaled = ff.PickleFeature(
        zounds.InstanceScaling,
        needs=shuffled)
    # The GAN itself; the trainer is injected via the 'trainer' kwarg.
    wgan = ff.PickleFeature(
        zounds.PyTorchGan,
        trainer=ff.Var('trainer'),
        needs=scaled)
    # Persisted end-to-end pipeline (scaling + trained generator).
    pipeline = ff.PickleFeature(
        zounds.PreprocessingPipeline,
        needs=(scaled, wgan,),
        store=True)
"""
Log
- reduced sample size from 8192 to 4096. No difference
- Introduced batch norm. Discriminator loss seems to stall out, and generator
produces very periodic sine-like sound with many harmonics
- leaky RELU in discriminator seems to make little or no difference
- tanh for all layers produces noise
- leaky RELU in the generator seem to produce more plausible *looking* waveforms.
generated examples are a combination of a single tone and noise
- add batch norm to the last generator layer - this seems to have really helped
with the visual appearance of generated samples
- don't do instance scaling? there seems to be more variability, but still noise
- try with mu_law this results in noise, and strong peaks at convolution boundaries. Why do things totally break down with mu_law?
- try the mu-law one-hot encoding. this is very slow, and it's producing some pretty awful output. Perhaps I should train longer with softmax.
- try penalizing the norm (improved WGAN) much more variation. Still some noise
- try tanh all the way through - doesn't learn at all
- try interpolating in sample space - learns a good deal of variety
- try learning on downsampled 8192 samples - there is some movement in the samples
- try without dropout - this seems to be slightly worse/noisier
- now that I'm doing WGAN, try instance scaling again - this seems to be OK now, and produces more variation
- try A-weighting the frequency domain and then IFFT back to samples - this seems to slightly improve variation
- try without tanh? - doesn't get rid of the noise
- try without batch norm in last layer? - doesn't change noise situation
- try with tanh in first layer of discriminator? - doesn't change noise
- can I scale up to 8192 and capture meaningful structure, even if noisy?
- yes, at least for Bach
- try with phat drum loops
- yes, starts to learn drum-like sounds
- try with speech
- yes, starts to learn speech-like sounds
- try with a toy dataset so I can understand the biases/problems
- try with <NAME>
- yes, learns speech-like sounds plus kick drums and bass
- try with a mixture of different types of sample
- residuals in the network
- dilated convolutions
- try adding batch norm back into discriminator?
- try training on mdct - nope, learns nothing
- progressively growing WGAN
"""
def load_and_play():
    """Yield playback tuples for the newest saved batch of generated examples.

    Loads the most recently created ``*.npy`` file in the working
    directory and, for each generated example in it, yields
    ``(raw_array, samples, normalized_audio, magnitude_spectrogram)``.

    Raises:
        IndexError: if no ``*.npy`` files exist in the working directory.
    """
    # BUG FIX: sorted() lost its `cmp` parameter in Python 3 (this file
    # is Python 3 — print() calls, f-strings), so the original call
    # raised TypeError. Sort by creation time with a key function.
    files = sorted(glob.glob('*.npy'), key=lambda f: os.stat(f).st_ctime)
    most_recent = files[-1]
    print('loading generated examples from', most_recent)
    results = np.load(most_recent)
    # synthesized = FrequencyDecomposition.synthesize_block(results)
    synthesized = results
    for raw, result in zip(results, synthesized):
        # Short-time magnitude spectrogram for visualization.
        windowed = zounds.sliding_window(result, 512, 256)
        spec = np.abs(np.fft.rfft(windowed))
        audio_samples = zounds.AudioSamples(result, samplerate) \
            .pad_with_silence(zounds.Seconds(1))
        yield raw, result, audio_samples / audio_samples.max(), spec
def synthetic():
    """Yield 100 encoded sine tones with random length and pitch."""
    for _ in range(100):
        seconds = zounds.Seconds(np.random.randint(2, 20))
        fundamental = np.random.randint(50, 400)
        frequencies = [fundamental]
        # NOTE(review): range(0) never iterates, so no octaves are
        # actually added above the fundamental.
        for _ in range(0):
            frequencies.append(frequencies[-1] * 2)
        synthesizer = zounds.SineSynthesizer(samplerate)
        tone = synthesizer.synthesize(seconds, frequencies)
        yield tone.encode()
def ingest_all():
    """Download and ingest every training corpus into the Sound model."""
    archives = (
        zounds.InternetArchive('AOC11B'),
        zounds.InternetArchive('Greatest_Speeches_of_the_20th_Century'),
        zounds.InternetArchive('Kevin_Gates_-_By_Any_Means-2014'),
        zounds.PhatDrumLoops(),
    )
    for archive in archives:
        zounds.ingest(archive, Sound, multi_threaded=True)
def ingest():
    """Ingest a single internet-archive corpus into the Sound model."""
    archive = zounds.InternetArchive('AOC11B')
    zounds.ingest(archive, Sound, multi_threaded=True)


# for s in synthetic():
#     print Sound.process(meta=s, _id=uuid4().hex)
def ingest_and_train(epochs):
    """Ingest audio, train the WGAN for *epochs* epochs (if not already
    trained), and return an AudioSamples sequence generated from a
    random latent walk.
    """
    ingest()
    network = GanPair()

    def arg_maker(epoch):
        # Called once per epoch by the trainer: generate and save a
        # batch of samples so training progress can be audited offline.
        z = np.random.normal(0, 1, (64, LATENT_DIM)).astype(np.float32)
        t = torch.from_numpy(z)
        v = Variable(t).cuda()
        samples = network.generator(v).data.cpu().numpy().squeeze()
        np.save('epoch' + str(epoch), samples)
        print('saved samples for epoch', epoch)
        return dict()

    # Abandoned experiment: fix the first/last conv layers to a bank of
    # sine basis functions (kept for reference).
    # out_channels = 128
    # kernel_size = 126
    # basis = np.zeros((out_channels, kernel_size), dtype=np.float32)
    # synth = zounds.SineSynthesizer(samplerate)
    # space = np.geomspace(50, samplerate.nyquist, out_channels)
    # for i, freq in enumerate(space):
    #     basis[i] = synth.synthesize(samplerate.frequency * kernel_size, [freq])
    # basis = torch.from_numpy(basis[:, None, :]).cuda()
    # basis = Variable(basis)
    #
    # gen_basis = torch.from_numpy(basis[:, None, :]).cuda()
    # critic_basis = torch.from_numpy(basis[:, None, :]).cuda()
    #
    # print network.generator.main[-1].l1.weight.size()
    # network.generator.main[-1].l1.weight.data = gen_basis
    # network.generator.main[-1].l1.weight.requires_grad = False
    #
    # print network.discriminator.main[0].l1.weight.size()
    # network.discriminator.main[0].l1.weight.data = critic_basis
    # network.discriminator.main[0].l1.weight.requires_grad = False

    # Abandoned experiment: extra generator loss term encouraging flat
    # frequency response (kept for reference).
    # def generator_loss_term(network, samples):
    #     result = F.conv1d(samples, basis, stride=64)
    #     result = torch.abs(result)
    #     mean = result.mean(dim=1)
    #     std = result.std(dim=1)
    #     result = mean / std
    #     return result.mean() * 2500

    # Train only if no persisted pipeline exists yet.
    if not Gan.exists():
        trainer = zounds.WassersteinGanTrainer(
            network=network,
            latent_dimension=(LATENT_DIM,),
            n_critic_iterations=10,
            epochs=epochs,
            batch_size=64,
            arg_maker=arg_maker)
        Gan.process(samples=(snd.perceptual for snd in Sound), trainer=trainer)
    p = Gan()

    def walk2(steps):
        # Yield `steps` independent random latent vectors.
        for i in range(steps):
            yield np.random.normal(0, 1, LATENT_DIM)

    def listen():
        # Transform a long latent walk through the trained pipeline and
        # concatenate the generated snippets into one audio stream.
        padding = zounds.Milliseconds(250)
        z = np.concatenate(list(walk2(1000)))
        result = p.pipeline.transform(z).data.squeeze()
        x = np.concatenate([
            zounds.AudioSamples(j,
                                samplerate).pad_with_silence(
                padding)
            for j in result])
        return zounds.AudioSamples(x, zounds.SR11025())

    return listen()
def test_frequency_decomposition():
    """Smoke-test the band-wise generator and discriminator on the GPU,
    printing output shapes and the per-band layer stacks."""
    # snds = list(Sound)
    # snd = choice(snds)
    # fd = FrequencyDecomposition(snd.windowed, bands)
    # print [band.shape for band in fd.bands]
    gen = FDGenerator().cuda()
    inp = torch.zeros(64, LATENT_DIM).normal_(0, 1)
    inp = Variable(inp).cuda()
    x = gen(inp)
    print(x.size())
    disc = FDDiscriminator().cuda()
    # fa = fd.as_frequency_adaptive()[:64].astype(np.float32)
    # print fa.shape, [fa[:, band].shape for band in fa.dimensions[1].scale]
    # inp = torch.from_numpy(fa)
    # inp = Variable(inp).cuda()
    x = disc(x)
    print(x.size())
    # BUG FIX: neither network defines `.layers` — the attribute is
    # `layer_stacks` (see FDGenerator/FDDiscriminator.__init__ and the
    # commented-out `self.layers` lines there), so the original
    # `print(gen.layers)` raised AttributeError.
    print(gen.layer_stacks)
    print(disc.layer_stacks)
def tweak():
    """Build frequency decompositions of one random sound under several
    windowing choices, for side-by-side comparison in the REPL.

    Returns six FrequencyDecomposition objects: (raw, raw+hann-resample,
    hann-windowed, hann-windowed+hann-resample, tukey-windowed,
    tukey-windowed+hann-resample).
    """
    sound = choice(list(Sound))
    raw = sound.windowed
    hann_windowed = raw * np.hanning(SAMPLE_SIZE)
    tukey_windowed = raw * tukey(SAMPLE_SIZE, 0.2)
    ofd = FrequencyDecomposition(raw, bands)
    ofd2 = FrequencyDecomposition(raw, bands, window=np.hanning)
    wfd = FrequencyDecomposition(hann_windowed, bands)
    wfd2 = FrequencyDecomposition(hann_windowed, bands, window=np.hanning)
    tfd = FrequencyDecomposition(tukey_windowed, bands)
    tfd2 = FrequencyDecomposition(tukey_windowed, bands, window=np.hanning)
    return ofd, ofd2, wfd, wfd2, tfd, tfd2
def get_magnitudes():
    """Print the mean absolute magnitude of each frequency band for one
    randomly chosen sound, plus the magnitudes after normalization."""
    decomposed = choice(list(Sound)).decomposed
    magnitudes = []
    scaled = []
    offset = 0
    for width in bands:
        segment = decomposed[:, offset: offset + width]
        mean_magnitude = np.abs(segment).mean()
        magnitudes.append(mean_magnitude)
        scaled.append(np.abs(segment / mean_magnitude).mean())
        offset += width
    print(magnitudes)
    print(scaled)
if __name__ == '__main__':
    # Command-line entry point: each flag selects one of the experiment
    # helpers defined above.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--ingest',
        action='store_true')
    parser.add_argument(
        '--train',
        help='ingest audio and train',
        action='store_true')
    parser.add_argument(
        '--epochs',
        help='number of epochs to train',
        type=int)
    parser.add_argument(
        '--try-network',
        help='dry run of network',
        action='store_true')
    parser.add_argument(
        '--evaluate',
        help='listen to and view generated results',
        action='store_true')
    parser.add_argument(
        '--test-decomposition',
        help='test out the frequency decomposition',
        action='store_true')
    parser.add_argument(
        '--get-magnitudes',
        action='store_true')
    parser.add_argument(
        '--tweak',
        action='store_true')
    args = parser.parse_args()

    # Results are bound to names so they remain inspectable in the
    # in-browser REPL started below.
    if args.train:
        s = ingest_and_train(args.epochs)
    elif args.ingest:
        ingest()
    elif args.try_network:
        try_network()
    elif args.evaluate:
        result_iter = load_and_play()
    elif args.test_decomposition:
        test_frequency_decomposition()
    elif args.get_magnitudes:
        get_magnitudes()
    elif args.tweak:
        ofd, ofd2, wfd, wfd2, tfd, tfd2 = tweak()

    # start up an in-browser REPL to interact with the results
    app = zounds.ZoundsApp(
        model=Sound,
        audio_feature=Sound.ogg,
        visualization_feature=Sound.windowed,
        globals=globals(),
        locals=locals())
    app.start(9999)
| [
"numpy.hanning",
"zounds.Milliseconds",
"zounds.WassersteinGanTrainer",
"numpy.fft.irfft",
"zounds.FrequencyDimension",
"torch.from_numpy",
"torch.nn.BatchNorm1d",
"zounds.AudioSamples",
"zounds.AWeighting",
"zounds.ArrayWithUnits",
"zounds.PhatDrumLoops",
"zounds.resampled",
"featureflow.Pi... | [((440, 456), 'zounds.SR11025', 'zounds.SR11025', ([], {}), '()\n', (454, 456), False, 'import zounds\n'), ((469, 531), 'zounds.resampled', 'zounds.resampled', ([], {'resample_to': 'samplerate', 'store_resampled': '(True)'}), '(resample_to=samplerate, store_resampled=True)\n', (485, 531), False, 'import zounds\n'), ((1438, 1524), 'zounds.simple_lmdb_settings', 'zounds.simple_lmdb_settings', (['"""wgan"""'], {'map_size': '(10000000000.0)', 'user_supplied_id': '(True)'}), "('wgan', map_size=10000000000.0,\n user_supplied_id=True)\n", (1465, 1524), False, 'import zounds\n'), ((682, 698), 'numpy.cumsum', 'np.cumsum', (['bands'], {}), '(bands)\n', (691, 698), True, 'import numpy as np\n'), ((1075, 1112), 'numpy.fft.rfft', 'np.fft.rfft', (['x'], {'norm': '"""ortho"""', 'axis': '(-1)'}), "(x, norm='ortho', axis=-1)\n", (1086, 1112), True, 'import numpy as np\n'), ((1125, 1190), 'zounds.LinearScale.from_sample_rate', 'zounds.LinearScale.from_sample_rate', (['samplerate', 'coeffs.shape[-1]'], {}), '(samplerate, coeffs.shape[-1])\n', (1160, 1190), False, 'import zounds\n'), ((1304, 1323), 'zounds.AWeighting', 'zounds.AWeighting', ([], {}), '()\n', (1321, 1323), False, 'import zounds\n'), ((1338, 1378), 'numpy.fft.irfft', 'np.fft.irfft', (['arr'], {'norm': '"""ortho"""', 'axis': '(-1)'}), "(arr, norm='ortho', axis=-1)\n", (1350, 1378), True, 'import numpy as np\n'), ((1390, 1434), 'zounds.ArrayWithUnits', 'zounds.ArrayWithUnits', (['samples', 'x.dimensions'], {}), '(samples, x.dimensions)\n', (1411, 1434), False, 'import zounds\n'), ((1843, 1899), 'zounds.ArrayWithUnitsFeature', 'zounds.ArrayWithUnitsFeature', (['perceptual'], {'needs': 'windowed'}), '(perceptual, needs=windowed)\n', (1871, 1899), False, 'import zounds\n'), ((9536, 9555), 'torch.from_numpy', 'torch.from_numpy', (['z'], {}), '(z)\n', (9552, 9555), False, 'import torch\n'), ((9852, 9885), 'featureflow.PickleFeature', 'ff.PickleFeature', (['ff.IteratorNode'], {}), '(ff.IteratorNode)\n', 
(9868, 9885), True, 'import featureflow as ff\n'), ((10042, 10098), 'featureflow.PickleFeature', 'ff.PickleFeature', (['zounds.InstanceScaling'], {'needs': 'shuffled'}), '(zounds.InstanceScaling, needs=shuffled)\n', (10058, 10098), True, 'import featureflow as ff\n'), ((10246, 10331), 'featureflow.PickleFeature', 'ff.PickleFeature', (['zounds.PreprocessingPipeline'], {'needs': '(scaled, wgan)', 'store': '(True)'}), '(zounds.PreprocessingPipeline, needs=(scaled, wgan), store=True\n )\n', (10262, 10331), True, 'import featureflow as ff\n'), ((12930, 12950), 'numpy.load', 'np.load', (['most_recent'], {}), '(most_recent)\n', (12937, 12950), True, 'import numpy as np\n'), ((18198, 18210), 'random.choice', 'choice', (['snds'], {}), '(snds)\n', (18204, 18210), False, 'from random import choice\n'), ((18551, 18576), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (18574, 18576), False, 'import argparse\n'), ((3114, 3166), 'scipy.signal.resample', 'resample', (['samples', 'desired_size'], {'axis': 'axis', 'window': 'w'}), '(samples, desired_size, axis=axis, window=w)\n', (3122, 3166), False, 'from scipy.signal import resample, tukey\n'), ((3796, 3838), 'zounds.ExplicitScale', 'zounds.ExplicitScale', (['self.frequency_bands'], {}), '(self.frequency_bands)\n', (3816, 3838), False, 'import zounds\n'), ((3921, 4013), 'zounds.FrequencyAdaptive', 'zounds.FrequencyAdaptive', (['bands'], {'scale': 'scale', 'time_dimension': 'self.samples.dimensions[0]'}), '(bands, scale=scale, time_dimension=self.samples.\n dimensions[0])\n', (3945, 4013), False, 'import zounds\n'), ((5259, 5288), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {'bias': '(False)'}), '(256, 1, bias=False)\n', (5268, 5288), False, 'from torch import nn\n'), ((5888, 5908), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (5900, 5908), True, 'import torch.nn.functional as F\n'), ((5921, 5953), 'torch.nn.functional.dropout', 'F.dropout', (['x', '(0.2)', 
'self.training'], {}), '(x, 0.2, self.training)\n', (5930, 5953), True, 'import torch.nn.functional as F\n'), ((6993, 7031), 'torch.nn.Linear', 'nn.Linear', (['LATENT_DIM', '(256)'], {'bias': '(False)'}), '(LATENT_DIM, 256, bias=False)\n', (7002, 7031), False, 'from torch import nn\n'), ((7051, 7070), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (7065, 7070), False, 'from torch import nn\n'), ((7089, 7131), 'torch.nn.Linear', 'nn.Linear', (['(256)', 'total_features'], {'bias': '(False)'}), '(256, total_features, bias=False)\n', (7098, 7131), False, 'from torch import nn\n'), ((7151, 7181), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['total_features'], {}), '(total_features)\n', (7165, 7181), False, 'from torch import nn\n'), ((7304, 7324), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (7316, 7324), True, 'import torch.nn.functional as F\n'), ((7337, 7369), 'torch.nn.functional.dropout', 'F.dropout', (['x', '(0.2)', 'self.training'], {}), '(x, 0.2, self.training)\n', (7346, 7369), True, 'import torch.nn.functional as F\n'), ((7430, 7450), 'torch.nn.functional.leaky_relu', 'F.leaky_relu', (['x', '(0.2)'], {}), '(x, 0.2)\n', (7442, 7450), True, 'import torch.nn.functional as F\n'), ((7463, 7495), 'torch.nn.functional.dropout', 'F.dropout', (['x', '(0.2)', 'self.training'], {}), '(x, 0.2, self.training)\n', (7472, 7495), True, 'import torch.nn.functional as F\n'), ((7969, 7992), 'torch.cat', 'torch.cat', (['bands'], {'dim': '(1)'}), '(bands, dim=1)\n', (7978, 7992), False, 'import torch\n'), ((12737, 12755), 'glob.glob', 'glob.glob', (['"""*.npy"""'], {}), "('*.npy')\n", (12746, 12755), False, 'import glob\n'), ((13117, 13156), 'zounds.sliding_window', 'zounds.sliding_window', (['result', '(512)', '(256)'], {}), '(result, 512, 256)\n', (13138, 13156), False, 'import zounds\n'), ((13505, 13531), 'numpy.random.randint', 'np.random.randint', (['(50)', '(400)'], {}), '(50, 400)\n', (13522, 13531), True, 
'import numpy as np\n'), ((13629, 13663), 'zounds.SineSynthesizer', 'zounds.SineSynthesizer', (['samplerate'], {}), '(samplerate)\n', (13651, 13663), False, 'import zounds\n'), ((13773, 13805), 'zounds.InternetArchive', 'zounds.InternetArchive', (['"""AOC11B"""'], {}), "('AOC11B')\n", (13795, 13805), False, 'import zounds\n'), ((13815, 13878), 'zounds.InternetArchive', 'zounds.InternetArchive', (['"""Greatest_Speeches_of_the_20th_Century"""'], {}), "('Greatest_Speeches_of_the_20th_Century')\n", (13837, 13878), False, 'import zounds\n'), ((13888, 13945), 'zounds.InternetArchive', 'zounds.InternetArchive', (['"""Kevin_Gates_-_By_Any_Means-2014"""'], {}), "('Kevin_Gates_-_By_Any_Means-2014')\n", (13910, 13945), False, 'import zounds\n'), ((13955, 13977), 'zounds.PhatDrumLoops', 'zounds.PhatDrumLoops', ([], {}), '()\n', (13975, 13977), False, 'import zounds\n'), ((14011, 14055), 'zounds.ingest', 'zounds.ingest', (['d', 'Sound'], {'multi_threaded': '(True)'}), '(d, Sound, multi_threaded=True)\n', (14024, 14055), False, 'import zounds\n'), ((14099, 14131), 'zounds.InternetArchive', 'zounds.InternetArchive', (['"""AOC11B"""'], {}), "('AOC11B')\n", (14121, 14131), False, 'import zounds\n'), ((14442, 14461), 'torch.from_numpy', 'torch.from_numpy', (['z'], {}), '(z)\n', (14458, 14461), False, 'import torch\n'), ((15963, 16125), 'zounds.WassersteinGanTrainer', 'zounds.WassersteinGanTrainer', ([], {'network': 'network', 'latent_dimension': '(LATENT_DIM,)', 'n_critic_iterations': '(10)', 'epochs': 'epochs', 'batch_size': '(64)', 'arg_maker': 'arg_maker'}), '(network=network, latent_dimension=(LATENT_DIM,\n ), n_critic_iterations=10, epochs=epochs, batch_size=64, arg_maker=\n arg_maker)\n', (15991, 16125), False, 'import zounds\n'), ((16428, 16452), 'zounds.Milliseconds', 'zounds.Milliseconds', (['(250)'], {}), '(250)\n', (16447, 16452), False, 'import zounds\n'), ((17658, 17681), 'numpy.hanning', 'np.hanning', (['SAMPLE_SIZE'], {}), '(SAMPLE_SIZE)\n', (17668, 17681), True, 
'import numpy as np\n'), ((17709, 17732), 'scipy.signal.tukey', 'tukey', (['SAMPLE_SIZE', '(0.2)'], {}), '(SAMPLE_SIZE, 0.2)\n', (17714, 17732), False, 'from scipy.signal import resample, tukey\n'), ((1258, 1290), 'zounds.FrequencyDimension', 'zounds.FrequencyDimension', (['scale'], {}), '(scale)\n', (1283, 1290), False, 'import zounds\n'), ((1627, 1746), 'zounds.SampleRate', 'zounds.SampleRate', ([], {'frequency': '(samplerate.frequency * (SAMPLE_SIZE // 2))', 'duration': '(samplerate.frequency * SAMPLE_SIZE)'}), '(frequency=samplerate.frequency * (SAMPLE_SIZE // 2),\n duration=samplerate.frequency * SAMPLE_SIZE)\n', (1644, 1746), False, 'import zounds\n'), ((9468, 9508), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(64, LATENT_DIM)'], {}), '(0, 1, (64, LATENT_DIM))\n', (9484, 9508), True, 'import numpy as np\n'), ((9564, 9575), 'torch.autograd.Variable', 'Variable', (['t'], {}), '(t)\n', (9572, 9575), False, 'from torch.autograd import Variable\n'), ((10189, 10206), 'featureflow.Var', 'ff.Var', (['"""trainer"""'], {}), "('trainer')\n", (10195, 10206), True, 'import featureflow as ff\n'), ((13179, 13200), 'numpy.fft.rfft', 'np.fft.rfft', (['windowed'], {}), '(windowed)\n', (13190, 13200), True, 'import numpy as np\n'), ((13298, 13315), 'zounds.Seconds', 'zounds.Seconds', (['(1)'], {}), '(1)\n', (13312, 13315), False, 'import zounds\n'), ((13464, 13488), 'numpy.random.randint', 'np.random.randint', (['(2)', '(20)'], {}), '(2, 20)\n', (13481, 13488), True, 'import numpy as np\n'), ((16850, 16866), 'zounds.SR11025', 'zounds.SR11025', ([], {}), '()\n', (16864, 16866), False, 'import zounds\n'), ((17121, 17148), 'torch.zeros', 'torch.zeros', (['(64)', 'LATENT_DIM'], {}), '(64, LATENT_DIM)\n', (17132, 17148), False, 'import torch\n'), ((17173, 17186), 'torch.autograd.Variable', 'Variable', (['inp'], {}), '(inp)\n', (17181, 17186), False, 'from torch.autograd import Variable\n'), ((2877, 2916), 'zounds.FrequencyBand', 'zounds.FrequencyBand', (['start_hz', 
'stop_hz'], {}), '(start_hz, stop_hz)\n', (2897, 2916), False, 'import zounds\n'), ((5008, 5046), 'pytorch_wgan2.CriticLayer', 'CriticLayer', (['in_channels', 'fms', '*params'], {}), '(in_channels, fms, *params)\n', (5019, 5046), False, 'from pytorch_wgan2 import BaseGenerator, BaseCritic, CriticLayer, GeneratorLayer, FinalGeneratorLayer\n'), ((5818, 5839), 'torch.cat', 'torch.cat', (['fms'], {'dim': '(1)'}), '(fms, dim=1)\n', (5827, 5839), False, 'import torch\n'), ((13226, 13265), 'zounds.AudioSamples', 'zounds.AudioSamples', (['result', 'samplerate'], {}), '(result, samplerate)\n', (13245, 13265), False, 'import zounds\n'), ((14370, 14410), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(64, LATENT_DIM)'], {}), '(0, 1, (64, LATENT_DIM))\n', (14386, 14410), True, 'import numpy as np\n'), ((14474, 14485), 'torch.autograd.Variable', 'Variable', (['t'], {}), '(t)\n', (14482, 14485), False, 'from torch.autograd import Variable\n'), ((16356, 16390), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'LATENT_DIM'], {}), '(0, 1, LATENT_DIM)\n', (16372, 16390), True, 'import numpy as np\n'), ((18379, 18388), 'numpy.abs', 'np.abs', (['b'], {}), '(b)\n', (18385, 18388), True, 'import numpy as np\n'), ((18447, 18460), 'numpy.abs', 'np.abs', (['(b / m)'], {}), '(b / m)\n', (18453, 18460), True, 'import numpy as np\n'), ((4283, 4300), 'zounds.Seconds', 'zounds.Seconds', (['(1)'], {}), '(1)\n', (4297, 4300), False, 'import zounds\n'), ((16615, 16649), 'zounds.AudioSamples', 'zounds.AudioSamples', (['j', 'samplerate'], {}), '(j, samplerate)\n', (16634, 16649), False, 'import zounds\n'), ((4207, 4246), 'zounds.AudioSamples', 'zounds.AudioSamples', (['sample', 'samplerate'], {}), '(sample, samplerate)\n', (4226, 4246), False, 'import zounds\n'), ((12786, 12796), 'os.stat', 'os.stat', (['x'], {}), '(x)\n', (12793, 12796), False, 'import os\n'), ((12808, 12818), 'os.stat', 'os.stat', (['y'], {}), '(y)\n', (12815, 12818), False, 'import os\n')] |
#!/usr/bin/env python
from collections import defaultdict
import numpy as np
from nltk.corpus import reuters
def analyze_data_distribution(cat2count):
    """Print per-category statistics, ordered by training-set frequency.

    For each category prints its rank, name, train/test document counts
    and the mean document length in words. `cat2count` maps a category
    name to a dict with 'train', 'test' and 'words' keys.
    """
    by_train_count = sorted(cat2count.items(),
                            key=lambda item: item[1]['train'],
                            reverse=True)
    for rank, (cat, counts) in enumerate(by_train_count, start=1):
        mean_words = np.array(counts['words']).mean()
        print("\t{:>2}: {:<20}: {:>4}\t{:>4}\t{:0.1f}"
              .format(rank, cat,
                      counts['train'],
                      counts['test'],
                      mean_words))
def analyze_vocabulary(corpus):
    """Report corpus word statistics and build a pruned vocabulary.

    Prints the ten most frequent words, then builds a word->id mapping
    restricted to words of medium frequency (20..50 occurrences),
    optionally reserving id 0 for an out-of-vocabulary pseudo-word.
    """
    word2count = defaultdict(int)
    for token in corpus:
        word2count[token] += 1
    by_freq = sorted(word2count.items(), key=lambda item: item[1],
                     reverse=True)
    for rank, (word, count) in enumerate(by_freq[:10]):
        print("{}. frequent word is {} ({} occurences)"
              .format(rank, word, count))
    # Create vocabulary
    min_occurences = 20
    max_occurences = 50
    vocabulary = [word for word, count in word2count.items()
                  if min_occurences <= count <= max_occurences]
    # Design decision: Should there be a pseudo-word OOV
    # (out of vocabulary)?
    with_oov = True
    word2wid = {'<OOV>': 0} if with_oov else {}
    for wid, word in enumerate(vocabulary, start=len(word2wid)):
        word2wid[word] = wid
    print("Created word2wid")
    # Analyze the vocabulary
    print("total vocabulary = {}".format(len(word2count)))
    print("vocabulary size = {} (min_occ={}, max_occ={})"
          .format(len(word2wid), min_occurences, max_occurences))
def main(categories, document_ids, verbose=False):
    """Print label/vocabulary statistics for the given Reuters categories.

    Parameters
    ----------
    categories : list of str
        Category names to analyze.
    document_ids : list of str
        Reuters file ids, prefixed with 'test/' or 'training/'.
    verbose : bool, optional
        Currently unused; kept for interface compatibility.
    """
    print(f"categories: {categories}")
    print("number of categories: {}".format(len(categories)))
    cat2catid = {cat: catid for catid, cat in enumerate(sorted(categories))}
    documents = document_ids
    test = [d for d in documents if d.startswith('test/')]
    train = [d for d in documents if d.startswith('training/')]
    # O(1) membership tests in the nested loop below (was an O(n) list scan)
    train_set = set(train)
    print("train documents: {}".format(len(train)))
    print("test documents: {}".format(len(test)))
    # make it easy to map data to label
    # gather simple statistics
    id2cats = defaultdict(list)
    cat2count = {}
    for cat in categories:
        for fid in reuters.fileids(cat):
            id2cats[fid].append(cat)
            if cat not in cat2count:
                cat2count[cat] = {'train': 0, 'test': 0, 'words': []}
            if fid in train_set:
                cat2count[cat]['train'] += 1
            else:
                cat2count[cat]['test'] += 1
            cat2count[cat]['words'].append(len(reuters.words(fid)))
    print("How many labels do documents usually have?")
    labelcount2doccount = defaultdict(int)
    for cats in id2cats.values():
        labelcount2doccount[len(cats)] += 1
    s = sorted(labelcount2doccount.items(), reverse=True, key=lambda n: n[1])
    for labelcount, documentcount in s:
        print("\tlabelcount={:>3}, documentcount={:>3}"
              .format(labelcount, documentcount))
    # Analyze data distribution to classes
    analyze_data_distribution(cat2count)
    # Build corpus
    corpus = []
    for document_id in train:
        corpus += list(reuters.words(document_id))
    analyze_vocabulary(corpus)
def find_class_predictors(ys, num_classes=90):
    """Measure how well each class label predicts every other one.

    For every ordered pair ``(take, predict)`` of class indices, compute
    the fraction of documents whose label for class ``take`` equals the
    label for class ``predict``.  Class index 0 is skipped as a predictor
    (its row stays all zeros), matching the original behavior.

    Parameters
    ----------
    ys : iterable of sequences
        Per-document label vectors, each of length >= ``num_classes``.
    num_classes : int, optional
        Number of classes to consider.  Defaults to 90, which was
        previously hard-coded (Reuters setup); now a parameter so the
        function works for any label-vector width.

    Returns
    -------
    list of list of float
        ``num_classes x num_classes`` accuracy matrix.
    """
    corr = [[0.0] * num_classes for _ in range(num_classes)]
    total = [[0.0] * num_classes for _ in range(num_classes)]
    for document_cats in ys:
        # take_i == 0 is deliberately skipped (row 0 of the result is 0.0)
        for take_i in range(1, num_classes):
            for predict_i in range(num_classes):
                total[take_i][predict_i] += 1
                if document_cats[take_i] == document_cats[predict_i]:
                    corr[take_i][predict_i] += 1
    acc = []
    for i in range(num_classes):
        row = []
        for j in range(num_classes):
            # guard against division by zero when nothing was counted
            row.append(corr[i][j] / total[i][j] if total[i][j] else 0.0)
        acc.append(row)
    return acc
def print_class_predictors(acc):
    """Print class pairs ordered by how well one class predicts the other.

    Parameters
    ----------
    acc : list of list of float
        Square accuracy matrix as returned by ``find_class_predictors``.
        ``reuters.labels`` must cover all class indices.
    """
    # Derive the class count from the input (was hard-coded to 90, which
    # crashed or silently truncated for matrices of any other size).
    num_classes = len(acc)
    score_list = []
    for take_i in range(num_classes):
        for predict_i in range(num_classes):
            score_list.append({'take': take_i,
                               'pred': predict_i,
                               'acc': acc[take_i][predict_i]})
    score_list = sorted(score_list, key=lambda n: n['acc'], reverse=True)
    for el in score_list:
        if el['take'] == el['pred']:
            continue  # a class trivially predicts itself
        take = reuters.labels[el['take']]
        pred = reuters.labels[el['pred']]
        print("{} => {} ({})".format(take, pred, el['acc']))
if __name__ == '__main__':
    # Alternative entry point using the NLTK corpus directly:
    # main(reuters.categories(), reuters.fileids())
    # NOTE(review): this local `reuters` module (providing load_data())
    # shadows the nltk.corpus.reuters import at the top of the file --
    # TODO confirm that is intentional.
    import reuters
    acc = find_class_predictors(reuters.load_data()['y_train'])
    print_class_predictors(acc)
| [
"reuters.load_data",
"reuters.fileids",
"reuters.words",
"numpy.array",
"collections.defaultdict"
] | [((690, 706), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (701, 706), False, 'from collections import defaultdict\n'), ((2361, 2378), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2372, 2378), False, 'from collections import defaultdict\n'), ((2897, 2913), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2908, 2913), False, 'from collections import defaultdict\n'), ((2444, 2464), 'reuters.fileids', 'reuters.fileids', (['cat'], {}), '(cat)\n', (2459, 2464), False, 'import reuters\n'), ((3392, 3418), 'reuters.words', 'reuters.words', (['document_id'], {}), '(document_id)\n', (3405, 3418), False, 'import reuters\n'), ((5004, 5023), 'reuters.load_data', 'reuters.load_data', ([], {}), '()\n', (5021, 5023), False, 'import reuters\n'), ((2793, 2811), 'reuters.words', 'reuters.words', (['fid'], {}), '(fid)\n', (2806, 2811), False, 'import reuters\n'), ((581, 614), 'numpy.array', 'np.array', (["cat2count[cat]['words']"], {}), "(cat2count[cat]['words'])\n", (589, 614), True, 'import numpy as np\n')] |
import numpy as np
import sys
import trisectEdge_171122 as tse
import circumcenterSphTri_171123 as ccs
import array_tool_171125 as art
from scipy.spatial import Delaunay
import freeBoundary_171112 as frb
'''
v0.3 Nov. 29, 2017
- add Test_trisectTri()
v0.2 Nov. 26, 2017
- use np.concatenate() instead of my own concatenating code
v0.1 Nov. 25, 2017
- import [freeBoundary_171112]
- use Delaunay()
- add trisectTri()
'''
def trisectTri(xs, tris):
    """Refine a spherical triangulation by trisecting every edge.

    For each triangle, the three edges are trisected and the triangle's
    circumcenter is added; the union of old and new vertices is projected
    back onto the unit sphere and re-triangulated.

    Parameters
    ----------
    xs : (N, 3) ndarray
        Vertex coordinates (assumed on/near the unit sphere).
    tris : (M, 3) array-like
        Vertex indices of the triangles.

    Returns
    -------
    xs : (K, 3) ndarray
        Refined, de-duplicated vertices projected onto the unit sphere.
    tris : ndarray
        Triangle indices of the refined mesh (free boundary of the
        Delaunay tetrahedralization of the sphere points).
    """
    tris = np.array(tris).astype(int)
    kidx = 0
    # Two new points per edge and per triangle: v1/v2/v3 hold the
    # trisection points of the first/second/third edge respectively.
    v1 = np.zeros((2*len(tris), 3))
    v2 = np.zeros((2*len(tris), 3))
    v3 = np.zeros((2*len(tris), 3))
    for jidx in range(len(tris)):
        tri0 = tris[jidx, 0]
        tri1 = tris[jidx, 1]
        tri2 = tris[jidx, 2]
        wrk = tse.trisectEdge(xs[tri0, :], xs[tri1, :])
        v1[kidx:kidx+2, :] = wrk.copy()
        wrk = tse.trisectEdge(xs[tri1, :], xs[tri2, :])
        v2[kidx:kidx+2, :] = wrk.copy()
        wrk = tse.trisectEdge(xs[tri2, :], xs[tri0, :])
        v3[kidx:kidx+2, :] = wrk.copy()
        kidx += 2
    vs = np.concatenate((v1, v2, v3), axis=0)
    # Add the circumcenter of the original triangle to the list.
    wrk = ccs.circumcenterSphTri(tris, xs)
    vs = np.concatenate((vs, wrk), axis=0)
    vs = np.concatenate((xs, vs), axis=0)
    # Remove repeating vertices
    xs, _ = art.get_unique_rows(vs)
    # Project the nodes to the sphere.  Row-wise normalization replaces
    # the former per-row sqrt/sum loop plus repeat/reshape bookkeeping.
    xs = xs / np.linalg.norm(xs, axis=1, keepdims=True)
    # Triangulate the new nodes; Delaunay on 3-D sphere points yields a
    # tetrahedralization, so keep only its free boundary as the surface.
    tris = Delaunay(xs).simplices
    tris = frb.find_freeBoundary(tris)
    tris = np.array(tris).astype(int)
    return xs, tris
def Test_trisectTri():
    """Smoke test: run trisectTri on fixture data loaded from disk.

    Requires 'tri_trisect_171125.txt' and 'x_trisect_171125.txt' in the
    working directory.  The expected sizes/values are recorded in the
    inline string below, captured from a pdb session.
    """
    tris = np.genfromtxt('tri_trisect_171125.txt', delimiter=' ')
    xs = np.genfromtxt('x_trisect_171125.txt', delimiter=' ')
    tris = tris.astype(int)  # file is in float
    tris = tris - 1  # from indexing [1..] to [0..]
    resxs, restris = trisectTri(xs, tris)
    '''
    (Pdb) p len(resxs)
    92
    (Pdb) p resxs[:5]
    array([[-0.98302355, -0.18347941, 0. ],
       [-0.98302355, 0.18347941, 0. ],
       [-0.93417236, 0. , -0.35682209],
       [-0.93417236, 0. , 0.35682209],
       [-0.85198102, -0.39551069, -0.34307382]])
    (Pdb) p resxs[-5:]
    array([[ 0.85198102, 0.39551069, 0.34307382],
       [ 0.93417236, 0. , -0.35682209],
       [ 0.93417236, 0. , 0.35682209],
       [ 0.98302355, -0.18347941, 0. ],
       [ 0.98302355, 0.18347941, 0. ]])
    (Pdb) p len(restris)
    180
    (Pdb) p restris[:5]
    array([[13, 27, 23],
       [ 4, 2, 10],
       [ 5, 3, 0],
       [33, 37, 49],
       [55, 59, 71]])
    (Pdb) p restris[-5:]
    array([[24, 34, 44],
       [22, 24, 10],
       [24, 18, 10],
       [53, 47, 39],
       [53, 65, 45]])
    '''
    # print(resxs)
    pass  # for breakpoint in pdb
if __name__ == '__main__':
    # Run the file-based smoke test when executed as a script.
    Test_trisectTri()
| [
"circumcenterSphTri_171123.circumcenterSphTri",
"numpy.array",
"numpy.sum",
"freeBoundary_171112.find_freeBoundary",
"numpy.concatenate",
"numpy.genfromtxt",
"trisectEdge_171122.trisectEdge",
"array_tool_171125.get_unique_rows",
"scipy.spatial.Delaunay"
] | [((1056, 1092), 'numpy.concatenate', 'np.concatenate', (['(v1, v2, v3)'], {'axis': '(0)'}), '((v1, v2, v3), axis=0)\n', (1070, 1092), True, 'import numpy as np\n'), ((1169, 1201), 'circumcenterSphTri_171123.circumcenterSphTri', 'ccs.circumcenterSphTri', (['tris', 'xs'], {}), '(tris, xs)\n', (1191, 1201), True, 'import circumcenterSphTri_171123 as ccs\n'), ((1211, 1244), 'numpy.concatenate', 'np.concatenate', (['(vs, wrk)'], {'axis': '(0)'}), '((vs, wrk), axis=0)\n', (1225, 1244), True, 'import numpy as np\n'), ((1254, 1286), 'numpy.concatenate', 'np.concatenate', (['(xs, vs)'], {'axis': '(0)'}), '((xs, vs), axis=0)\n', (1268, 1286), True, 'import numpy as np\n'), ((1332, 1355), 'array_tool_171125.get_unique_rows', 'art.get_unique_rows', (['vs'], {}), '(vs)\n', (1351, 1355), True, 'import array_tool_171125 as art\n'), ((1486, 1500), 'numpy.array', 'np.array', (['x_l2'], {}), '(x_l2)\n', (1494, 1500), True, 'import numpy as np\n'), ((1724, 1751), 'freeBoundary_171112.find_freeBoundary', 'frb.find_freeBoundary', (['tris'], {}), '(tris)\n', (1745, 1751), True, 'import freeBoundary_171112 as frb\n'), ((1848, 1903), 'numpy.genfromtxt', 'np.genfromtxt', (['"""tri_trisect_171125.txt"""'], {'delimiter': '""" """'}), "('tri_trisect_171125.txt', delimiter=' ')\n", (1861, 1903), True, 'import numpy as np\n'), ((1913, 1966), 'numpy.genfromtxt', 'np.genfromtxt', (['"""x_trisect_171125.txt"""'], {'delimiter': '""" """'}), "('x_trisect_171125.txt', delimiter=' ')\n", (1926, 1966), True, 'import numpy as np\n'), ((753, 794), 'trisectEdge_171122.trisectEdge', 'tse.trisectEdge', (['xs[tri0, :]', 'xs[tri1, :]'], {}), '(xs[tri0, :], xs[tri1, :])\n', (768, 794), True, 'import trisectEdge_171122 as tse\n'), ((849, 890), 'trisectEdge_171122.trisectEdge', 'tse.trisectEdge', (['xs[tri1, :]', 'xs[tri2, :]'], {}), '(xs[tri1, :], xs[tri2, :])\n', (864, 890), True, 'import trisectEdge_171122 as tse\n'), ((945, 986), 'trisectEdge_171122.trisectEdge', 'tse.trisectEdge', (['xs[tri2, :]', 
'xs[tri0, :]'], {}), '(xs[tri2, :], xs[tri0, :])\n', (960, 986), True, 'import trisectEdge_171122 as tse\n'), ((1690, 1702), 'scipy.spatial.Delaunay', 'Delaunay', (['xs'], {}), '(xs)\n', (1698, 1702), False, 'from scipy.spatial import Delaunay\n'), ((470, 484), 'numpy.array', 'np.array', (['tris'], {}), '(tris)\n', (478, 484), True, 'import numpy as np\n'), ((1764, 1778), 'numpy.array', 'np.array', (['tris'], {}), '(tris)\n', (1772, 1778), True, 'import numpy as np\n'), ((1455, 1474), 'numpy.sum', 'np.sum', (['(elem * elem)'], {}), '(elem * elem)\n', (1461, 1474), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create the data for the LSTM.
"""
import os
import sys
import argparse
import numpy as np
import h5py
import itertools
from collections import defaultdict
class Indexer:
    """Bidirectional word <-> id mapping with special symbols (Python 2).

    Ids start at 1; the four special symbols (pad, unknown,
    beginning-of-sentence, end-of-sentence) always occupy ids 1-4.
    """
    def __init__(self, symbols = ["<blank>","<unk>","<s>","</s>"]):
        # NOTE(review): mutable default argument; harmless here because
        # the list is only read, but a tuple would be safer.
        self.vocab = defaultdict(int)  # raw word counts, filled by callers
        self.PAD = symbols[0]
        self.UNK = symbols[1]
        self.BOS = symbols[2]
        self.EOS = symbols[3]
        # word -> id map; special symbols get the first four ids
        self.d = {self.PAD: 1, self.UNK: 2, self.BOS: 3, self.EOS: 4}
    def add_w(self, ws):
        # Assign the next free id to every previously unseen word.
        for w in ws:
            if w not in self.d:
                self.d[w] = len(self.d) + 1
    def convert(self, w):
        # Map a word to its id, falling back to the <unk> id.
        return self.d[w] if w in self.d else self.d[self.UNK]
    def convert_sequence(self, ls):
        # Map a list of words to a list of ids.
        return [self.convert(l) for l in ls]
    def clean(self, s):
        # Strip the special symbols from a raw text line.
        s = s.replace(self.PAD, "")
        s = s.replace(self.BOS, "")
        s = s.replace(self.EOS, "")
        return s
    def write(self, outfile):
        # Dump the vocabulary as "word id" lines, sorted by id
        # (Python 2 print-chevron syntax).
        out = open(outfile, "w")
        items = [(v, k) for k, v in self.d.iteritems()]
        items.sort()
        for v, k in items:
            print >>out, k, v
        out.close()
    def prune_vocab(self, k, cnt = False):
        # Keep either all words with count > k (cnt=True) or the k most
        # frequent words (cnt=False); survivors are added to the id map.
        vocab_list = [(word, count) for word, count in self.vocab.iteritems()]
        if cnt:
            self.pruned_vocab = {pair[0]: pair[1] for pair in vocab_list if pair[1] > k}
        else:
            vocab_list.sort(key = lambda x: x[1], reverse=True)
            k = min(k, len(vocab_list))
            self.pruned_vocab = {pair[0]:pair[1] for pair in vocab_list[:k]}
        for word in self.pruned_vocab:
            if word not in self.d:
                self.d[word] = len(self.d) + 1
    def load_vocab(self, vocab_file):
        # Replace the id map with one read from "word id" lines on disk.
        self.d = {}
        for line in open(vocab_file, 'r'):
            v, k = line.strip().split()
            self.d[v] = int(k)
def pad(ls, length, symbol):
    """Right-pad ``ls`` with ``symbol`` to ``length``; truncate if longer."""
    shortfall = length - len(ls)
    if shortfall <= 0:
        return ls[:length]
    return ls + [symbol] * shortfall
def get_data(args):
    """Build vocabularies and write train/valid HDF5 files (Python 2).

    Two passes over the parallel src/target files: the first counts
    sentences and word frequencies, the second converts sentences to id
    arrays, sorts them by source length, groups them into batches and
    writes everything to HDF5.
    """
    src_indexer = Indexer(["<blank>","<unk>","<s>","</s>"])
    target_indexer = Indexer(["<blank>","<unk>","<s>","</s>"])
    def make_vocab(srcfile, targetfile, srcseqlength, targetseqlength, train=1):
        # First pass: count kept sentences; when train==1 also accumulate
        # word frequencies into the indexers.
        num_sents = 0
        for _, (src_orig, targ_orig) in \
                enumerate(itertools.izip(open(srcfile,'r'), open(targetfile,'r'))):
            src_orig = src_indexer.clean(src_orig.strip())
            targ_orig = target_indexer.clean(targ_orig.strip())
            targ = targ_orig.strip().split()
            src = src_orig.strip().split()
            # drop empty or over-length sentence pairs
            if len(targ) > targetseqlength or len(src) > srcseqlength or len(targ) < 1 or len(src) < 1:
                continue
            num_sents += 1
            if train == 1:
                for word in targ:
                    target_indexer.vocab[word] += 1
                for word in src:
                    src_indexer.vocab[word] += 1
        return num_sents
    def convert(srcfile, targetfile, batchsize, srcseqlength, targetseqlength, outfile, num_sents,
                max_sent_l=0, shuffle=0):
        # Second pass: convert sentences to padded id arrays, sort by
        # source length, compute batch boundaries and write HDF5.
        newsrcseqlength = srcseqlength + 2 #add 2 for EOS and BOS
        newtargetseqlength = targetseqlength + 2
        targets = np.zeros((num_sents, newtargetseqlength), dtype=int)
        target_output = np.zeros((num_sents, newtargetseqlength), dtype=int)
        sources = np.zeros((num_sents, newsrcseqlength), dtype=int)
        source_lengths = np.zeros((num_sents,), dtype=int)
        target_lengths = np.zeros((num_sents,), dtype=int)
        dropped = 0
        sent_id = 0
        for _, (src_orig, targ_orig) in \
                enumerate(itertools.izip(open(srcfile,'r'), open(targetfile,'r'))):
            src_orig = src_indexer.clean(src_orig.strip())
            targ_orig = target_indexer.clean(targ_orig.strip())
            targ = [target_indexer.BOS] + targ_orig.strip().split() + [target_indexer.EOS]
            src = [src_indexer.BOS] + src_orig.strip().split() + [src_indexer.EOS]
            max_sent_l = max(len(targ), len(src), max_sent_l)
            if len(targ) > newtargetseqlength or len(src) > newsrcseqlength or len(targ) < 3 or len(src) < 3:
                dropped += 1
                continue
            # target is padded one longer so it can be split into
            # decoder input (targ[:-1]) and expected output (targ[1:])
            targ = pad(targ, newtargetseqlength+1, target_indexer.PAD)
            targ = target_indexer.convert_sequence(targ)
            targ = np.array(targ, dtype=int)
            src = pad(src, newsrcseqlength, src_indexer.PAD)
            src = src_indexer.convert_sequence(src)
            src = np.array(src, dtype=int)
            targets[sent_id] = np.array(targ[:-1],dtype=int)
            # id 1 is the <blank> pad symbol, so "!= 1" counts real tokens
            target_lengths[sent_id] = (targets[sent_id] != 1).sum()
            target_output[sent_id] = np.array(targ[1:],dtype=int)
            sources[sent_id] = np.array(src, dtype=int)
            source_lengths[sent_id] = (sources[sent_id] != 1).sum()
            sent_id += 1
            if sent_id % 100000 == 0:
                print("{}/{} sentences processed".format(sent_id, num_sents))
        print(sent_id, num_sents)
        if shuffle == 1:
            # shuffle before sorting so ties in source length are random
            rand_idx = np.random.permutation(sent_id)
            targets = targets[rand_idx]
            target_output = target_output[rand_idx]
            sources = sources[rand_idx]
            source_lengths = source_lengths[rand_idx]
            target_lengths = target_lengths[rand_idx]
        #break up batches based on source lengths
        source_lengths = source_lengths[:sent_id]
        source_sort = np.argsort(source_lengths)
        sources = sources[source_sort]
        targets = targets[source_sort]
        target_output = target_output[source_sort]
        target_l = target_lengths[source_sort]
        source_l = source_lengths[source_sort]
        curr_l = 0
        l_location = [] #idx where sent length changes
        for j,i in enumerate(source_sort):
            if source_lengths[i] > curr_l:
                curr_l = source_lengths[i]
                l_location.append(j+1)
        l_location.append(len(sources))
        #get batch sizes (batches never straddle a source-length change;
        # indices below are 1-based, hence the "-1" when slicing)
        curr_idx = 1
        batch_idx = [1]
        nonzeros = []
        batch_l = []
        batch_w = []
        target_l_max = []
        for i in range(len(l_location)-1):
            while curr_idx < l_location[i+1]:
                curr_idx = min(curr_idx + batchsize, l_location[i+1])
                batch_idx.append(curr_idx)
        for i in range(len(batch_idx)-1):
            batch_l.append(batch_idx[i+1] - batch_idx[i])
            batch_w.append(source_l[batch_idx[i]-1])
            nonzeros.append((target_output[batch_idx[i]-1:batch_idx[i+1]-1] != 1).sum().sum())
            target_l_max.append(max(target_l[batch_idx[i]-1:batch_idx[i+1]-1]))
        # Write output
        f = h5py.File(outfile, "w")
        f["source"] = sources
        f["target"] = targets
        f["target_output"] = target_output
        f["target_l"] = np.array(target_l_max, dtype=int)
        f["target_l_all"] = target_l
        f["batch_l"] = np.array(batch_l, dtype=int)
        f["batch_w"] = np.array(batch_w, dtype=int)
        f["batch_idx"] = np.array(batch_idx[:-1], dtype=int)
        f["target_nonzeros"] = np.array(nonzeros, dtype=int)
        f["source_size"] = np.array([len(src_indexer.d)])
        f["target_size"] = np.array([len(target_indexer.d)])
        print("Saved {} sentences (dropped {} due to length)".format(len(f["source"]), dropped))
        f.close()
        return max_sent_l
    print("First pass through data to get vocab...")
    num_sents_train = make_vocab(args.srcfile, args.targetfile,
                                 args.srcseqlength, args.targetseqlength)
    print("Number of sentences in training: {}".format(num_sents_train))
    num_sents_valid = make_vocab(args.srcvalfile, args.targetvalfile,
                                 args.srcseqlength, args.targetseqlength, 0)
    print("Number of sentences in valid: {}".format(num_sents_valid))
    #prune and write vocab
    src_indexer.prune_vocab(args.srcvocabminfreq, True)
    target_indexer.prune_vocab(args.targetvocabminfreq, True)
    if args.srcvocabfile != '':
        print('Loading pre-specified source vocab from ' + args.srcvocabfile)
        src_indexer.load_vocab(args.srcvocabfile)
    if args.targetvocabfile != '':
        print('Loading pre-specified target vocab from ' + args.targetvocabfile)
        target_indexer.load_vocab(args.targetvocabfile)
    src_indexer.write(args.outputfile + ".src.dict")
    target_indexer.write(args.outputfile + ".targ.dict")
    print("Source vocab size: Original = {}, Pruned = {}".format(len(src_indexer.vocab),
                                                                 len(src_indexer.d)))
    print("Target vocab size: Original = {}, Pruned = {}".format(len(target_indexer.vocab),
                                                                 len(target_indexer.d)))
    max_sent_l = 0
    max_sent_l = convert(args.srcvalfile, args.targetvalfile, args.batchsize, args.srcseqlength,
                         args.targetseqlength, args.outputfile + "-val.hdf5", num_sents_valid,
                         max_sent_l, args.shuffle)
    max_sent_l = convert(args.srcfile, args.targetfile, args.batchsize, args.srcseqlength,
                         args.targetseqlength,
                         args.outputfile + "-train.hdf5", num_sents_train,
                         max_sent_l, args.shuffle)
    print("Max sent length (before dropping): {}".format(max_sent_l))
def main(arguments):
    """Parse command-line arguments and run the preprocessing pipeline."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add = arg_parser.add_argument
    add('--srcvocabminfreq',
        help="Source vocab count limit. All words that occurred"
             "less than this amount are replaced with UNK.",
        type=int, default=10)
    add('--targetvocabminfreq',
        help="Source vocab count limit. All words that occurred"
             "less than this amount are replaced with UNK.",
        type=int, default=10)
    add('--srcfile',
        help="Path to source training data, "
             "where each line represents a single "
             "source/target sequence.", required=True)
    add('--targetfile',
        help="Path to target training data, "
             "where each line represents a single "
             "source/target sequence.", required=True)
    add('--srcvalfile', help="Path to source validation data.",
        required=True)
    add('--targetvalfile', help="Path to target validation data.",
        required=True)
    add('--batchsize', help="Size of each minibatch.", type=int, default=128)
    add('--srcseqlength',
        help="Maximum source sequence length. Sequences longer "
             "than this are dropped.", type=int, default=50)
    add('--targetseqlength',
        help="Maximum target sequence length. Sequences longer "
             "than this are dropped.", type=int, default=50)
    add('--outputfile', help="Prefix of the output file names. ",
        type=str, required=True)
    add('--srcvocabfile',
        help="If working with a preset vocab, "
             "then including this will ignore srcvocabsize and use the"
             "vocab provided here.",
        type=str, default='')
    add('--targetvocabfile',
        help="If working with a preset vocab, "
             "then including this will ignore targetvocabsize and "
             "use the vocab provided here.",
        type=str, default='')
    add('--shuffle',
        help="If = 1, shuffle sentences before sorting (based on "
             "source length).",
        type=int, default=0)
    args = arg_parser.parse_args(arguments)
    get_data(args)
get_data(args)
if __name__ == '__main__':
    # CLI entry point; main() returns None, so the exit status is 0.
    sys.exit(main(sys.argv[1:]))
| [
"argparse.ArgumentParser",
"h5py.File",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"collections.defaultdict",
"numpy.random.permutation"
] | [((9598, 9703), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n', (9621, 9703), False, 'import argparse\n'), ((311, 327), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (322, 327), False, 'from collections import defaultdict\n'), ((3282, 3334), 'numpy.zeros', 'np.zeros', (['(num_sents, newtargetseqlength)'], {'dtype': 'int'}), '((num_sents, newtargetseqlength), dtype=int)\n', (3290, 3334), True, 'import numpy as np\n'), ((3359, 3411), 'numpy.zeros', 'np.zeros', (['(num_sents, newtargetseqlength)'], {'dtype': 'int'}), '((num_sents, newtargetseqlength), dtype=int)\n', (3367, 3411), True, 'import numpy as np\n'), ((3430, 3479), 'numpy.zeros', 'np.zeros', (['(num_sents, newsrcseqlength)'], {'dtype': 'int'}), '((num_sents, newsrcseqlength), dtype=int)\n', (3438, 3479), True, 'import numpy as np\n'), ((3505, 3538), 'numpy.zeros', 'np.zeros', (['(num_sents,)'], {'dtype': 'int'}), '((num_sents,), dtype=int)\n', (3513, 3538), True, 'import numpy as np\n'), ((3564, 3597), 'numpy.zeros', 'np.zeros', (['(num_sents,)'], {'dtype': 'int'}), '((num_sents,), dtype=int)\n', (3572, 3597), True, 'import numpy as np\n'), ((5557, 5583), 'numpy.argsort', 'np.argsort', (['source_lengths'], {}), '(source_lengths)\n', (5567, 5583), True, 'import numpy as np\n'), ((6819, 6842), 'h5py.File', 'h5py.File', (['outfile', '"""w"""'], {}), "(outfile, 'w')\n", (6828, 6842), False, 'import h5py\n'), ((6971, 7004), 'numpy.array', 'np.array', (['target_l_max'], {'dtype': 'int'}), '(target_l_max, dtype=int)\n', (6979, 7004), True, 'import numpy as np\n'), ((7065, 7093), 'numpy.array', 'np.array', (['batch_l'], {'dtype': 'int'}), '(batch_l, dtype=int)\n', (7073, 7093), True, 'import numpy as np\n'), ((7117, 7145), 'numpy.array', 'np.array', (['batch_w'], {'dtype': 'int'}), '(batch_w, dtype=int)\n', 
(7125, 7145), True, 'import numpy as np\n'), ((7171, 7206), 'numpy.array', 'np.array', (['batch_idx[:-1]'], {'dtype': 'int'}), '(batch_idx[:-1], dtype=int)\n', (7179, 7206), True, 'import numpy as np\n'), ((7238, 7267), 'numpy.array', 'np.array', (['nonzeros'], {'dtype': 'int'}), '(nonzeros, dtype=int)\n', (7246, 7267), True, 'import numpy as np\n'), ((4435, 4460), 'numpy.array', 'np.array', (['targ'], {'dtype': 'int'}), '(targ, dtype=int)\n', (4443, 4460), True, 'import numpy as np\n'), ((4593, 4617), 'numpy.array', 'np.array', (['src'], {'dtype': 'int'}), '(src, dtype=int)\n', (4601, 4617), True, 'import numpy as np\n'), ((4650, 4680), 'numpy.array', 'np.array', (['targ[:-1]'], {'dtype': 'int'}), '(targ[:-1], dtype=int)\n', (4658, 4680), True, 'import numpy as np\n'), ((4785, 4814), 'numpy.array', 'np.array', (['targ[1:]'], {'dtype': 'int'}), '(targ[1:], dtype=int)\n', (4793, 4814), True, 'import numpy as np\n'), ((4845, 4869), 'numpy.array', 'np.array', (['src'], {'dtype': 'int'}), '(src, dtype=int)\n', (4853, 4869), True, 'import numpy as np\n'), ((5163, 5193), 'numpy.random.permutation', 'np.random.permutation', (['sent_id'], {}), '(sent_id)\n', (5184, 5193), True, 'import numpy as np\n')] |
"""functions related to sampling
handful of functions here, related to sampling and checking whether
you're sampling correctly, in order to avoid aliasing
when doing something like strided convolution or using the pooling windows from
plenoptic, you want to make sure you're sampling the image appropriately, in
order to avoid aliasing. this file contains some functions to help you with
that, see the Sampling_and_Aliasing notebook for some examples
"""
import numpy as np
import matplotlib.pyplot as plt
import torch
from matplotlib import animation
from .pooling import gaussian
from . import utils
def check_sampling(val_sampling=.5, pix_sampling=None, func=gaussian, x=torch.linspace(-5, 5, 101),
                   **func_kwargs):
    r"""check how sampling relates to interpolation quality

    Given a function, a domain, and how to sample that domain, this
    function will use linear algebra (``np.linalg.lstsq``) to determine
    how to interpolate the function so that it's centered on each
    pixel. You can then use functions like ``plot_coeffs`` and
    ``create_movie`` to see the quality of this interpolation.

    The idea here is to take a function (for example,
    ``po.simul.pooling.gaussian``) and say that we have this function
    defined at, e.g., every 10 pixels on the array ``linspace(-5, 5,
    101)``. We want to answer then, the question of how well we can
    interpolate to all the intermediate functions, that is, the
    functions centered on each pixel in the array.

    You can either specify the spacing in pixels (``pix_sampling``) xor
    in x values (``val_sampling``), but exactly one of them must be set.

    Your function can either be a torch or numpy function, but ``x``
    must be the appropriate type, we will not cast it for you.

    Parameters
    ----------
    val_sampling : float or None, optional.
        If float, how far apart (in x-values) each sampled function
        should be. This doesn't have to align perfectly with the pixels,
        but should be close. If None, we use ``pix_sampling`` instead.
    pix_sampling : int or None, optional
        If int, how far apart (in pixels) each sampled function should
        be. If None, we use ``val_sampling`` instead.
    func : callable, optional
        the function to check interpolation for. must take ``x`` as its
        first input, all additional kwargs can be specified in
        ``func_kwargs``
    x : torch.tensor or np.array, optional
        the 1d tensor/array to evaluate ``func`` on.
    func_kwargs :
        additional kwargs to pass to ``func``

    Returns
    -------
    sampled : np.array
        the array of sampled functions. will have shape ``(len(x),
        ceil(len(x)/pix_sampling))``
    full : np.array
        the array of functions centered at each pixel. will have shape
        ``(len(x), len(x))``
    interpolated : np.array
        the array of functions interpolated to each pixel. will have
        shape ``(len(x), len(x))``
    coeffs : np.array
        the array of coefficients to transform ``sampled`` to
        ``full``. This has been transposed from the array returned by
        ``np.linalg.lstsq`` and thus will have the same shape as
        ``sampled`` (this is to make it easier to restrict which coeffs
        to look at, since they'll be more easily indexed along first
        dimension)
    residuals : np.array
        the errors for each interpolation, will have shape ``len(x)``
    """
    if val_sampling is not None:
        if pix_sampling is not None:
            raise Exception("One of val_sampling or pix_sampling must be None!")
        # this will get us the closest value, if there's no exactly
        # correct one.
        pix_sampling = np.argmin(abs((x+val_sampling)[0] - x))
        if pix_sampling == 0 or pix_sampling == (len(x)-1):
            # the above works if x is increasing. if it's decreasing,
            # then pix_sampling will be one of the extremal values, and
            # we need to try the following
            pix_sampling = np.argmin(abs((x-val_sampling)[0] - x))
    try:
        X = x.unsqueeze(1) + x[::pix_sampling]
        sampled = utils.to_numpy(func(X, **func_kwargs))
        full_X = x.unsqueeze(1) + x
        full = utils.to_numpy(func(full_X, **func_kwargs))
    except AttributeError:
        # numpy arrays don't have unsqueeze, so we use this `[:, None]`
        # syntax to get the same outcome
        X = x[:, None] + x[::pix_sampling]
        sampled = func(X, **func_kwargs)
        # bug fix: this branch previously called x.unsqueeze (which
        # re-raised AttributeError for numpy inputs) and passed
        # func_kwargs positionally instead of unpacking it
        full_X = x[:, None] + x
        full = func(full_X, **func_kwargs)
    coeffs, residuals, rank, s = np.linalg.lstsq(sampled, full, rcond=None)
    interpolated = np.matmul(sampled, coeffs)
    return sampled, full, interpolated, coeffs.T, residuals
def plot_coeffs(coeffs, ncols=5, ax_size=(5, 5)):
    r"""plot interpolation coefficients

    Simple function to plot a bunch of interpolation coefficients on the
    same figure as stem plots

    Parameters
    ----------
    coeffs : np.array
        the array of coefficients to transform ``sampled`` to
        ``full``. In order to show fewer coefficients (because they're
        so many), index along the first dimension (e.g., ``coeffs[:10]``
        to view first 10)
    ncols : int, optional
        the number of columns to create in the plot
    ax_size : tuple, optional
        the size of each subplots axis

    Returns
    -------
    fig : plt.Figure
        the figure containing the plot
    """
    nrows = int(np.ceil(coeffs.shape[0] / ncols))
    ylim = max(abs(coeffs.max()), abs(coeffs.min()))
    ylim += ylim/10
    # squeeze=False so axes is always a 2d array (even for a single row),
    # which makes the flatten() below safe
    fig, axes = plt.subplots(nrows, ncols, squeeze=False,
                             figsize=[i*j for i, j in zip(ax_size, [ncols, nrows])])
    for i, ax in enumerate(axes.flatten()):
        if i >= coeffs.shape[0]:
            # bug fix: when shape[0] is not a multiple of ncols there are
            # more axes than coefficient rows; hide the surplus axes
            # instead of raising IndexError
            ax.axis('off')
            continue
        # NOTE(review): use_line_collection was removed in matplotlib 3.8
        # -- confirm the pinned matplotlib version before upgrading
        ax.stem(coeffs[i], use_line_collection=True)
        ax.set_ylim((-ylim, ylim))
    return fig
def interpolation_plot(interpolated, residuals, pix=0, val=None, x=np.linspace(-5, 5, 101),
                       full=None):
    r"""create plot showing interpolation results at specified pixel or value

    We have two subplots: the interpolation (with optional actual
    values) and the residuals.

    Either ``pix`` or ``val`` must be set, and the other must be
    ``None``. They specify which interpolated function to display.
    Note that ``pix`` defaults to 0, so when selecting by ``val`` you
    must pass ``pix=None`` explicitly.

    Parameters
    ----------
    interpolated : np.array
        the array of functions interpolated to each pixel
    residuals : np.array
        the errors for each interpolation
    pix : int or None, optional
        we plot the interpolated function centered at this pixel
    val : float or None, optional
        we plot the interpolated function centered at this x-value
    x : torch.tensor or np.array, optional
        the 1d tensor/array passed to ``check_sampling()``. the default
        here is the default there. plotted on x-axis
    full : np.array, optional
        the array of functions centered at each pixel. If None, won't
        plot. If not None, will plot as dashed line behind the
        interpolation for comparison

    Returns
    -------
    fig : plt.Figure
        figure containing the plot
    """
    if val is not None:
        if pix is not None:
            # bug fix: the message previously named check_sampling's
            # parameters (val_sampling/pix_sampling) instead of this
            # function's own val/pix
            raise Exception("One of val or pix must be None!")
        # this will get us the closest value, if there's no exactly
        # correct one.
        pix = np.argmin(abs(x-val))
    x = utils.to_numpy(x)
    ylim = [interpolated.min(), interpolated.max()]
    ylim = [ylim[0] - np.diff(ylim)/10, ylim[1] + np.diff(ylim)/10]
    fig, axes = plt.subplots(1, 2, figsize=(12, 5))
    axes[0].set_ylim(ylim)
    axes[0].plot(x, interpolated[:, pix], label='interpolation')
    if full is not None:
        axes[0].plot(x, full[:, pix], '--', zorder=0, label='actual')
    axes[0].legend()
    axes[1].stem(x, residuals, use_line_collection=True)
    axes[1].scatter(x[pix], residuals[pix], c='r', zorder=10)
    axes[0].set_title("Interpolated function centered at highlighted pixel")
    axes[1].set_title("Error for interpolation centered at highlighted pixel")
    return fig
def create_movie(interpolated, residuals, x=np.linspace(-5, 5, 101), full=None, framerate=10):
    r"""create movie showing the interpolation results

    We create a simple movie to show this in action. We have two
    subplots: the interpolation (with optional actual values) and the
    residuals.

    The more finely sampled your ``x`` was when calling
    ``check_sampling()`` (and thus the larger your ``interpolated`` and
    ``full`` arrays), the longer this will take. Calling this function
    will not take too long, but displaying or saving the returned
    animation will.

    Parameters
    ----------
    interpolated : np.array
        the array of functions interpolated to each pixel
    residuals : np.array
        the errors for each interpolation
    x : torch.tensor or np.array, optional
        the 1d tensor/array passed to ``check_sampling()``. the default
        here is the default there. plotted on x-axis
    full : np.array, optional
        the array of functions centered at each pixel. If None, won't
        plot. If not None, will plot as dashed line behind the
        interpolation for comparison
    framerate : int, optional
        How many frames a second to display.

    Returns
    -------
    anim : matplotlib.animation.FuncAnimation
        The animation object. In order to view, must convert to HTML
        (call ``po.convert_anim_to_html(anim)``) or save (call
        ``anim.save(movie.mp4)``, must have ``ffmpeg`` installed).
    """
    x = utils.to_numpy(x)
    fig = interpolation_plot(interpolated, residuals, x=x, full=full)
    # Grab the artists created by interpolation_plot by position: when
    # full is given, axes[0].lines is [interpolation, actual]; the red
    # highlight dot is the second collection on the residuals axis.
    if full is not None:
        full_line = fig.axes[0].lines[1]
    interp_line = fig.axes[0].lines[0]
    scat = fig.axes[1].collections[1]
    def movie_plot(i):
        # Update the artists in place for frame i (required for blitting).
        interp_line.set_data(x, interpolated[:, i])
        scat.set_offsets((x[i], residuals[i]))
        artists = [interp_line, scat]
        if full is not None:
            full_line.set_data(x, full[:, i])
            artists.append(full_line)
        return artists
    # Close the static figure so only the animation is displayed.
    plt.close(fig)
    return animation.FuncAnimation(fig, movie_plot, frames=len(interpolated), blit=True,
                                   interval=1000./framerate, repeat=False)
| [
"numpy.ceil",
"numpy.diff",
"matplotlib.pyplot.close",
"numpy.linspace",
"numpy.matmul",
"numpy.linalg.lstsq",
"matplotlib.pyplot.subplots",
"torch.linspace"
] | [((679, 705), 'torch.linspace', 'torch.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (693, 705), False, 'import torch\n'), ((4638, 4680), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['sampled', 'full'], {'rcond': 'None'}), '(sampled, full, rcond=None)\n', (4653, 4680), True, 'import numpy as np\n'), ((4700, 4726), 'numpy.matmul', 'np.matmul', (['sampled', 'coeffs'], {}), '(sampled, coeffs)\n', (4709, 4726), True, 'import numpy as np\n'), ((5946, 5969), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (5957, 5969), True, 'import numpy as np\n'), ((7651, 7686), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 5)'}), '(1, 2, figsize=(12, 5))\n', (7663, 7686), True, 'import matplotlib.pyplot as plt\n'), ((8235, 8258), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(101)'], {}), '(-5, 5, 101)\n', (8246, 8258), True, 'import numpy as np\n'), ((10225, 10239), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10234, 10239), True, 'import matplotlib.pyplot as plt\n'), ((5524, 5556), 'numpy.ceil', 'np.ceil', (['(coeffs.shape[0] / ncols)'], {}), '(coeffs.shape[0] / ncols)\n', (5531, 5556), True, 'import numpy as np\n'), ((7589, 7602), 'numpy.diff', 'np.diff', (['ylim'], {}), '(ylim)\n', (7596, 7602), True, 'import numpy as np\n'), ((7617, 7630), 'numpy.diff', 'np.diff', (['ylim'], {}), '(ylim)\n', (7624, 7630), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
def displayData(X, example_width=None):
"""displays 2D data
stored in X in a nice grid. It returns the figure handle h and the
displayed array if requested."""
if X.ndim == 1:
X = X.reshape(1, -1)
# Set example_width automatically if not passed in
if not example_width:
example_width = round(X.shape[1]**0.5)
# Gray Image
plt.set_cmap('gray')
# Compute rows, cols
m, n = X.shape
example_height = int(n / example_width)
# Compute number of items to display
display_rows = int(m**0.5)
display_cols = int(np.ceil(m / display_rows))
# Between images padding
pad = 1
# Setup blank display
display_array = -np.ones(
(pad + display_rows * (example_height + pad), pad + display_cols *
(example_width + pad)))
# Copy each example into a patch on the display array
curr_ex = 0
for j in range(display_rows):
for i in range(display_cols):
if curr_ex > (m - 1):
break
# Copy the patch
# Get the max value of the patch
max_val = max(abs(X[curr_ex]))
r = pad + j * (example_height + pad)
c = pad + i * (example_width + pad)
display_array[r:r + example_height, c:c + example_width] = X[
curr_ex].reshape(
example_height, example_width, order='F') / max_val
curr_ex += 1
if curr_ex > (m - 1):
break
# Display Image
plt.imshow(display_array)
# Do not show axis
plt.axis('off')
plt.show(block=True)
| [
"matplotlib.pyplot.imshow",
"numpy.ceil",
"numpy.ones",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.set_cmap",
"matplotlib.pyplot.show"
] | [((429, 449), 'matplotlib.pyplot.set_cmap', 'plt.set_cmap', (['"""gray"""'], {}), "('gray')\n", (441, 449), True, 'import matplotlib.pyplot as plt\n'), ((1568, 1593), 'matplotlib.pyplot.imshow', 'plt.imshow', (['display_array'], {}), '(display_array)\n', (1578, 1593), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1637), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1630, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1663), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (1651, 1663), True, 'import matplotlib.pyplot as plt\n'), ((635, 660), 'numpy.ceil', 'np.ceil', (['(m / display_rows)'], {}), '(m / display_rows)\n', (642, 660), True, 'import numpy as np\n'), ((752, 854), 'numpy.ones', 'np.ones', (['(pad + display_rows * (example_height + pad), pad + display_cols * (\n example_width + pad))'], {}), '((pad + display_rows * (example_height + pad), pad + display_cols *\n (example_width + pad)))\n', (759, 854), True, 'import numpy as np\n')] |
import os
import json
import time
import torch
import argparse
import numpy as np
from collections import OrderedDict, defaultdict
import pickle
from tensorboardX import SummaryWriter
from convlab2.policy.mle.idea9.model_dialogue import dialogue_VAE, data_mask, loss_fn
from convlab2.policy.mle.idea9.utils import expierment_name
import torch.nn.functional as F
device = "cuda" if torch.cuda.is_available() else "cpu"
def main(args):
ts = time.strftime('%Y-%b-%d-%H:%M:%S', time.gmtime())
# splits = ['train', 'val'] + (['test'] if args.test else [])
splits = ['train'] + (['test'] if args.test else [])
# splits = ["test"]
datasets_real = OrderedDict()
for split in splits:
with open(os.path.join("/home/raliegh/图片/ConvLab-2/convlab2/policy/mle/processed_data",
'sa_{}.pkl'.format(split)), 'rb') as f:
datasets_real[split] = pickle.load(f)
model = dialogue_VAE(549)
model.to(device)
if args.tensorboard_logging:
writer = SummaryWriter(os.path.join(args.logdir, expierment_name(args,ts)))
writer.add_text("model", str(model))
writer.add_text("args", str(args))
writer.add_text("ts", ts)
save_model_path = os.path.join(args.save_model_path, ts)
os.makedirs(save_model_path)
def kl_anneal_function(anneal_function, step, k, x0):
"""
:param anneal_function:
:param step:
:param k:
:param x0:
:return:
"""
if anneal_function == 'logistic':
return float(1/(1+np.exp(-k*(step-x0))))
elif anneal_function == 'linear':
return min(1, step/x0)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.Tensor
step = 0
# start from here.
batch_id = 0
# data
processed_data = {}
processed_dir = "/home/raliegh/图片/ConvLab-2/convlab2/policy/mle/processed_data"
for part in ['train', 'test']:
with open(os.path.join(processed_dir, 'mask_element_{}.pkl'.format(part)), 'rb') as f:
processed_data[part] = pickle.load(f)
# for split in splits:
# data_loader_real = datasets_real[split][split]
# data_collc = []
# domain_collc = []
# mask_id_collc = []
# bf_collc = []
# for i, batch in enumerate(data_loader_real):
# one_1, one_2, one_3, one_4 = data_mask(batch)
# data_collc += one_1
# mask_id_collc += one_2
# domain_collc += one_3
# bf_collc += one_4
# index = [i for i in range(len(domain_collc))]
# processed_data[split] = (data_collc, domain_collc, mask_id_collc, bf_collc, index)
#
# # save file
# with open(os.path.join(processed_dir, 'mask_element_{}_.pkl'.format(split)), 'wb') as f:
# pickle.dump(processed_data[split], f)
for epoch in range(args.epochs):
for split in splits:
tracker = defaultdict(tensor)
# Enable/Disable Dropout
if split == 'train':
model.train()
else:
model.eval()
temp = []
discriminator_target = []
mask_list = []
bf_list = []
data_collc, domain_collc, mask_id_collc, bf_collc, index = processed_data[split]
for batch, domain, mask, bf, iteration in zip(data_collc, domain_collc, mask_id_collc, bf_collc, index):
if batch.size(1) >1:
temp.append(batch)
discriminator_target.append(domain)
mask_list.append(mask)
bf_list.append(bf)
if (iteration+1) % (args.batch_size) == 0:
batch_size = len(temp)
# Forward path for VAE
prediction = model(temp, torch.stack(bf_list).to("cuda"))
# original_input, logp, mean, logv, z = model(temp, max_len)
# loss calculation
loss, loss2 = loss_fn(prediction, torch.tensor(discriminator_target).float().to("cuda"))
#loss = loss1 + loss2
if (batch_id+1) % 500 == 0:
print("loss1 & 2:",loss.item()/batch_size, loss2.item()/batch_size)
# evluation stuff
# backward + optimization
if split == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
step += 1
# bookkeepeing
tracker['ELBO'] = torch.cat((tracker['ELBO'], loss.detach().unsqueeze(0)))
# if split == "test":
# l1_loss = torch.sum(torch.abs((logp > 0.5).type(torch.FloatTensor) - original_input)).to("cuda")
# tracker['l1_loss'] = torch.cat((tracker['l1_loss'], l1_loss.unsqueeze(0)))
if args.tensorboard_logging and (batch_id+1) % args.print_every == 0:
writer.add_scalar("%s/ELBO"%split.upper(), loss.item()/batch_size, batch_id)
# writer.add_scalar("%s/NLL Loss"%split.upper(), NLL_loss.item()/batch_size, batch_id)
# writer.add_scalar("%s/KL Loss"%split.upper(), KL_loss.item()/batch_size, batch_id)
# writer.add_scalar("%s/KL Weight"%split.upper(), KL_weight, batch_id)
# if (batchID+1) % args.print_every == 0: # or iteration+1 == len(data_loader):
# print("%s Batch %04d/%i, Loss %9.4f, NLL-Loss %9.4f, KL-Loss %9.4f, KL-Weight %6.3f"
# %(split.upper(), batchID, len(data_loader)-1, loss.item()/batch_size, NLL_loss.item()/batch_size, KL_loss.item()/batch_size, KL_weight))
# if split == 'valid':
# if 'target_sents' not in tracker:
# tracker['target_sents'] = list()
#
# tracker['target_sents'] += idx2word(batch['target'].tolist(), i2w=datasets['train'].get_i2w(), pad_idx=datasets['train'].pad_idx)
# tracker['z'] = torch.cat((tracker['z'], z.data), dim=0)
temp = []
discriminator_target = []
mask_list = []
bf_list = []
batch_id += 1
# print("No. of pos in data:", (torch.sum(original_input) /args.batch_size).item(), "No. of incorrect prediction:",
# (torch.sum(torch.abs((logp > 0).float() - original_input.to("cuda"))) /args.batch_size).item())
print("%s Epoch %02d/%i, total Loss %9.4f"%(split.upper(), epoch, args.epochs, torch.mean(tracker['ELBO'])/args.batch_size ))
if args.tensorboard_logging:
writer.add_scalar("%s-Epoch/ELBO" % split.upper(), torch.mean(tracker['ELBO']), epoch)
# save a dump of all sentences and the encoded latent space
if split == 'valid':
dump = {'target_sents':tracker['target_sents'], 'z':tracker['z'].tolist()}
if not os.path.exists(os.path.join('dumps', ts)):
os.makedirs('dumps/'+ts)
with open(os.path.join('dumps/'+ts+'/valid_E%i.json'%epoch), 'w') as dump_file:
json.dump(dump,dump_file)
# save checkpoint
if split == 'train' and (epoch+1) % 5 == 0:
checkpoint_path = os.path.join(save_model_path, "E%i.mdl"%(epoch))
torch.save(model.state_dict(), checkpoint_path)
print("Model saved at %s"%checkpoint_path)
save_path = "./bin/"
torch.save(model.state_dict(), save_path + "idea8.pol.mdl")
if __name__ == '__main__':
# args stuff
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--create_data', action='store_true')
parser.add_argument('--max_sequence_length', type=int, default=60)
parser.add_argument('--min_occ', type=int, default=1)
parser.add_argument('--test', action='store_true')
parser.add_argument('-ep', '--epochs', type=int, default=20)
parser.add_argument('-bs', '--batch_size', type=int, default=32)
parser.add_argument('-lr', '--learning_rate', type=float, default=0.001)
parser.add_argument('-eb', '--embedding_size', type=int, default=549)
parser.add_argument('-rnn', '--rnn_type', type=str, default='gru')
parser.add_argument('-hs', '--hidden_size', type=int, default=512)
parser.add_argument('-nl', '--num_layers', type=int, default=1)
parser.add_argument('-bi', '--bidirectional', type=bool, default=True)
parser.add_argument('-ls', '--latent_size', type=int, default=256)
parser.add_argument('-wd', '--word_dropout', type=float, default=1)
parser.add_argument('-ed', '--embedding_dropout', type=float, default=1)
parser.add_argument('-af', '--anneal_function', type=str, default='logistic')
parser.add_argument('-k', '--k', type=float, default=0.0025)
parser.add_argument('-x0', '--x0', type=int, default=2500)
parser.add_argument('-v', '--print_every', type=int, default=20)
parser.add_argument('-tb', '--tensorboard_logging', action='store_true')
parser.add_argument('-log', '--logdir', type=str, default='logs')
parser.add_argument('-bin', '--save_model_path', type=str, default='bin')
args_idea5 = parser.parse_args()
args_idea5.rnn_type = args_idea5.rnn_type.lower()
args_idea5.anneal_function = args_idea5.anneal_function.lower()
assert args_idea5.rnn_type in ['rnn', 'lstm', 'gru']
assert args_idea5.anneal_function in ['logistic', 'linear']
assert 0 <= args_idea5.word_dropout <= 1
main(args_idea5)
| [
"collections.OrderedDict",
"os.makedirs",
"argparse.ArgumentParser",
"torch.mean",
"torch.stack",
"os.path.join",
"pickle.load",
"numpy.exp",
"torch.tensor",
"torch.cuda.is_available",
"collections.defaultdict",
"convlab2.policy.mle.idea9.model_dialogue.dialogue_VAE",
"convlab2.policy.mle.id... | [((395, 420), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (418, 420), False, 'import torch\n'), ((686, 699), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (697, 699), False, 'from collections import OrderedDict, defaultdict\n'), ((961, 978), 'convlab2.policy.mle.idea9.model_dialogue.dialogue_VAE', 'dialogue_VAE', (['(549)'], {}), '(549)\n', (973, 978), False, 'from convlab2.policy.mle.idea9.model_dialogue import dialogue_VAE, data_mask, loss_fn\n'), ((1274, 1312), 'os.path.join', 'os.path.join', (['args.save_model_path', 'ts'], {}), '(args.save_model_path, ts)\n', (1286, 1312), False, 'import os\n'), ((1318, 1346), 'os.makedirs', 'os.makedirs', (['save_model_path'], {}), '(save_model_path)\n', (1329, 1346), False, 'import os\n'), ((8345, 8370), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8368, 8370), False, 'import argparse\n'), ((498, 511), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (509, 511), False, 'import time\n'), ((1851, 1876), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1874, 1876), False, 'import torch\n'), ((931, 945), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (942, 945), False, 'import pickle\n'), ((2245, 2259), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2256, 2259), False, 'import pickle\n'), ((3144, 3163), 'collections.defaultdict', 'defaultdict', (['tensor'], {}), '(tensor)\n', (3155, 3163), False, 'from collections import OrderedDict, defaultdict\n'), ((1097, 1122), 'convlab2.policy.mle.idea9.utils.expierment_name', 'expierment_name', (['args', 'ts'], {}), '(args, ts)\n', (1112, 1122), False, 'from convlab2.policy.mle.idea9.utils import expierment_name\n'), ((8014, 8062), 'os.path.join', 'os.path.join', (['save_model_path', "('E%i.mdl' % epoch)"], {}), "(save_model_path, 'E%i.mdl' % epoch)\n", (8026, 8062), False, 'import os\n'), ((7395, 7422), 'torch.mean', 'torch.mean', (["tracker['ELBO']"], {}), 
"(tracker['ELBO'])\n", (7405, 7422), False, 'import torch\n'), ((7720, 7746), 'os.makedirs', 'os.makedirs', (["('dumps/' + ts)"], {}), "('dumps/' + ts)\n", (7731, 7746), False, 'import os\n'), ((7863, 7889), 'json.dump', 'json.dump', (['dump', 'dump_file'], {}), '(dump, dump_file)\n', (7872, 7889), False, 'import json\n'), ((1628, 1652), 'numpy.exp', 'np.exp', (['(-k * (step - x0))'], {}), '(-k * (step - x0))\n', (1634, 1652), True, 'import numpy as np\n'), ((7671, 7696), 'os.path.join', 'os.path.join', (['"""dumps"""', 'ts'], {}), "('dumps', ts)\n", (7683, 7696), False, 'import os\n'), ((7772, 7827), 'os.path.join', 'os.path.join', (["('dumps/' + ts + '/valid_E%i.json' % epoch)"], {}), "('dumps/' + ts + '/valid_E%i.json' % epoch)\n", (7784, 7827), False, 'import os\n'), ((7236, 7263), 'torch.mean', 'torch.mean', (["tracker['ELBO']"], {}), "(tracker['ELBO'])\n", (7246, 7263), False, 'import torch\n'), ((4078, 4098), 'torch.stack', 'torch.stack', (['bf_list'], {}), '(bf_list)\n', (4089, 4098), False, 'import torch\n'), ((4302, 4336), 'torch.tensor', 'torch.tensor', (['discriminator_target'], {}), '(discriminator_target)\n', (4314, 4336), False, 'import torch\n')] |
import itertools
import numpy as np
def jitter(data, depth):
'''generates indices of the vertices for every
possible mesh configuration for the given depth.'''
return [a for a in
itertools.product([0,1], repeat=len(data))]
def mapVertices(vertices, all_indices, depths):
'''given indices (likely from jitter()), map the given
vertices to the depths indicated by all_indices'''
combs = []
for indices in all_indices:
comb = []
for vtx,i in zip(vertices,indices):
comb.append(
np.array([vtx[0],vtx[1],depths[i]]))
combs.append(comb)
return combs
def normalizeV3(arr):
lens = np.sqrt(arr[:,0]**2 + arr[:,1]**2 + arr[:,2]**2) + 0.00001
arr[:,0] /= lens
arr[:,1] /= lens
arr[:,2] /= lens
return arr | [
"numpy.array",
"numpy.sqrt"
] | [((676, 733), 'numpy.sqrt', 'np.sqrt', (['(arr[:, 0] ** 2 + arr[:, 1] ** 2 + arr[:, 2] ** 2)'], {}), '(arr[:, 0] ** 2 + arr[:, 1] ** 2 + arr[:, 2] ** 2)\n', (683, 733), True, 'import numpy as np\n'), ((561, 598), 'numpy.array', 'np.array', (['[vtx[0], vtx[1], depths[i]]'], {}), '([vtx[0], vtx[1], depths[i]])\n', (569, 598), True, 'import numpy as np\n')] |
"""Tests for Octave magics extension."""
import codecs
import unittest
import sys
from IPython.display import SVG
from IPython.testing.globalipapp import get_ipython
import numpy as np
from oct2py.ipython import octavemagic
from oct2py import Oct2PyError
class OctaveMagicTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
'''Set up an IPython session just once.
It'd be safer to set it up for each test, but for now,
I'm mimicking the IPython team's logic.
'''
if not sys.stdin.encoding:
# needed for py.test
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
cls.ip = get_ipython()
# This is just to get a minimally modified version of the changes
# working
cls.ip.magic('load_ext oct2py.ipython')
cls.ip.ex('import numpy as np')
cls.svgs_generated = 0
def test_octave_inline(self):
result = self.ip.run_line_magic('octave', '[1, 2, 3] + 1;')
assert np.allclose(result, [[2, 3, 4]])
def test_octave_roundtrip(self):
ip = self.ip
ip.ex('x = np.arange(3); y = 4.5')
ip.run_line_magic('octave_push', 'x y')
ip.run_line_magic('octave', 'x = x + 1; y = y + 1;')
ip.run_line_magic('octave_pull', 'x y')
assert np.allclose(ip.user_ns['x'], [[1, 2, 3]])
assert np.allclose(ip.user_ns['y'], 5.5)
def test_octave_cell_magic(self):
ip = self.ip
ip.ex('x = 3; y = [1, 2]')
ip.run_cell_magic('octave', '-f png -s 400,400 -i x,y -o z',
'z = x + y;')
assert np.allclose(ip.user_ns['z'], [[4, 5]])
def test_octave_plot(self):
magic = self.ip.find_cell_magic('octave').__self__
magic._display = self._verify_display
self.ip.run_cell_magic('octave', '-f svg -s 400,500',
'plot([1, 2, 3]); figure; plot([4, 5, 6]);')
assert self.svgs_generated == 2
def _verify_display(self, obj):
if isinstance(obj, SVG):
svg = obj.data
assert 'height="500px"' in svg, svg
assert 'width="400px"' in svg, svg
self.svgs_generated += 1
def test_octave_syntax_error(self):
try:
self.ip.run_cell_magic('octave', '', "a='1")
except Oct2PyError:
self.ip.magic('reload_ext oct2py.ipython')
def test_octave_error(self):
self.assertRaises(Oct2PyError, self.ip.run_cell_magic,
'octave', '', 'a = ones2(1)')
| [
"IPython.testing.globalipapp.get_ipython",
"codecs.getreader",
"numpy.allclose"
] | [((660, 673), 'IPython.testing.globalipapp.get_ipython', 'get_ipython', ([], {}), '()\n', (671, 673), False, 'from IPython.testing.globalipapp import get_ipython\n'), ((1003, 1035), 'numpy.allclose', 'np.allclose', (['result', '[[2, 3, 4]]'], {}), '(result, [[2, 3, 4]])\n', (1014, 1035), True, 'import numpy as np\n'), ((1311, 1352), 'numpy.allclose', 'np.allclose', (["ip.user_ns['x']", '[[1, 2, 3]]'], {}), "(ip.user_ns['x'], [[1, 2, 3]])\n", (1322, 1352), True, 'import numpy as np\n'), ((1368, 1401), 'numpy.allclose', 'np.allclose', (["ip.user_ns['y']", '(5.5)'], {}), "(ip.user_ns['y'], 5.5)\n", (1379, 1401), True, 'import numpy as np\n'), ((1621, 1659), 'numpy.allclose', 'np.allclose', (["ip.user_ns['z']", '[[4, 5]]'], {}), "(ip.user_ns['z'], [[4, 5]])\n", (1632, 1659), True, 'import numpy as np\n'), ((606, 631), 'codecs.getreader', 'codecs.getreader', (['"""utf-8"""'], {}), "('utf-8')\n", (622, 631), False, 'import codecs\n')] |
'''
Author: <NAME>
Description: Module to test the ODA algorithm and targeted landing.
'''
from Camera import camera
from Algorithms import create_samples as cs
from Algorithms import discretize as disc
from Algorithms import voronoi as voronoi
from Algorithms import gap_detection as gd
from process_frames import plot2
from Drone_Control import mission_move_drone as md
from process_frames import getFramesFromSource
import matplotlib.pyplot as plt
import time
import cv2
import numpy as np
from dronekit import connect
from pymavlink import mavutil
# copied from: http://python.dronekit.io/guide/copter/guided_mode.html
def send_ned_velocity(vehicle, velocity_x, velocity_y, velocity_z, duration):
"""
Move vehicle in direction based on specified velocity vectors.
velocity_z is positive towards the ground.
"""
msg = vehicle.message_factory.set_position_target_local_ned_encode(
0, # time_boot_ms (not used)
vehicle._master.target_system, vehicle._master.target_component, # target system, target component
# mavutil.mavlink.MAV_FRAME_LOCAL_NED, # frame
mavutil.mavlink.MAV_FRAME_BODY_NED,
0b0000111111000111, # type_mask (only speeds enabled)
0, 0, 0, # x, y, z positions (not used)
velocity_x, velocity_y, velocity_z, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not supported yet, ignored in GCS_Mavlink)
0, 0) # yaw, yaw_rate (not supported yet, ignored in GCS_Mavlink)
# send command to vehicle on 1 Hz cycle
for x in range(0, duration):
vehicle.send_mavlink(msg)
time.sleep(1)
def avoidObs(cam, numFrames, height_ratio, sub_sample, reduce_to, perc_samples, iters, min_dist):
print('COMMAND: Get drone\'s displacement from target.')
print('\tIf close to target, land and return. If not, continue.')
# d = cam.getFrames(numFrames, rgb=False)
# source = './Camera/Sample_Data/two_boxes'
# d, c = getFramesFromSource(source)
# generate representative depth matrix
h = 12
w = 16
d = 6.0 * np.random.rand(h, w)
t1 = time.time()
d_small = cam.reduceFrame(d, height_ratio = height_ratio, sub_sample = sub_sample, reduce_to = reduce_to)
samples, measured_vector = cs.createSamples(d_small, perc_samples)
try:
v = voronoi.getVoronoi(d_small.shape, samples, measured_vector)
except:
v = d_small
d = disc.depthCompletion(v, iters)
x = gd.findLargestGap(d, min_dist)
t2 = time.time()
print('COMMAND: Rotate drone to face target.')
print('COMMAND: Get depth data from R200.')
print('time to do gap detection: {0}'.format(t2 - t1))
if x == None:
x = len(d[0]) // 2
f = float(x)/len(d[0])
print('(f, position) of gap: ({0}, {1})'.format(f, x))
delTheta = f * 59 - 29.5
if f == 0.5:
print('COMMAND: Move forward.\n')
else:
print('COMMAND: Rotate drone {0} degrees and move forward until obstacle is cleared.\n'.format(delTheta))
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(d > min_dist)
plt.title('Obstacles (Shaded)')
plt.grid()
plt.subplot(1, 2, 2)
plt.imshow(d, cmap='plasma')
plt.title('Navigation')
plt.colorbar(fraction = 0.046, pad = 0.04)
plt.plot([x, x], [len(d)-1, len(d)//2], 'r-', LineWidth=5)
plt.plot([x, x], [len(d)-1, len(d)//2], 'w-', LineWidth=2)
for i in range(len(d)//2, len(d)):
plt.plot(int(x), i, 'wo', markersize=5)
plt.plot(int(x), i, 'ro', markersize=3)
plt.show()
def main():
######################### set up image processing
max_depth = 6.0
cam = camera.Camera(max_depth=max_depth)
try:
cam.connect()
print('Connected to R200 camera')
except:
print('Cannot connect to camera')
pass
source = cam
time.sleep(2.5)
numFrames = 5
# height_ratio of 1 keeps all rows of original image
# default of h_r = 0.5, s_s = 0.3
height_ratio = 1
sub_sample = 1
# reduce_to argFalseument can be: 'lower', 'middle_lower', 'middle', 'middle_upper', and 'upper'
reduce_to = 'middle'
# default of perc_samples = 0.01
perc_samples = 0.05
iters = 3
min_dist = 1.0
print('Program settings:')
print('\tsource: ' + str(source))
print('\tmax_depth: ' + str(max_depth))
print('\tnumFrames: ' + str(numFrames))
print('\theight_ratio: ' + str(height_ratio))
print('\tsub_sample: ' + str(sub_sample))
print('\treduce_to: ' + reduce_to)
print('\tperc_samples: ' + str(perc_samples))
print('\titers: ' + str(iters))
print('\tmin_dist: ' + str(min_dist))
#########################
while True:
avoidObs(cam, numFrames, height_ratio, sub_sample, reduce_to, perc_samples, iters, min_dist)
# ######################### set up drone connection
# connection_string = 'tcp:127.0.0.1:5760'
# vehicle = connect(connection_string, wait_ready=False)
# # set home to current position (to hopefully make alt >= 0)
# vehicle.home_location = vehicle.location.global_frame
# MAV_MODE = 8
# # change to MAV_MODE mode
# md.PX4setMode(vehicle, MAV_MODE)
# time.sleep(1)
# print('Mode: ' + str(vehicle.mode.name))
# #########################
# # arm vehicle
# print('Arming drone...')
# vehicle.armed = True
# try:
# while True:
# print('Going up...')
# send_ned_velocity(vehicle, 0, 0, -1, 4)
# print('Holding...')
# send_ned_velocity(vehicle, 0, 0, 0, 2)
# print('Going down...')
# send_ned_velocity(vehicle, 0, 0, 1, 4)
# print('Holding...')
# send_ned_velocity(vehicle, 0, 0, 0, 2)
# print('Disarming...')
# vehicle.armed = False
# time.sleep(1)
# return
# except KeyboardInterrupt:
# # disarm vehicle
# print('Disarming drone...')
# vehicle.armed = False
# time.sleep(1)
# # close vehicle object before exiting script
# vehicle.close()
# time.sleep(1)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
plt.close('all')
print('\nCtrl-C was pressed, exiting...') | [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.grid",
"Camera.camera.Camera",
"numpy.random.rand",
"matplotlib.pyplot.colorbar",
"time.sleep",
"Algorithms.create_samples.createSamples",
"matplotlib.pyplot.subplot",
"Algorithms.discretize.depthCompletion",
"matplotlib.pyplot.figure",
"matplotlib.... | [((2110, 2121), 'time.time', 'time.time', ([], {}), '()\n', (2119, 2121), False, 'import time\n'), ((2265, 2304), 'Algorithms.create_samples.createSamples', 'cs.createSamples', (['d_small', 'perc_samples'], {}), '(d_small, perc_samples)\n', (2281, 2304), True, 'from Algorithms import create_samples as cs\n'), ((2426, 2456), 'Algorithms.discretize.depthCompletion', 'disc.depthCompletion', (['v', 'iters'], {}), '(v, iters)\n', (2446, 2456), True, 'from Algorithms import discretize as disc\n'), ((2466, 2496), 'Algorithms.gap_detection.findLargestGap', 'gd.findLargestGap', (['d', 'min_dist'], {}), '(d, min_dist)\n', (2483, 2496), True, 'from Algorithms import gap_detection as gd\n'), ((2507, 2518), 'time.time', 'time.time', ([], {}), '()\n', (2516, 2518), False, 'import time\n'), ((3027, 3039), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3037, 3039), True, 'import matplotlib.pyplot as plt\n'), ((3044, 3064), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3055, 3064), True, 'import matplotlib.pyplot as plt\n'), ((3069, 3093), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(d > min_dist)'], {}), '(d > min_dist)\n', (3079, 3093), True, 'import matplotlib.pyplot as plt\n'), ((3098, 3129), 'matplotlib.pyplot.title', 'plt.title', (['"""Obstacles (Shaded)"""'], {}), "('Obstacles (Shaded)')\n", (3107, 3129), True, 'import matplotlib.pyplot as plt\n'), ((3134, 3144), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3142, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3170), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (3161, 3170), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3203), 'matplotlib.pyplot.imshow', 'plt.imshow', (['d'], {'cmap': '"""plasma"""'}), "(d, cmap='plasma')\n", (3185, 3203), True, 'import matplotlib.pyplot as plt\n'), ((3208, 3231), 'matplotlib.pyplot.title', 'plt.title', (['"""Navigation"""'], {}), 
"('Navigation')\n", (3217, 3231), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3274), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'fraction': '(0.046)', 'pad': '(0.04)'}), '(fraction=0.046, pad=0.04)\n', (3248, 3274), True, 'import matplotlib.pyplot as plt\n'), ((3545, 3555), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3553, 3555), True, 'import matplotlib.pyplot as plt\n'), ((3653, 3687), 'Camera.camera.Camera', 'camera.Camera', ([], {'max_depth': 'max_depth'}), '(max_depth=max_depth)\n', (3666, 3687), False, 'from Camera import camera\n'), ((3849, 3864), 'time.sleep', 'time.sleep', (['(2.5)'], {}), '(2.5)\n', (3859, 3864), False, 'import time\n'), ((1619, 1632), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1629, 1632), False, 'import time\n'), ((2079, 2099), 'numpy.random.rand', 'np.random.rand', (['h', 'w'], {}), '(h, w)\n', (2093, 2099), True, 'import numpy as np\n'), ((2326, 2385), 'Algorithms.voronoi.getVoronoi', 'voronoi.getVoronoi', (['d_small.shape', 'samples', 'measured_vector'], {}), '(d_small.shape, samples, measured_vector)\n', (2344, 2385), True, 'from Algorithms import voronoi as voronoi\n'), ((6232, 6248), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6241, 6248), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 23 20:32:25 2018
@author: robot
"""
import os,sys
AbsolutePath = os.path.abspath(__file__)
#将相对路径转换成绝对路径
SuperiorCatalogue = os.path.dirname(AbsolutePath)
#相对路径的上级路径
BaseDir = os.path.dirname(SuperiorCatalogue)
#在“SuperiorCatalogue”的基础上在脱掉一层路径,得到我们想要的路径。
if BaseDir in sys.path:
# print('have been added')
pass
else:
sys.path.append(BaseDir)
import copy
import numpy as np
from constructMethod.instance import Instance
#import sys
class Solution(object):
"""
Class representing a solution to a problem.
"""
def __init__(self, instance : Instance ):
"""Creates a new solution for the given problem."""
super(Solution, self).__init__()
self._instance = instance
# self.encode = np.zeros()
self.objective = sys.float_info.max
self.encode = np.zeros((self._instance.robNum,self._instance.taskNum),dtype =int)
self.encode[:][:] = sys.maxsize
# self.variables = FixedLengthArray(problem.nvars)
# self.objectives = FixedLengthArray(problem.nobjs)
# self.constraints = FixedLengthArray(problem.nconstrs)
# self.constraint_violation = 0.0
# self.evaluated = False
def evaluate(self):
"""Evaluates this solution."""
makespan = self._instance.evaluate(self.encode)
self.objective = makespan
return makespan
def __repr__(self):
return self.__str__()
def __str__(self):
return "Solution encode =\n " + str(self.encode) + '\n objective = ' + str(self.objective) + ' \n instance = ' + str(self._instance)
# "Solution[" + ",".join(list(map(str, self.variables))) + "|" + ",".join(list(map(str, self.objectives))) + "|" + str(self.constraint_violation) + "]"
def __deepcopy__(self, memo):
"""Overridden to avoid cloning the problem definition."""
result = Solution(self._instance)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k != "_instance":
setattr(result, k, copy.deepcopy(v, memo))
return result
def __getitem__(self,index):
return self.encode[index[0]][index[1]]
def __setitem__(self,key,value):
self.encode[key[0]][key[1]] = value
return self.encode
def __eq__(self,other):
# print((self.encode == other.encode).all())
if (self.encode == other.encode).all():
if self._instance == other._instance:
return True
return False
def genNoBackTrackEncode(self):
self.encode = self._instance.genNoBackTrackEncode(self.encode)
# def __cmp__(self,other):
# if self.objective> other.objective:
# return True
# else:
# return False
if __name__ == '__main__':
# print(Solution.__doc__)
insName = 's100_5_10_max100_2.5_2.5_2.5_1.2_thre0.1_MPDAins.dat'
ins = Instance(BaseDir + '//data\\' + insName)
sol = Solution(ins)
# print(sol.encode)
# print(sol)
sol.encode[0][1] = 100
# print(sol.encode[(0,1)])
sol.encode[(0,1)] = -2
# print(sol.encode[(0,1)])
# print(sol.encode)
sol2 = copy.deepcopy(sol)
print((sol.encode == sol2.encode).all())
# print(sol.instance == sol2.instance)
# print(sol.instance.insFileName == sol2.instance.insFileName)
# print(sol.instance.insFileName)
# print(sol2.instance.insFileName)
sol.encode[(0,1)] = -20
if sol == sol2:
print('=-00asd-0i ')
else:
print('not eq')
# print(sol2.encode)
# if sol.encode == sol2.encode:
# print('0aslhdjk')
| [
"os.path.dirname",
"constructMethod.instance.Instance",
"numpy.zeros",
"copy.deepcopy",
"os.path.abspath",
"sys.path.append"
] | [((114, 139), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (129, 139), False, 'import os, sys\n'), ((185, 214), 'os.path.dirname', 'os.path.dirname', (['AbsolutePath'], {}), '(AbsolutePath)\n', (200, 214), False, 'import os, sys\n'), ((239, 273), 'os.path.dirname', 'os.path.dirname', (['SuperiorCatalogue'], {}), '(SuperiorCatalogue)\n', (254, 273), False, 'import os, sys\n'), ((399, 423), 'sys.path.append', 'sys.path.append', (['BaseDir'], {}), '(BaseDir)\n', (414, 423), False, 'import os, sys\n'), ((3009, 3049), 'constructMethod.instance.Instance', 'Instance', (["(BaseDir + '//data\\\\' + insName)"], {}), "(BaseDir + '//data\\\\' + insName)\n", (3017, 3049), False, 'from constructMethod.instance import Instance\n'), ((3265, 3283), 'copy.deepcopy', 'copy.deepcopy', (['sol'], {}), '(sol)\n', (3278, 3283), False, 'import copy\n'), ((896, 964), 'numpy.zeros', 'np.zeros', (['(self._instance.robNum, self._instance.taskNum)'], {'dtype': 'int'}), '((self._instance.robNum, self._instance.taskNum), dtype=int)\n', (904, 964), True, 'import numpy as np\n'), ((2146, 2168), 'copy.deepcopy', 'copy.deepcopy', (['v', 'memo'], {}), '(v, memo)\n', (2159, 2168), False, 'import copy\n')] |
import csv
import logging
from pathlib import Path
from typing import Union
import click
import numpy as np
from blender import Blender, Blend
from blender.catalog import blend2cat, CATALOG_HEADER
def save_img(blend: Blend, idx: int, prefix: str, outdir: Union[Path, str] = ".") -> None:
np.save(f"{outdir}/{prefix}_blend_{idx:06d}.npy", blend.img)
np.save(f"{outdir}/{prefix}_blend_seg_{idx:06d}.npy", blend.segmap)
def create_image_set(blender: Blender, n_blends: int, outdir: Path,
test_set: bool = False) -> None:
"""
Use a Blender instance to output stamps of blended galaxies and
their associated segmentation mask, plus a catalog of these sources.
Parameters
----------
blender:
the Blender instance
n_blends:
number of desired images
outdir:
output directory
test_set: default False
switch between the training and testing galaxy split
"""
prefix = "test" if test_set else "train"
outcat = outdir / f"{prefix}_catalogue.csv"
with open(outcat, "w") as f:
output = csv.writer(f)
output.writerow(CATALOG_HEADER)
msg = f"Producing {prefix} blended images"
with click.progressbar(range(n_blends), label=msg) as bar:
for blend_id in bar:
blend = blender.next_blend(from_test=test_set)
while blend is None:
blend = blender.next_blend(from_test=test_set)
output.writerow(blend2cat(blend, blend_id))
save_img(blend, blend_id, prefix, outdir)
@click.command("produce")
@click.option(
"-n",
"--n_blends",
type=int,
default=100,
show_default=True,
help="Number of blends to produce",
)
@click.option(
"--mag_low",
type=float,
default=0,
show_default=True,
help="Lowest galaxy magnitude",
)
@click.option(
"--mag_high",
type=float,
default=100,
show_default=True,
help="Highest galaxy magnitude",
)
@click.option(
"--mag_diff",
type=float,
default=2,
show_default=True,
help="Top magnitude difference between galaxies",
)
@click.option(
"--rad_diff",
type=float,
default=4,
show_default=True,
help="Top distance between galaxies as a fraction of radius",
)
@click.option(
"-t",
"--test_ratio",
type=float,
default=0.2,
show_default=True,
help="Ratio of the input galaxies used only for the test set",
)
@click.option(
"-e",
"--excluded_type",
type=click.Choice(["irr", "disk", "sph", "sphd"]),
multiple=True,
help="Excluded galaxy types",
)
@click.option(
"-d",
"--datapath",
type=click.Path(exists=True),
default="./data",
show_default=True,
help="Path to data files",
)
@click.option(
"-s",
"--seed",
type=int,
default=42,
show_default=True,
help="Random seed",
)
def main(n_blends, excluded_type, mag_low, mag_high, mag_diff, rad_diff,
test_ratio, datapath, seed):
"""
Produce stamps of CANDELS blended galaxies with their individual masks
"""
# Define the various paths and create directories
cwd = Path.cwd()
datapath = cwd / datapath
input_stamps = datapath / "candels_img.npy"
input_segmaps = datapath / "candels_seg.npy"
input_catalog = datapath / "candels_cat.csv"
outdir = cwd / f"output-s_{seed}-n_{n_blends}"
if not outdir.exists():
outdir.mkdir()
outlog = outdir / "candels-blender.log"
logging.basicConfig(
filename=outlog,
level=logging.INFO,
format="%(asctime)s [ %(levelname)s ] : %(message)s",
)
blender = Blender(
input_stamps,
input_segmaps,
input_catalog,
train_test_ratio=test_ratio,
magdiff=mag_diff,
raddiff=rad_diff,
seed=seed,
)
logger = logging.getLogger(__name__)
logger.info(
"\n"
"Configuration\n"
"=============\n"
f"Number of blends: {n_blends}\n"
f"Seed: {seed}\n"
"\n"
"Catalog cuts\n"
"------------\n"
f"Excluded galaxy types: {excluded_type}\n"
f"Lowest magnitude: {mag_low}\n"
f"Highest magnitude: {mag_high}\n"
"\n"
"Blend properties\n"
"----------------\n"
f"Top difference in magnitude between galaxies: {mag_diff}\n"
f"Top distance between galaxies as a fraction of radius: {rad_diff}\n"
)
# Apply cuts to the galaxy catalog
click.echo(
f"Selecting galaxies in the magnitude range {mag_low} < m < {mag_high}"
)
blender.make_cut(blender.cat.mag > mag_low)
blender.make_cut(blender.cat.mag < mag_high)
for galtype in set(excluded_type):
click.echo(f"Excluding {galtype} galaxies")
blender.make_cut(blender.cat.galtype != galtype)
click.echo(
f"After the cuts, there are {blender.n_gal} individual galaxies "
"left in the catalog."
)
# Compute the train/test splits
n_test = int(test_ratio * n_blends)
n_train = n_blends - n_test
create_image_set(blender, n_train, outdir)
create_image_set(blender, n_test, outdir, test_set=True)
click.echo(message=f"Images stored in {outdir}")
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
| [
"logging.basicConfig",
"logging.getLogger",
"click.Choice",
"blender.catalog.blend2cat",
"click.option",
"pathlib.Path.cwd",
"blender.Blender",
"csv.writer",
"click.echo",
"click.Path",
"click.command",
"numpy.save"
] | [((1596, 1620), 'click.command', 'click.command', (['"""produce"""'], {}), "('produce')\n", (1609, 1620), False, 'import click\n'), ((1622, 1736), 'click.option', 'click.option', (['"""-n"""', '"""--n_blends"""'], {'type': 'int', 'default': '(100)', 'show_default': '(True)', 'help': '"""Number of blends to produce"""'}), "('-n', '--n_blends', type=int, default=100, show_default=True,\n help='Number of blends to produce')\n", (1634, 1736), False, 'import click\n'), ((1761, 1865), 'click.option', 'click.option', (['"""--mag_low"""'], {'type': 'float', 'default': '(0)', 'show_default': '(True)', 'help': '"""Lowest galaxy magnitude"""'}), "('--mag_low', type=float, default=0, show_default=True, help=\n 'Lowest galaxy magnitude')\n", (1773, 1865), False, 'import click\n'), ((1885, 1993), 'click.option', 'click.option', (['"""--mag_high"""'], {'type': 'float', 'default': '(100)', 'show_default': '(True)', 'help': '"""Highest galaxy magnitude"""'}), "('--mag_high', type=float, default=100, show_default=True, help\n ='Highest galaxy magnitude')\n", (1897, 1993), False, 'import click\n'), ((2013, 2136), 'click.option', 'click.option', (['"""--mag_diff"""'], {'type': 'float', 'default': '(2)', 'show_default': '(True)', 'help': '"""Top magnitude difference between galaxies"""'}), "('--mag_diff', type=float, default=2, show_default=True, help=\n 'Top magnitude difference between galaxies')\n", (2025, 2136), False, 'import click\n'), ((2156, 2291), 'click.option', 'click.option', (['"""--rad_diff"""'], {'type': 'float', 'default': '(4)', 'show_default': '(True)', 'help': '"""Top distance between galaxies as a fraction of radius"""'}), "('--rad_diff', type=float, default=4, show_default=True, help=\n 'Top distance between galaxies as a fraction of radius')\n", (2168, 2291), False, 'import click\n'), ((2311, 2457), 'click.option', 'click.option', (['"""-t"""', '"""--test_ratio"""'], {'type': 'float', 'default': '(0.2)', 'show_default': '(True)', 'help': '"""Ratio of the input 
galaxies used only for the test set"""'}), "('-t', '--test_ratio', type=float, default=0.2, show_default=\n True, help='Ratio of the input galaxies used only for the test set')\n", (2323, 2457), False, 'import click\n'), ((2804, 2898), 'click.option', 'click.option', (['"""-s"""', '"""--seed"""'], {'type': 'int', 'default': '(42)', 'show_default': '(True)', 'help': '"""Random seed"""'}), "('-s', '--seed', type=int, default=42, show_default=True, help=\n 'Random seed')\n", (2816, 2898), False, 'import click\n'), ((296, 356), 'numpy.save', 'np.save', (['f"""{outdir}/{prefix}_blend_{idx:06d}.npy"""', 'blend.img'], {}), "(f'{outdir}/{prefix}_blend_{idx:06d}.npy', blend.img)\n", (303, 356), True, 'import numpy as np\n'), ((361, 428), 'numpy.save', 'np.save', (['f"""{outdir}/{prefix}_blend_seg_{idx:06d}.npy"""', 'blend.segmap'], {}), "(f'{outdir}/{prefix}_blend_seg_{idx:06d}.npy', blend.segmap)\n", (368, 428), True, 'import numpy as np\n'), ((3187, 3197), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (3195, 3197), False, 'from pathlib import Path\n'), ((3526, 3641), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'outlog', 'level': 'logging.INFO', 'format': '"""%(asctime)s [ %(levelname)s ] : %(message)s"""'}), "(filename=outlog, level=logging.INFO, format=\n '%(asctime)s [ %(levelname)s ] : %(message)s')\n", (3545, 3641), False, 'import logging\n'), ((3683, 3815), 'blender.Blender', 'Blender', (['input_stamps', 'input_segmaps', 'input_catalog'], {'train_test_ratio': 'test_ratio', 'magdiff': 'mag_diff', 'raddiff': 'rad_diff', 'seed': 'seed'}), '(input_stamps, input_segmaps, input_catalog, train_test_ratio=\n test_ratio, magdiff=mag_diff, raddiff=rad_diff, seed=seed)\n', (3690, 3815), False, 'from blender import Blender, Blend\n'), ((3888, 3915), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3905, 3915), False, 'import logging\n'), ((4535, 4623), 'click.echo', 'click.echo', (['f"""Selecting galaxies in the magnitude 
range {mag_low} < m < {mag_high}"""'], {}), "(\n f'Selecting galaxies in the magnitude range {mag_low} < m < {mag_high}')\n", (4545, 4623), False, 'import click\n'), ((4883, 4990), 'click.echo', 'click.echo', (['f"""After the cuts, there are {blender.n_gal} individual galaxies left in the catalog."""'], {}), "(\n f'After the cuts, there are {blender.n_gal} individual galaxies left in the catalog.'\n )\n", (4893, 4990), False, 'import click\n'), ((5229, 5277), 'click.echo', 'click.echo', ([], {'message': 'f"""Images stored in {outdir}"""'}), "(message=f'Images stored in {outdir}')\n", (5239, 5277), False, 'import click\n'), ((1102, 1115), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (1112, 1115), False, 'import csv\n'), ((4777, 4820), 'click.echo', 'click.echo', (['f"""Excluding {galtype} galaxies"""'], {}), "(f'Excluding {galtype} galaxies')\n", (4787, 4820), False, 'import click\n'), ((2537, 2581), 'click.Choice', 'click.Choice', (["['irr', 'disk', 'sph', 'sphd']"], {}), "(['irr', 'disk', 'sph', 'sphd'])\n", (2549, 2581), False, 'import click\n'), ((2700, 2723), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (2710, 2723), False, 'import click\n'), ((1507, 1533), 'blender.catalog.blend2cat', 'blend2cat', (['blend', 'blend_id'], {}), '(blend, blend_id)\n', (1516, 1533), False, 'from blender.catalog import blend2cat, CATALOG_HEADER\n')] |
from typing import List
import itertools
import numpy as np
import torch
from skimage.color import label2rgb
def get_val_from_metric(metric_value):
if isinstance(metric_value, (int, float)):
pass
elif torch.is_tensor(metric_value):
metric_value = metric_value.item()
else:
metric_value = metric_value.value()
if isinstance(metric_value, (tuple, list)):
metric_value = metric_value[0]
if torch.is_tensor(metric_value):
metric_value = metric_value.item()
return metric_value
def process_epoch_metrics(
epoch_metrics,
best_metrics,
valid_loader="valid",
main_metric="loss",
minimize=True
):
valid_metrics = epoch_metrics[valid_loader]
is_best = True \
if best_metrics is None \
else (minimize != (
valid_metrics[main_metric] > best_metrics[main_metric]))
best_metrics = valid_metrics if is_best else best_metrics
return best_metrics, valid_metrics, is_best
def to_batch_metrics(*, state, metric_key, state_key=None):
metric = state.get_key(state_key or metric_key)
if isinstance(metric, dict):
for key, value in metric.items():
state.batch_metrics[f"{metric_key}_{key}"] = \
get_val_from_metric(value)
else:
state.batch_metrics[f"{metric_key}"] = \
get_val_from_metric(metric)
def get_optimizer_momentum(optimizer):
if isinstance(optimizer, torch.optim.Adam):
return list(optimizer.param_groups)[0]["betas"][0]
elif isinstance(optimizer, torch.optim.SGD):
return list(optimizer.param_groups)[0]["momentum"]
else:
return None
def scheduler_step(scheduler, valid_metric=None):
if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
scheduler.step(valid_metric)
lr = list(scheduler.optimizer.param_groups)[0]["lr"]
else:
scheduler.step()
lr = scheduler.get_lr()[0]
momentum = get_optimizer_momentum(scheduler.optimizer)
return lr, momentum
def binary_mask_to_overlay_image(image: np.ndarray, masks: List[np.ndarray]):
"""Draws every mask for with some color over image"""
h, w = image.shape[:2]
labels = np.zeros((h, w), np.uint8)
for idx, mask in enumerate(masks):
labels[mask > 0] = idx + 1
image_with_overlay = label2rgb(labels, image)
image_with_overlay = (image_with_overlay * 255).round().astype(np.uint8)
return image_with_overlay
def tensor_from_rgb_image(image: np.ndarray) -> torch.Tensor:
image = np.moveaxis(image, -1, 0)
image = np.ascontiguousarray(image)
image = torch.from_numpy(image)
return image
def plot_confusion_matrix(
cm,
class_names=None,
normalize=False,
title="confusion matrix",
fname=None,
show=True,
figsize=12,
fontsize=32,
colormap="Blues"
):
"""
Render the confusion matrix and return matplotlib"s figure with it.
Normalization can be applied by setting `normalize=True`.
"""
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.ioff()
cmap = plt.cm.__dict__[colormap]
if class_names is None:
class_names = [str(i) for i in range(len(np.diag(cm)))]
if normalize:
cm = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
plt.rcParams.update({"font.size": int(fontsize/np.log2(len(class_names)))})
f = plt.figure(figsize=(figsize, figsize))
plt.title(title)
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45, ha="right")
plt.yticks(tick_marks, class_names)
fmt = ".2f" if normalize else "d"
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
if fname is not None:
plt.savefig(fname=fname)
if show:
plt.show()
return f
def render_figure_to_tensor(figure):
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.ioff()
figure.canvas.draw()
image = np.array(figure.canvas.renderer._renderer)
plt.close(figure)
del figure
image = tensor_from_rgb_image(image)
return image
| [
"matplotlib.pyplot.ylabel",
"torch.from_numpy",
"numpy.ascontiguousarray",
"numpy.array",
"numpy.moveaxis",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.use",
... | [((2235, 2261), 'numpy.zeros', 'np.zeros', (['(h, w)', 'np.uint8'], {}), '((h, w), np.uint8)\n', (2243, 2261), True, 'import numpy as np\n'), ((2363, 2387), 'skimage.color.label2rgb', 'label2rgb', (['labels', 'image'], {}), '(labels, image)\n', (2372, 2387), False, 'from skimage.color import label2rgb\n'), ((2572, 2597), 'numpy.moveaxis', 'np.moveaxis', (['image', '(-1)', '(0)'], {}), '(image, -1, 0)\n', (2583, 2597), True, 'import numpy as np\n'), ((2610, 2637), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image'], {}), '(image)\n', (2630, 2637), True, 'import numpy as np\n'), ((2650, 2673), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2666, 2673), False, 'import torch\n'), ((3066, 3087), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (3080, 3087), False, 'import matplotlib\n'), ((3128, 3138), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3136, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3484), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(figsize, figsize)'}), '(figsize=(figsize, figsize))\n', (3456, 3484), True, 'import matplotlib.pyplot as plt\n'), ((3489, 3505), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3498, 3505), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3560), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (3520, 3560), True, 'import matplotlib.pyplot as plt\n'), ((3565, 3579), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3577, 3579), True, 'import matplotlib.pyplot as plt\n'), ((3630, 3690), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'class_names'], {'rotation': '(45)', 'ha': '"""right"""'}), "(tick_marks, class_names, rotation=45, ha='right')\n", (3640, 3690), True, 'import matplotlib.pyplot as plt\n'), ((3696, 3731), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 
'class_names'], {}), '(tick_marks, class_names)\n', (3706, 3731), True, 'import matplotlib.pyplot as plt\n'), ((4040, 4058), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4056, 4058), True, 'import matplotlib.pyplot as plt\n'), ((4063, 4087), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (4073, 4087), True, 'import matplotlib.pyplot as plt\n'), ((4092, 4121), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (4102, 4121), True, 'import matplotlib.pyplot as plt\n'), ((4294, 4315), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (4308, 4315), False, 'import matplotlib\n'), ((4356, 4366), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (4364, 4366), True, 'import matplotlib.pyplot as plt\n'), ((4406, 4448), 'numpy.array', 'np.array', (['figure.canvas.renderer._renderer'], {}), '(figure.canvas.renderer._renderer)\n', (4414, 4448), True, 'import numpy as np\n'), ((4453, 4470), 'matplotlib.pyplot.close', 'plt.close', (['figure'], {}), '(figure)\n', (4462, 4470), True, 'import matplotlib.pyplot as plt\n'), ((219, 248), 'torch.is_tensor', 'torch.is_tensor', (['metric_value'], {}), '(metric_value)\n', (234, 248), False, 'import torch\n'), ((4157, 4181), 'matplotlib.pyplot.savefig', 'plt.savefig', ([], {'fname': 'fname'}), '(fname=fname)\n', (4168, 4181), True, 'import matplotlib.pyplot as plt\n'), ((4204, 4214), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4212, 4214), True, 'import matplotlib.pyplot as plt\n'), ((453, 482), 'torch.is_tensor', 'torch.is_tensor', (['metric_value'], {}), '(metric_value)\n', (468, 482), False, 'import torch\n'), ((3255, 3266), 'numpy.diag', 'np.diag', (['cm'], {}), '(cm)\n', (3262, 3266), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
import sys
import classifier as c
from plotDecBoundaries import plotDecBoundaries
def error_rate(classifications, true_classifications):
if (np.shape(classifications) != np.shape(true_classifications)):
raise RuntimeError("Size not equal")
return np.count_nonzero(classifications - true_classifications) / np.size(classifications)
def main(training_csv, test_csv, plot_fname = "", features = np.array([0, 1])):
print ("Training Data: %s" % training_csv)
print ("Test Data: %s" % test_csv)
print("Features", features)
features = np.append(features, -1)
training_data = np.loadtxt(training_csv, delimiter = ',', usecols = features)
test_data = np.loadtxt(test_csv, delimiter = ',', usecols = features)
training_data_shape = np.shape(training_data)
test_data_shape = np.shape(test_data)
if (training_data_shape[1] != test_data_shape[1]):
raise RuntimeError("Size of training and test data do not match")
return
classifier = c.NearestMeanClassifier(training_data_shape[1] - 1)
classifier = classifier.train(training_data)
classifications = classifier.evaluate(training_data[:, :-1])
error_rate_training = error_rate(classifications, training_data[:, -1])
print("Error-rate of training data classifications = %.4f" % error_rate_training)
classifications = classifier.evaluate(test_data[:, :-1])
error_rate_test = error_rate(classifications, test_data[:, -1])
print("Error-rate of test data classifications = %.4f" % error_rate_test)
if (len(plot_fname)):
plt = plotDecBoundaries(training_data[:, :-1], training_data[:, -1], classifier.feature_means)
plt.savefig(plot_fname)
plt.clf()
return (error_rate_training, error_rate_test)
if (__name__ == '__main__'):
if (len(sys.argv) < 3):
print("Unspecified traning data and/or test data")
exit(1)
elif (len(sys.argv) == 3):
feature_size = 2
else:
feature_size = int(sys.argv[3])
if (feature_size < 2):
print("feature size has to be 2 or above")
exit(1)
training_csv = sys.argv[1]
test_csv = sys.argv[2]
error_rate_training_list = np.array([])
error_rate_test_list = np.array([])
features_list = np.array([])
if (len(sys.argv) > 4):
plot_fname = sys.argv[4]
try:
if (len(sys.argv) < 7):
for i in range(feature_size - 1):
for j in range(i + 1, feature_size):
features = np.array([i, j])
if (len(sys.argv) < 5):
(er_training, er_test) = main(training_csv, test_csv, features = features)
else:
(er_training, er_test) = main(training_csv, test_csv, plot_fname, features)
error_rate_training_list = np.append(error_rate_training_list, er_training)
error_rate_test_list = np.append(error_rate_test_list, er_test)
if (np.size(features_list) == 0):
features_list = np.array([features])
else:
features_list = np.concatenate((features_list, np.array([features])))
if (feature_size > 2):
print("Standard deviation of error-rate on traning/test: %s" % np.std([error_rate_training_list, error_rate_test_list], axis = 1))
print("Minimum error-rate on training %.4f, with featrues: %s" % (np.min(error_rate_training_list), features_list[np.argmin(error_rate_training_list)]))
print("Maximum error-rate on training %.4f, with featrues: %s" % (np.max(error_rate_training_list), features_list[np.argmax(error_rate_training_list)]))
print("Minimum error-rate on test %.4f, with featrues: %s" % (np.min(error_rate_test_list), features_list[np.argmin(error_rate_test_list)]))
print("Maximum error-rate on test %.4f, with featrues: %s" % (np.max(error_rate_test_list), features_list[np.argmax(error_rate_test_list)]))
else:
main(training_csv, test_csv, plot_fname, features = np.array([int(sys.argv[5]), int(sys.argv[6])]))
except RuntimeError as e:
print("RuntimeError raised\n", e.args)
exit(1)
| [
"numpy.shape",
"matplotlib.pyplot.savefig",
"numpy.size",
"classifier.NearestMeanClassifier",
"matplotlib.pyplot.clf",
"numpy.min",
"numpy.argmax",
"numpy.max",
"numpy.append",
"numpy.array",
"numpy.count_nonzero",
"plotDecBoundaries.plotDecBoundaries",
"numpy.std",
"numpy.argmin",
"nump... | [((482, 498), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (490, 498), True, 'import numpy as np\n'), ((635, 658), 'numpy.append', 'np.append', (['features', '(-1)'], {}), '(features, -1)\n', (644, 658), True, 'import numpy as np\n'), ((679, 736), 'numpy.loadtxt', 'np.loadtxt', (['training_csv'], {'delimiter': '""","""', 'usecols': 'features'}), "(training_csv, delimiter=',', usecols=features)\n", (689, 736), True, 'import numpy as np\n'), ((757, 810), 'numpy.loadtxt', 'np.loadtxt', (['test_csv'], {'delimiter': '""","""', 'usecols': 'features'}), "(test_csv, delimiter=',', usecols=features)\n", (767, 810), True, 'import numpy as np\n'), ((841, 864), 'numpy.shape', 'np.shape', (['training_data'], {}), '(training_data)\n', (849, 864), True, 'import numpy as np\n'), ((887, 906), 'numpy.shape', 'np.shape', (['test_data'], {}), '(test_data)\n', (895, 906), True, 'import numpy as np\n'), ((1070, 1121), 'classifier.NearestMeanClassifier', 'c.NearestMeanClassifier', (['(training_data_shape[1] - 1)'], {}), '(training_data_shape[1] - 1)\n', (1093, 1121), True, 'import classifier as c\n'), ((2263, 2275), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2271, 2275), True, 'import numpy as np\n'), ((2303, 2315), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2311, 2315), True, 'import numpy as np\n'), ((2336, 2348), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2344, 2348), True, 'import numpy as np\n'), ((218, 243), 'numpy.shape', 'np.shape', (['classifications'], {}), '(classifications)\n', (226, 243), True, 'import numpy as np\n'), ((247, 277), 'numpy.shape', 'np.shape', (['true_classifications'], {}), '(true_classifications)\n', (255, 277), True, 'import numpy as np\n'), ((336, 392), 'numpy.count_nonzero', 'np.count_nonzero', (['(classifications - true_classifications)'], {}), '(classifications - true_classifications)\n', (352, 392), True, 'import numpy as np\n'), ((395, 419), 'numpy.size', 'np.size', (['classifications'], {}), 
'(classifications)\n', (402, 419), True, 'import numpy as np\n'), ((1649, 1742), 'plotDecBoundaries.plotDecBoundaries', 'plotDecBoundaries', (['training_data[:, :-1]', 'training_data[:, -1]', 'classifier.feature_means'], {}), '(training_data[:, :-1], training_data[:, -1], classifier.\n feature_means)\n', (1666, 1742), False, 'from plotDecBoundaries import plotDecBoundaries\n'), ((1746, 1769), 'matplotlib.pyplot.savefig', 'plt.savefig', (['plot_fname'], {}), '(plot_fname)\n', (1757, 1769), True, 'import matplotlib.pyplot as plt\n'), ((1778, 1787), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1785, 1787), True, 'import matplotlib.pyplot as plt\n'), ((2581, 2597), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (2589, 2597), True, 'import numpy as np\n'), ((2914, 2962), 'numpy.append', 'np.append', (['error_rate_training_list', 'er_training'], {}), '(error_rate_training_list, er_training)\n', (2923, 2962), True, 'import numpy as np\n'), ((3006, 3046), 'numpy.append', 'np.append', (['error_rate_test_list', 'er_test'], {}), '(error_rate_test_list, er_test)\n', (3015, 3046), True, 'import numpy as np\n'), ((3071, 3093), 'numpy.size', 'np.size', (['features_list'], {}), '(features_list)\n', (3078, 3093), True, 'import numpy as np\n'), ((3141, 3161), 'numpy.array', 'np.array', (['[features]'], {}), '([features])\n', (3149, 3161), True, 'import numpy as np\n'), ((3396, 3460), 'numpy.std', 'np.std', (['[error_rate_training_list, error_rate_test_list]'], {'axis': '(1)'}), '([error_rate_training_list, error_rate_test_list], axis=1)\n', (3402, 3460), True, 'import numpy as np\n'), ((3546, 3578), 'numpy.min', 'np.min', (['error_rate_training_list'], {}), '(error_rate_training_list)\n', (3552, 3578), True, 'import numpy as np\n'), ((3715, 3747), 'numpy.max', 'np.max', (['error_rate_training_list'], {}), '(error_rate_training_list)\n', (3721, 3747), True, 'import numpy as np\n'), ((3880, 3908), 'numpy.min', 'np.min', (['error_rate_test_list'], {}), 
'(error_rate_test_list)\n', (3886, 3908), True, 'import numpy as np\n'), ((4037, 4065), 'numpy.max', 'np.max', (['error_rate_test_list'], {}), '(error_rate_test_list)\n', (4043, 4065), True, 'import numpy as np\n'), ((3259, 3279), 'numpy.array', 'np.array', (['[features]'], {}), '([features])\n', (3267, 3279), True, 'import numpy as np\n'), ((3594, 3629), 'numpy.argmin', 'np.argmin', (['error_rate_training_list'], {}), '(error_rate_training_list)\n', (3603, 3629), True, 'import numpy as np\n'), ((3763, 3798), 'numpy.argmax', 'np.argmax', (['error_rate_training_list'], {}), '(error_rate_training_list)\n', (3772, 3798), True, 'import numpy as np\n'), ((3924, 3955), 'numpy.argmin', 'np.argmin', (['error_rate_test_list'], {}), '(error_rate_test_list)\n', (3933, 3955), True, 'import numpy as np\n'), ((4081, 4112), 'numpy.argmax', 'np.argmax', (['error_rate_test_list'], {}), '(error_rate_test_list)\n', (4090, 4112), True, 'import numpy as np\n')] |
# %%
import numpy as np
import os,sys
import argparse
import numpy as np
from collections import defaultdict
from keras.models import Model, load_model, model_from_json
from datetime import datetime
import plotly.graph_objects as go
import base64
from dominate.util import raw
from dominate.tags import *
from dominate import document
# %%
# Fix the global NumPy RNG seed so prediction runs are reproducible.
np.random.seed(12345)
# Hide all CUDA devices so Keras/TensorFlow runs on CPU only.
os.environ['CUDA_VISIBLE_DEVICES']="-1"
# %%
# Binding model: directory holding the shared architecture (model.json)
# and the five ensemble weight files (model_0.h5 .. model_4.h5).
main_dir = "../models"
def import_model(main_dir, n_models=5):
    """Load the Keras model ensemble from disk.

    Args:
        main_dir: directory containing the shared architecture file
            ``model.json`` and per-member weight files ``model_<i>.h5``.
        n_models: number of ensemble members to load (default 5, matching
            the shipped NetBCE weights).

    Returns:
        List of Keras models, each with its own weights loaded.
    """
    # One architecture file is shared by every ensemble member.
    with open(os.path.join(main_dir, "model.json"), 'r') as json_f:
        loaded_model_json = json_f.read()
    models = []
    for i in range(n_models):
        member = model_from_json(loaded_model_json)
        member.load_weights(os.path.join(main_dir, "model_" + str(i) + ".h5"))
        models.append(member)
    return models
def scoring(models, data):
    """Average the positive-class scores of every model in the ensemble.

    Args:
        models: iterable of Keras-style models; each must expose
            ``predict(data)`` returning an array of shape (n_samples, 1).
        data: encoded peptide matrix, passed unchanged to each model.

    Returns:
        List of floats: one ensemble-averaged prediction score per sample.
    """
    # Per-model score vectors: predict() yields (n_samples, 1); transpose
    # and take row 0 to get the flat per-sample scores.
    # (The redundant in-function `import numpy as np` was removed; the
    # module already imports numpy at the top of the file.)
    per_model = [np.transpose(model.predict(data))[0] for model in models]
    # Average across the ensemble, sample by sample.
    return [np.mean(scores) for scores in zip(*per_model)]
# %%
def read_fasta(fasta_file):
    """Parse a FASTA file into a dict of {header_line: sequence}.

    The full header line (including the leading '>') is used as the key,
    matching the rest of the pipeline. Exits the process if the file
    cannot be opened (preserving the original behavior).

    Args:
        fasta_file: path to the FASTA file.

    Returns:
        dict mapping each '>'-prefixed header line to its concatenated
        sequence. Returns an empty dict for an empty file (the original
        raised NameError here).
    """
    try:
        # The original opened the file twice and never closed it; a single
        # context-managed open fixes the handle leak.
        with open(fasta_file) as fp:
            lines = fp.readlines()
    except IOError:
        exit()
    fasta_dict = {}
    gene_id = ""
    seq = ""
    for line in lines:
        if not line.strip():
            continue  # skip blank lines defensively
        if line[0] == '>':
            if gene_id != "":
                fasta_dict[gene_id] = seq
            seq = ""
            gene_id = line.strip()  # whole header line (with '>') is the id
        else:
            seq += line.strip()
    if gene_id != "":
        fasta_dict[gene_id] = seq  # record the final record
    return fasta_dict
# %%
def sample_fasta_peptides(sequences, peptide_lengths):
    """Slide over every sequence and enumerate all candidate peptides.

    Args:
        sequences: dict of {name: sequence string} (e.g. from read_fasta).
        peptide_lengths: iterable of peptide lengths to sample.

    Returns:
        (sample_peptides, data_dict) where
        sample_peptides: {name: [peptide, ...]} in generation order,
            duplicates included (scores are matched back by index);
        data_dict: {name: {peptide: ["start_stop", ...]}} with 1-based
            inclusive positions joined by '_'.

    Raises:
        ValueError: if any sequence is not a string.
    """
    sample_peptides = {}
    data_dict = defaultdict(defaultdict)
    min_length = min(peptide_lengths)
    for name, sequence in sequences.items():
        if not isinstance(sequence, str):
            raise ValueError("Expected string, not %s (%s)" % (
                sequence, type(sequence)))
        for peptide_start in range(len(sequence) - min_length + 1):
            for peptide_length in peptide_lengths:
                peptide = sequence[peptide_start: peptide_start + peptide_length]
                # Windows that run off the end of the sequence are shorter
                # than requested; skip them.
                if len(peptide) != peptide_length:
                    continue
                start_stop = '_'.join([str(peptide_start + 1),
                                       str(peptide_start + peptide_length)])
                # setdefault replaces the original's repeated membership
                # tests while keeping identical insertion order.
                sample_peptides.setdefault(name, []).append(peptide)
                data_dict[name].setdefault(peptide, []).append(start_stop)
    return sample_peptides, data_dict
# %%
def convertSampleToProbMatr(sampleSeq3DArr): #changed add one column for '1'
    """
    Convert raw peptide sequences to a one-hot probability matrix.

    PARAMETER
    ---------
    sampleSeq3DArr: iterable of peptide strings.
        Residues outside the 20 amino acids + '-' padding symbol are
        encoded as a uniform 1/21 distribution over all categories.

    Returns:
        probMatr: numpy array of shape (n_samples, seq_length, 21).
            NOTE(review): seq_length is taken from the FIRST sequence, so
            all inputs are assumed to share one length (the caller pads
            to a fixed length before calling).
    """
    # Alphabetical amino-acid order, then '-' padding => indices 0..20.
    # Replaces the original 21-line literal dict.
    alphabet = "ACDEFGHIKLMNPQRSTVWY-"
    letterDict = {aa: idx for idx, aa in enumerate(alphabet)}
    AACategoryLen = len(alphabet)  # 21 one-hot categories
    probMatr = np.zeros((len(sampleSeq3DArr), len(sampleSeq3DArr[0]), AACategoryLen))
    for sampleNo, sequence in enumerate(sampleSeq3DArr):
        for AANo, AA in enumerate(sequence):
            if AA not in letterDict:
                # Unknown symbol: spread probability mass uniformly.
                probMatr[sampleNo][AANo] = np.full((1, AACategoryLen), 1.0 / AACategoryLen)
            else:
                probMatr[sampleNo][AANo][letterDict[AA]] = 1
    return probMatr
def convertSampleToProbMatr_2(sample_peptides):
    """Encode every peptide list into a one-hot matrix per protein.

    Each peptide is normalized to the fixed model input length of 24:
    shorter peptides are right-padded with '-', longer ones truncated.

    BUG FIX: the original only appended peptides with len < 24, silently
    DROPPING any peptide of length >= 24, which desynchronized the score
    array from sample_peptides downstream (IndexError / wrong scores).

    Args:
        sample_peptides: {protein_name: [peptide, ...]}.

    Returns:
        {protein_name: numpy array of shape (n_peptides, 24, 21)}.
    """
    seq_matr_Dict = {}
    for seq, peptides in sample_peptides.items():
        padded = [p[:24].ljust(24, '-') for p in peptides]
        seq_matr_Dict[seq] = convertSampleToProbMatr(padded)
    return seq_matr_Dict
# %%
def preiction(fastafile, output_folder, peptide_lengths):
    """Run the full NetBCE prediction pipeline and write a TSV report.

    NOTE(review): the name keeps the original 'preiction' typo so
    existing callers are not broken.

    Args:
        fastafile: path to the input FASTA file.
        output_folder: directory for NetBCE_predictions.tsv (created if
            missing).
        peptide_lengths: iterable of peptide lengths to sample.

    Returns:
        List of [protein_id, peptide, "start:end;...", score_string] rows,
        also written tab-separated to <output_folder>/NetBCE_predictions.tsv.
    """
    sequences = read_fasta(fastafile)
    sample_peptides, sample_peptides_position = sample_fasta_peptides(sequences, peptide_lengths)
    sample_peptides_matr = convertSampleToProbMatr_2(sample_peptides)
    model_ligand = import_model(main_dir)
    all_epi_scores = []
    for seq, matr in sample_peptides_matr.items():
        epi_scores = scoring(model_ligand, np.array(matr))
        all_peptide = sample_peptides[seq]
        # epi_scores is index-aligned with the peptide list for this protein.
        for i, peptide in enumerate(all_peptide):
            pos = sample_peptides_position[seq][peptide]
            all_epi_scores.append([seq,
                                   peptide,
                                   ';'.join(pos).replace('_', ':'),
                                   str(epi_scores[i])])
    # exist_ok avoids the isdir-then-makedirs race of the original check.
    os.makedirs(output_folder, exist_ok=True)
    with open(output_folder + '/NetBCE_predictions.tsv', 'w') as f:
        for line in all_epi_scores:
            f.write('\t'.join(line) + '\n')
    return all_epi_scores
# %%
def wrap_plotly_fig(fig: go.Figure, width: str = '100%', height: str = '100%'):
if 'px' in width:
fig = fig.to_html(include_plotlyjs=False, full_html=False, default_height=height, default_width=width)
return div(raw(fig), style=f'width: {width}')
else:
fig = fig.to_html(include_plotlyjs=False, full_html=False, default_height=height, default_width='100%')
return div(raw(fig), style=f'width: {width}')
def ploty_fig_to_image(fig: go.Figure, width: int = 360, height: int = 360):
fig_data = fig.to_image(format='svg', width=width, height=height).decode()
return img(src=f'data:image/svg+xml;base64,{fig_data}',
className='img-fluid',
style=f'width: 100%; height: auto')
def get_plotlyjs():
fig = go.Figure()
fig = fig.to_html(include_plotlyjs=True, full_html=False)
plotlyjs = fig[fig.index("<script"):fig.rindex("<div id=")] + "</div></script>"
return raw(plotlyjs)
def lab_logo():
lab_logo = base64.b64encode(
open(str('../logo/logo.png'), 'rb').read()).decode()
return img(src=f'data:image/jpg;base64,{lab_logo}', className='img-fluid',
style="max-width:100%; max-height:100%; margin-left: 10px;"
"margin-bottom: 8px") # can add opacity: 50% to style if desired
def prediction_table(all_epi_scores, className=None):
t = table(className=f'display nowrap',
style="text-align: center",
id='table_id_example')
t.add(
thead(
tr(
[
th(f'Protein identifier', style="padding: 5px"),
th(f'Candidate epitope', style="padding: 5px"),
th(f'Position (start:end)', style="padding: 5px"),
th('Predicion score', style="padding: 5px"),
]
)
)
)
tablebody = tbody()
for sample in all_epi_scores:
tablerow = tr()
tablerow.add(td(sample[0], style='word-break: break-word'))
tablerow.add(td(sample[1]))
tablerow.add(td(sample[2]))
tablerow.add(td(sample[3]))
tablebody.add(tablerow)
t.add(tablebody)
return div(t, className=f'table-responsive {className}' if className else 'table-responsive')
def gen_prediction_histogram(all_epi_scores, className=None):
n_peps_fig = go.Figure()
all_length = [len(sample[1]) for sample in all_epi_scores]
binders, counts = np.unique(all_length, return_counts=True)
n_peps_fig.add_trace(go.Bar(x=binders, y=counts, marker=dict(color = binders,
colorscale='Spectral')))
n_peps_fig.update_layout(margin=dict(l=20, r=20, t=20, b=20),
hovermode='x',
legend=dict(orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
bgcolor="rgba(255, 255, 255, 0.8)"),
font_color='#212529'
)
n_peps_fig.layout.title.xanchor = 'center'
n_peps_fig.update_yaxes(title_text='Number of peptides')
n_peps_fig.update_xaxes(title_text='Peptides length')
n_peps_fig.update_xaxes(titlefont={'size': 16}, tickfont={'size': 14})
n_peps_fig.update_yaxes(titlefont={'size': 16}, tickfont={'size': 14})
# n_peps_fig.write_image(str(fig_dir / 'binding_histogram.pdf'), engine="kaleido")
card = div(div(b('Epitope length distribution'), className='card-header'), className='card')
card.add(div(raw(n_peps_fig.to_html(full_html=False, include_plotlyjs=False)), className='card-body'))
return div(card, className=className)
def gen_prediction_boxplot(all_epi_scores, className=None):
n_peps_fig = go.Figure()
length_score_dict = {}
for i, prediction in enumerate(all_epi_scores):
pep_len = len(prediction[1])
pep_score = float(prediction[-1])
if pep_len not in length_score_dict.keys():
length_score_dict[pep_len] = [pep_score]
else:
length_score_dict[pep_len].append(pep_score)
binders = list(length_score_dict.keys())
counts = list(length_score_dict.values())
N = len(binders)
colors = ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 360, N)]
for xd, yd, cls in zip(binders, counts, colors):
n_peps_fig.add_trace(go.Box(
y=yd,
name=xd,
boxpoints='all',
jitter=0.5,
whiskerwidth=0.2,
fillcolor=cls,
marker_size=2,
line_width=1)
)
n_peps_fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
hovermode='x',
legend=dict(orientation="h",
yanchor="bottom",
y=1.02,
xanchor="right",
x=1,
bgcolor="rgba(255, 255, 255, 0.8)"),
font_color='#212529')
n_peps_fig.layout.title.xanchor = 'center'
n_peps_fig.update_yaxes(title_text='Score')
n_peps_fig.update_xaxes(title_text='Peptides length')
n_peps_fig.update_xaxes(titlefont={'size': 16}, tickfont={'size': 14})
n_peps_fig.update_yaxes(titlefont={'size': 16}, tickfont={'size': 14})
# n_peps_fig.write_image(str(fig_dir / 'binding_histogram.pdf'), engine="kaleido")
card = div(div(b('Score distribution'), className='card-header'), className='card')
card.add(div(raw(n_peps_fig.to_html(full_html=False, include_plotlyjs=False)), className='card-body'))
return div(card, className=className)
# %%
def generate_html_report(all_epi_scores, peptide_lengths, output_folder):
doc = document(title='NetBCE Report')
with doc.head:
link(rel="stylesheet", href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.0/css/bootstrap.min.css",
integrity="<KEY>",
crossorigin="anonymous")
link(rel="stylesheet", href="https://cdn.datatables.net/1.11.4/css/jquery.dataTables.min.css")
link(rel="stylesheet", href="https://cdn.datatables.net/buttons/2.2.2/css/buttons.dataTables.min.css")
script(src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js")
script(src="https://maxcdn.bootstrapcdn.com/bootstrap/4.5.0/js/bootstrap.min.js")
script(src="https://cdn.datatables.net/1.11.4/js/jquery.dataTables.min.js")
script(src="https://cdn.datatables.net/buttons/2.2.2/js/dataTables.buttons.min.js")
script(src="https://cdnjs.cloudflare.com/ajax/libs/jszip/3.1.3/jszip.min.js")
script(src="https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.53/pdfmake.min.js")
script(src="https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.53/vfs_fonts.js")
script(src="https://cdn.datatables.net/buttons/2.2.2/js/buttons.html5.min.js")
script(src="https://cdn.datatables.net/buttons/2.2.2/js/buttons.print.min.js")
body(onload="plots = document.getElementsByClassName('plotly-graph-div');"
"l = plots.length;"
"for (i=0; i < l; i++) {Plotly.relayout(plots[i], {autosize: true});}")
script("$(document).ready(function(){$('.nav-tabs a').click(function(){$(this).tab('show');});"
"$('.nav-tabs a').on('shown.bs.tab',"
"function(){"
"plots = document.getElementsByClassName('plotly-graph-div');"
"l = plots.length;"
"for (i=0; i < l; i++) {Plotly.update(plots[i]);}"
"});});")
# script("$(document).ready(function(){$('#table_id_example').DataTable();});")
script("$(document).ready(function(){$('#table_id_example').DataTable({dom: 'Bfrtip',buttons: ['copy', 'csv', 'excel', 'pdf', 'print'], aaSorting: [[3, 'desc']]});});")
with doc:
get_plotlyjs()
with div(id='layout', className='container', style='max-width: 1600px;'
'min-width: 1000px;'
'margin-top: 20px;'
'margin-bottom: 20px'):
with div(className='row'):
with div(className='col-12', style='display: flex; height: 60px'):
div([h1('N'), h3('et'), h1('BCE'),
h5(f' (v1)', style="white-space: pre"),
h1(' - Analysis report')],
style="background-color: #0c0c0c; padding: 5px; color: white;"
"border-radius: 6px; width: 100%; display: flex"),
lab_logo()
hr()
with div(className='row'):
with div(className='col-10', style='margin: 0'):
h3('NetBCE enables accurate prediction of linear B-cell epitopes with interpretable deep neural network')
p('The identification of B-cell epitopes is of great value for the development of specific serodiagnostic assays and the optimization of medical therapy.Here, we present NetBCE, a python tool which uses a deep neural network to detect linear B-cell epitope regions on individual protein sequences. NetBCE exceeds all other currently used linear B-cell epitope prediction tools. Our software is shown to reliably predict linear B-cell epitopes of a given protein sequence, thus contributing to a significant reduction of laboratory experiments and costs required for the conventional approach.', style="font-size: 20px; padding: 5px;")
p([b('Developers: '), f'<NAME>, <NAME> @ CPH, UTHealth-Houston SBMI.'])
p([b('Lab website: '), a(f'https://www.uth.edu/bioinfo/.', href='https://www.uth.edu/bioinfo/')])
p([b('Date: '), f'{str(datetime.now().date())}'])
hr()
h3('Predicion results:')
hr()
with div(className='row', style="max-height:600px; overflow-y: scroll;"):
with div(className='col-10', style='margin: 10px'):
prediction_table(all_epi_scores, className='col')
hr()
h3("Predicion statistics:")
hr()
with div(className='row'):
if len(peptide_lengths) <= 12:
gen_prediction_histogram(all_epi_scores, className='col-6')
gen_prediction_boxplot(all_epi_scores, className='col-6')
else:
gen_prediction_histogram(all_epi_scores, className='col-10')
gen_prediction_boxplot(all_epi_scores, className='col-10')
hr()
loc = f'{str("%s/report.html" % output_folder)}'
with open(loc, 'w') as f:
f.write(doc.render().replace("<", "<"))
# %%
def main():
parser = argparse.ArgumentParser(description="progrom usage")
parser.add_argument("-f", "--fasta", type=str, help="The input of antigen proteins")
parser.add_argument("-l", "--length", type=int, default=[16,15,12,20], nargs='*', help="The length range of peptides")
parser.add_argument("-o", "--out", type=str, help="prediction output path")
args = parser.parse_args()
fastafile = args.fasta
peptide_lengths = args.length
output_folder = args.out
print('Job start')
all_epi_scores = preiction(fastafile, output_folder, peptide_lengths)
generate_html_report(all_epi_scores, peptide_lengths, output_folder)
print('Job finish')
if __name__ == '__main__':
main()
# %%
# python /collab2/hxu6/B_cell_epitope/python/NetBCE_prediction.py -f '/collab2/hxu6/B_cell_epitope/data/seq/test.fasta' -l 16 15 14 20 21 -o '/collab2/hxu6/B_cell_epitope/prediction'
| [
"numpy.mean",
"numpy.unique",
"dominate.document",
"argparse.ArgumentParser",
"os.makedirs",
"plotly.graph_objects.Box",
"keras.models.model_from_json",
"plotly.graph_objects.Figure",
"numpy.array",
"os.path.isdir",
"collections.defaultdict",
"numpy.random.seed",
"numpy.linspace",
"dominat... | [((335, 356), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (349, 356), True, 'import numpy as np\n'), ((1960, 1984), 'collections.defaultdict', 'defaultdict', (['defaultdict'], {}), '(defaultdict)\n', (1971, 1984), False, 'from collections import defaultdict\n'), ((6973, 6984), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (6982, 6984), True, 'import plotly.graph_objects as go\n'), ((7142, 7155), 'dominate.util.raw', 'raw', (['plotlyjs'], {}), '(plotlyjs)\n', (7145, 7155), False, 'from dominate.util import raw\n'), ((8565, 8576), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (8574, 8576), True, 'import plotly.graph_objects as go\n'), ((8662, 8703), 'numpy.unique', 'np.unique', (['all_length'], {'return_counts': '(True)'}), '(all_length, return_counts=True)\n', (8671, 8703), True, 'import numpy as np\n'), ((10127, 10138), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (10136, 10138), True, 'import plotly.graph_objects as go\n'), ((12183, 12214), 'dominate.document', 'document', ([], {'title': '"""NetBCE Report"""'}), "(title='NetBCE Report')\n", (12191, 12214), False, 'from dominate import document\n'), ((17347, 17399), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""progrom usage"""'}), "(description='progrom usage')\n", (17370, 17399), False, 'import argparse\n'), ((637, 671), 'keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (652, 671), False, 'from keras.models import Model, load_model, model_from_json\n'), ((1163, 1178), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1170, 1178), True, 'import numpy as np\n'), ((5469, 5490), 'numpy.array', 'np.array', (['allele_data'], {}), '(allele_data)\n', (5477, 5490), True, 'import numpy as np\n'), ((5914, 5942), 'os.path.isdir', 'os.path.isdir', (['output_folder'], {}), '(output_folder)\n', (5927, 5942), False, 'import os, sys\n'), ((5952, 
5978), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (5963, 5978), False, 'import os, sys\n'), ((6425, 6433), 'dominate.util.raw', 'raw', (['fig'], {}), '(fig)\n', (6428, 6433), False, 'from dominate.util import raw\n'), ((6601, 6609), 'dominate.util.raw', 'raw', (['fig'], {}), '(fig)\n', (6604, 6609), False, 'from dominate.util import raw\n'), ((10639, 10661), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', 'N'], {}), '(0, 360, N)\n', (10650, 10661), True, 'import numpy as np\n'), ((10750, 10866), 'plotly.graph_objects.Box', 'go.Box', ([], {'y': 'yd', 'name': 'xd', 'boxpoints': '"""all"""', 'jitter': '(0.5)', 'whiskerwidth': '(0.2)', 'fillcolor': 'cls', 'marker_size': '(2)', 'line_width': '(1)'}), "(y=yd, name=xd, boxpoints='all', jitter=0.5, whiskerwidth=0.2,\n fillcolor=cls, marker_size=2, line_width=1)\n", (10756, 10866), True, 'import plotly.graph_objects as go\n'), ((4220, 4268), 'numpy.full', 'np.full', (['(1, AACategoryLen)', '(1.0 / AACategoryLen)'], {}), '((1, AACategoryLen), 1.0 / AACategoryLen)\n', (4227, 4268), True, 'import numpy as np\n'), ((16333, 16347), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16345, 16347), False, 'from datetime import datetime\n')] |
# @Date: 2019-05-13
# @Email: <EMAIL> <NAME>
# @Last modified time: 2020-10-07
import sys
#sys.path.insert(0, '/work/qiu/data4Keran/code/modelPredict')
sys.path.insert(0, '/home/xx02tmp/code3/modelPredict')
from img2mapC05 import img2mapC
import numpy as np
import time
sys.path.insert(0, '/home/xx02tmp/code3/dataPrepare')
import basic4dataPre
import h5py
import os
import glob2
import scipy.io as sio
from scipy import stats
import scipy.ndimage
import numpy.matlib
from numpy import argmax
from keras.utils import to_categorical
import skimage.measure
#image folder
imgFile_s2='/home/xx02tmp/image/to run49/'
#gt file folder
foldRef_LCZ=imgFile_s2
#class number
num_lcz=3
#stride to cut patches
step=24
patch_shape = (48, 48, 6)
#new line
img_shape = (48, 48)
#save folder
foldS='/home/xx02tmp/patch/patch50_11_02_48/'
params = {'dim_x': patch_shape[0],
'dim_y': patch_shape[1],
'dim_z': patch_shape[2],
'step': step,
'Bands': [0,1,2,3,4,5],
'scale':1.0,
'ratio':1,
'isSeg':0,
'nanValu':0,
'dim_x_img': img_shape[0],#the actuall extracted image patch
'dim_y_img': img_shape[1]}
#name of images
cities = ['summerrs2014_segA150sd']
#names of gt files
cities_ = ['class14_segA5530vp02n1_tra']
citiesval = ['summerrs2014_segA150sd']
cities_val = ['class14_segA5530vp02n1_val']
#tra and vali patch numbers of each images
patchNum = np.zeros((2,len(cities)), dtype= np.int64) ;
#class number of each class
classNum = np.zeros((len(cities),3), dtype= np.int64) ; #change here
if not os.path.exists(foldS+'vali/'):
os.makedirs(foldS+'vali/')
if not os.path.exists(foldS+'trai/'):
os.makedirs(foldS+'trai/')
###########training patch#################
for idCity in np.arange(len(cities)):
params['Bands'] = [0]
params['scale'] = 1
img2mapCLass=img2mapC(**params);
###lcz to patches
#load file
prj0, trans0, ref0= img2mapCLass.loadImgMat(foldRef_LCZ+cities_[idCity]+'.tif')
print('ref0 size', ref0.shape)
ref = np.int8(ref0)
#print('lcz file size', ref.shape, trans0, ref.dtype)
# to patches
patchLCZ, R, C = img2mapCLass.label2patches_all(ref, 1)
print('lcz patches, beginning', patchLCZ.shape, patchLCZ.dtype)
#load img
file =imgFile_s2 + cities[idCity] + '.tif'
params['Bands'] = [0,1,2,3,4,5]
params['scale'] = 1.0#!!!!!!!!!!!!!!!!!!!
img2mapCLass=img2mapC(**params);
prj0, trans0, img_= img2mapCLass.loadImgMat(file)
print('img size', img_.shape)
#image to patches
patch_summer, R, C, idxNan = img2mapCLass.Bands2patches(img_, 1)
print('image patches', patch_summer.shape, patch_summer.dtype)
#try not delete idxNan (by Karen)
print('lcz patches, before delete idxNan', patchLCZ.shape, patchLCZ.dtype)
patchLCZ = np.delete(patchLCZ, idxNan, axis=0)
print('lcz patches, after delete idxNan', patchLCZ.shape, patchLCZ.dtype)
############manupulate the patches############
#delete patches without lcz
#change here, try 0.5
c3Idx=basic4dataPre.patch2labelInx_lt(patchLCZ, 0, patchLCZ.shape[1], patchLCZ.shape[2]*patchLCZ.shape[1]*0.044*1)
patchLCZ = np.delete(patchLCZ, c3Idx, axis=0)
print('lcz patches, after delete noLCZ', patchLCZ.shape, patchLCZ.dtype)
patch_summer = np.delete(patch_summer, c3Idx, axis=0)
print('image patches, after delete noLCZ', patch_summer.shape, patch_summer.dtype)
#print('delete no lcz patch: ', patchHSE.shape, patch_summer.shape, patchLCZ.shape)
#NOT downsample to have a 90m gt
#keep original 90m because of the new inputs of label has resoluiton at 90m
#patchLCZ=skimage.measure.block_reduce(patchLCZ, (1,3,3,1), np.mean)
patchLCZ=skimage.measure.block_reduce(patchLCZ, (1,1,1,1), np.mean)
print('downsampled patchHSE:', patchLCZ.shape)
###statistic of class number
tmp=patchLCZ.reshape((-1,1))
for c in np.arange(1,4): #change here class=1, 2, 3,4
idx_=np.where(tmp==c)
idx = idx_[0]
classNum[idCity, c-1]=idx.shape[0]
#reset the labels
patchLCZ=patchLCZ-1; #0123, -1012
#print('print(np.unique(patchHSE))',np.unique(patchLCZ))
patchLCZ[patchLCZ==-1 ] = 3 #change here the low density class (0123)
#patchLCZ=basic4dataPre.patchIndex2oneHot(patchLCZ, num_lcz)
#print('final LCZ:', patchLCZ.shape, np.unique(patchLCZ))
print('print(np.unique(patchLCZ))',np.unique(patchLCZ))
print('shape', patchLCZ.shape, patch_summer.shape)
patchNum_tra =basic4dataPre.savePatch_fold_single(patch_summer, patchLCZ, foldS+'trai/', cities[idCity])
patchNum[0,idCity]=patchNum_tra
#patchNum[1,idCity]=patchNum_val
print(patchNum, classNum)
##############validation patch##############
print('start validation patch')
for idCity in np.arange(len(citiesval)):
params['Bands'] = [0]
params['scale'] = 1
img2mapCLass=img2mapC(**params);
###lcz to patches
#load file
prj0, trans0, ref0= img2mapCLass.loadImgMat(foldRef_LCZ+cities_val[idCity]+'.tif')
print('ref0 size', ref0.shape)
ref = np.int8(ref0)
#print('lcz file size', ref.shape, trans0, ref.dtype)
# to patches
patchLCZ, R, C = img2mapCLass.label2patches_all(ref, 1)
print('lcz patches, beginning', patchLCZ.shape, patchLCZ.dtype)
#load img
file =imgFile_s2 + citiesval[idCity] + '.tif'
params['Bands'] = [0,1,2,3,4,5]
params['scale'] = 1.0#!!!!!!!!!!!!!!!!!!!
img2mapCLass=img2mapC(**params);
prj0, trans0, img_= img2mapCLass.loadImgMat(file)
print('img size', img_.shape)
#image to patches
patch_summer, R, C, idxNan = img2mapCLass.Bands2patches(img_, 1)
print('image patches', patch_summer.shape, patch_summer.dtype)
#try not delete idxNan (by Karen)
print('lcz patches, before delete idxNan', patchLCZ.shape, patchLCZ.dtype)
patchLCZ = np.delete(patchLCZ, idxNan, axis=0)
print('lcz patches, after delete idxNan', patchLCZ.shape, patchLCZ.dtype)
############manupulate the patches############
#delete patches without lcz
#change here
c3Idx=basic4dataPre.patch2labelInx_lt(patchLCZ, 0, patchLCZ.shape[1], patchLCZ.shape[2]*patchLCZ.shape[1]*0.044*1)
patchLCZ = np.delete(patchLCZ, c3Idx, axis=0)
print('lcz patches, after delete noLCZ', patchLCZ.shape, patchLCZ.dtype)
patch_summer = np.delete(patch_summer, c3Idx, axis=0)
print('image patches, after delete noLCZ', patch_summer.shape, patch_summer.dtype)
#print('delete no lcz patch: ', patchHSE.shape, patch_summer.shape, patchLCZ.shape)
#NOT downsample to have a 90m gt
#keep original 90m because of the new inputs of label has resoluiton at 90m
#patchLCZ=skimage.measure.block_reduce(patchLCZ, (1,3,3,1), np.mean)
patchLCZ=skimage.measure.block_reduce(patchLCZ, (1,1,1,1), np.mean)
print('downsampled patchHSE:', patchLCZ.shape)
###statistic of class number
tmp=patchLCZ.reshape((-1,1))
for c in np.arange(1,4): #change here
idx_=np.where(tmp==c)
idx = idx_[0]
#classNum[idCity, c-1]=idx.shape[0]
#reset the labels
patchLCZ=patchLCZ-1;
#print('print(np.unique(patchHSE))',np.unique(patchLCZ))
patchLCZ[patchLCZ==-1 ] = 3 #change here
#patchLCZ=basic4dataPre.patchIndex2oneHot(patchLCZ, num_lcz)
#print('final LCZ:', patchLCZ.shape, np.unique(patchLCZ))
print('print(np.unique(patchLCZ))',np.unique(patchLCZ))
print('shape', patchLCZ.shape, patch_summer.shape)
patchNum_val =basic4dataPre.savePatch_fold_singlev(patch_summer, patchLCZ, foldS+'vali/', cities[idCity])
#patchNum[0,idCity]=patchNum_tra
patchNum[1,idCity]=patchNum_val
print(patchNum, classNum)
sio.savemat((foldS +'patchNum.mat'), {'patchNum': patchNum, 'classNum':classNum})
| [
"os.path.exists",
"numpy.int8",
"sys.path.insert",
"scipy.io.savemat",
"numpy.unique",
"os.makedirs",
"numpy.where",
"numpy.delete",
"basic4dataPre.savePatch_fold_singlev",
"img2mapC05.img2mapC",
"basic4dataPre.savePatch_fold_single",
"basic4dataPre.patch2labelInx_lt",
"numpy.arange"
] | [((162, 216), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/xx02tmp/code3/modelPredict"""'], {}), "(0, '/home/xx02tmp/code3/modelPredict')\n", (177, 216), False, 'import sys\n'), ((286, 339), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/xx02tmp/code3/dataPrepare"""'], {}), "(0, '/home/xx02tmp/code3/dataPrepare')\n", (301, 339), False, 'import sys\n'), ((7549, 7634), 'scipy.io.savemat', 'sio.savemat', (["(foldS + 'patchNum.mat')", "{'patchNum': patchNum, 'classNum': classNum}"], {}), "(foldS + 'patchNum.mat', {'patchNum': patchNum, 'classNum':\n classNum})\n", (7560, 7634), True, 'import scipy.io as sio\n'), ((1612, 1643), 'os.path.exists', 'os.path.exists', (["(foldS + 'vali/')"], {}), "(foldS + 'vali/')\n", (1626, 1643), False, 'import os\n'), ((1648, 1676), 'os.makedirs', 'os.makedirs', (["(foldS + 'vali/')"], {}), "(foldS + 'vali/')\n", (1659, 1676), False, 'import os\n'), ((1683, 1714), 'os.path.exists', 'os.path.exists', (["(foldS + 'trai/')"], {}), "(foldS + 'trai/')\n", (1697, 1714), False, 'import os\n'), ((1719, 1747), 'os.makedirs', 'os.makedirs', (["(foldS + 'trai/')"], {}), "(foldS + 'trai/')\n", (1730, 1747), False, 'import os\n'), ((1894, 1912), 'img2mapC05.img2mapC', 'img2mapC', ([], {}), '(**params)\n', (1902, 1912), False, 'from img2mapC05 import img2mapC\n'), ((2073, 2086), 'numpy.int8', 'np.int8', (['ref0'], {}), '(ref0)\n', (2080, 2086), True, 'import numpy as np\n'), ((2434, 2452), 'img2mapC05.img2mapC', 'img2mapC', ([], {}), '(**params)\n', (2442, 2452), False, 'from img2mapC05 import img2mapC\n'), ((2825, 2860), 'numpy.delete', 'np.delete', (['patchLCZ', 'idxNan'], {'axis': '(0)'}), '(patchLCZ, idxNan, axis=0)\n', (2834, 2860), True, 'import numpy as np\n'), ((3050, 3169), 'basic4dataPre.patch2labelInx_lt', 'basic4dataPre.patch2labelInx_lt', (['patchLCZ', '(0)', 'patchLCZ.shape[1]', '(patchLCZ.shape[2] * patchLCZ.shape[1] * 0.044 * 1)'], {}), '(patchLCZ, 0, patchLCZ.shape[1], patchLCZ.\n shape[2] * patchLCZ.shape[1] 
* 0.044 * 1)\n', (3081, 3169), False, 'import basic4dataPre\n'), ((3174, 3208), 'numpy.delete', 'np.delete', (['patchLCZ', 'c3Idx'], {'axis': '(0)'}), '(patchLCZ, c3Idx, axis=0)\n', (3183, 3208), True, 'import numpy as np\n'), ((3301, 3339), 'numpy.delete', 'np.delete', (['patch_summer', 'c3Idx'], {'axis': '(0)'}), '(patch_summer, c3Idx, axis=0)\n', (3310, 3339), True, 'import numpy as np\n'), ((3898, 3913), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (3907, 3913), True, 'import numpy as np\n'), ((4464, 4560), 'basic4dataPre.savePatch_fold_single', 'basic4dataPre.savePatch_fold_single', (['patch_summer', 'patchLCZ', "(foldS + 'trai/')", 'cities[idCity]'], {}), "(patch_summer, patchLCZ, foldS + 'trai/',\n cities[idCity])\n", (4499, 4560), False, 'import basic4dataPre\n'), ((4844, 4862), 'img2mapC05.img2mapC', 'img2mapC', ([], {}), '(**params)\n', (4852, 4862), False, 'from img2mapC05 import img2mapC\n'), ((5026, 5039), 'numpy.int8', 'np.int8', (['ref0'], {}), '(ref0)\n', (5033, 5039), True, 'import numpy as np\n'), ((5390, 5408), 'img2mapC05.img2mapC', 'img2mapC', ([], {}), '(**params)\n', (5398, 5408), False, 'from img2mapC05 import img2mapC\n'), ((5781, 5816), 'numpy.delete', 'np.delete', (['patchLCZ', 'idxNan'], {'axis': '(0)'}), '(patchLCZ, idxNan, axis=0)\n', (5790, 5816), True, 'import numpy as np\n'), ((5997, 6116), 'basic4dataPre.patch2labelInx_lt', 'basic4dataPre.patch2labelInx_lt', (['patchLCZ', '(0)', 'patchLCZ.shape[1]', '(patchLCZ.shape[2] * patchLCZ.shape[1] * 0.044 * 1)'], {}), '(patchLCZ, 0, patchLCZ.shape[1], patchLCZ.\n shape[2] * patchLCZ.shape[1] * 0.044 * 1)\n', (6028, 6116), False, 'import basic4dataPre\n'), ((6120, 6154), 'numpy.delete', 'np.delete', (['patchLCZ', 'c3Idx'], {'axis': '(0)'}), '(patchLCZ, c3Idx, axis=0)\n', (6129, 6154), True, 'import numpy as np\n'), ((6247, 6285), 'numpy.delete', 'np.delete', (['patch_summer', 'c3Idx'], {'axis': '(0)'}), '(patch_summer, c3Idx, axis=0)\n', (6256, 6285), True, 'import numpy as 
np\n'), ((6844, 6859), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {}), '(1, 4)\n', (6853, 6859), True, 'import numpy as np\n'), ((7353, 7450), 'basic4dataPre.savePatch_fold_singlev', 'basic4dataPre.savePatch_fold_singlev', (['patch_summer', 'patchLCZ', "(foldS + 'vali/')", 'cities[idCity]'], {}), "(patch_summer, patchLCZ, foldS +\n 'vali/', cities[idCity])\n", (7389, 7450), False, 'import basic4dataPre\n'), ((3951, 3969), 'numpy.where', 'np.where', (['(tmp == c)'], {}), '(tmp == c)\n', (3959, 3969), True, 'import numpy as np\n'), ((4372, 4391), 'numpy.unique', 'np.unique', (['patchLCZ'], {}), '(patchLCZ)\n', (4381, 4391), True, 'import numpy as np\n'), ((6881, 6899), 'numpy.where', 'np.where', (['(tmp == c)'], {}), '(tmp == c)\n', (6889, 6899), True, 'import numpy as np\n'), ((7261, 7280), 'numpy.unique', 'np.unique', (['patchLCZ'], {}), '(patchLCZ)\n', (7270, 7280), True, 'import numpy as np\n')] |
import numpy as np
from numba import jit
# Tridiag solver from Carnahan
# Not used in main program
def TDMAsolver_carnahan(A, B, C, D):
"""
Our solution for the TDMA solver based on carnahan (not used in main program)
"""
# send the vectors a, b, c, d with the coefficents
vector_len = D.shape[0] # defines the length of the coefficent vector (including a = 0)
V = np.zeros(vector_len) # solution vector
beta = np.zeros(vector_len) # temp vector
gamma = np.zeros(vector_len) # temp vector
beta[0] = B[0]
gamma[0] = D[0] / beta[0]
for i in range(1, vector_len):
beta[i] = B[i] - (A[i] * C[i - 1]) / beta[i - 1]
gamma[i] = (D[i] - A[i] * gamma[i - 1]) / beta[i]
# compute the final solution vector sv
V[vector_len - 1] = gamma[vector_len - 1]
for k in np.flip(np.arange(vector_len - 1)):
# k = vector_len - i
V[k] = gamma[k] - C[k] * V[k + 1] / beta[k]
return V
## Tri Diagonal Matrix Algorithm(a.k.a Thomas algorithm) solver
# https://gist.github.com/cbellei/8ab3ab8551b8dfc8b081c518ccd9ada9
# Not used in main program (slow)
def TDMAsolver_no_vec(a, b, c, d):
"""
TDMA solver, a b c d can be NumPy array type or Python list type.
refer to http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
and to http://www.cfd-online.com/Wiki/Tridiagonal_matrix_algorithm_-_TDMA_(Thomas_algorithm)
"""
# a = coeffs[1:, 0]
# b = coeffs[:, 1]
# c = coeffs[:-1, 2]
# d = coeffs[:, 3]
nf = len(d) # number of equations
ac, bc, cc, dc = map(np.array, (a, b, c, d)) # copy arrays
for it in range(1, nf):
mc = ac[it - 1] / bc[it - 1]
bc[it] = bc[it] - mc * cc[it - 1]
dc[it] = dc[it] - mc * dc[it - 1]
xc = bc
xc[-1] = dc[-1] / bc[-1]
for il in range(nf - 2, 1, -1):
xc[il] = (dc[il] - cc[il] * xc[il + 1]) / bc[il]
return xc
# Compiled TDMA solver
# https://stackoverflow.com/questions/8733015/tridiagonal-matrix-algorithm- \
# tdma-aka-thomas-algorithm-using-python-with-nump
# Used in main program
@jit
def TDMAsolver(a, b, c, d):
# Set up diagonal coefficients
n = len(d)
w = np.zeros(n - 1)
g = np.zeros(n)
p = np.zeros(n)
w[0] = c[0] / b[0]
g[0] = d[0] / b[0]
for i in range(1, n - 1):
w[i] = c[i] / (b[i] - a[i - 1] * w[i - 1])
for i in range(1, n):
g[i] = (d[i] - a[i - 1] * g[i - 1]) / (b[i] - a[i - 1] * w[i - 1])
p[n - 1] = g[n - 1]
for i in range(n - 1, 0, -1):
p[i - 1] = g[i - 1] - w[i - 1] * p[i]
return p
| [
"numpy.zeros",
"numpy.arange"
] | [((392, 412), 'numpy.zeros', 'np.zeros', (['vector_len'], {}), '(vector_len)\n', (400, 412), True, 'import numpy as np\n'), ((443, 463), 'numpy.zeros', 'np.zeros', (['vector_len'], {}), '(vector_len)\n', (451, 463), True, 'import numpy as np\n'), ((491, 511), 'numpy.zeros', 'np.zeros', (['vector_len'], {}), '(vector_len)\n', (499, 511), True, 'import numpy as np\n'), ((2184, 2199), 'numpy.zeros', 'np.zeros', (['(n - 1)'], {}), '(n - 1)\n', (2192, 2199), True, 'import numpy as np\n'), ((2208, 2219), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2216, 2219), True, 'import numpy as np\n'), ((2228, 2239), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2236, 2239), True, 'import numpy as np\n'), ((839, 864), 'numpy.arange', 'np.arange', (['(vector_len - 1)'], {}), '(vector_len - 1)\n', (848, 864), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright (c) 2021
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
import os
import os.path
import logging
import statistics
# ---------------------
# Third party libraries
# ---------------------
import PIL
import numpy as np
import matplotlib.pyplot as plt
import sklearn.cluster as cluster
from matplotlib.widgets import Button
#--------------
# local imports
# -------------
from streetool.utils import get_image, paging
# -----------------------
# Module global variables
# -----------------------
log = logging.getLogger("streetoool")
# ----------------
# Module constants
# ----------------
class Cycler:
def __init__(self, connection, subject_list, **kwargs):
self.subject = [] if subject_list is None else subject_list
self.i = 0
self.N = len(self.subject)
self.conn = connection
self.epsilon = kwargs.get('epsilon',1)
self.compute = kwargs.get('compute',False)
self.fix = kwargs.get('fix',False)
self.reset()
if self.compute:
self.one_compute_step(0)
else:
self.one_database_step(0)
def reset(self):
self.fig, self.axe = plt.subplots()
# The dimensions are [left, bottom, width, height]
# All quantities are in fractions of figure width and height.
axnext = self.fig.add_axes([0.90, 0.01, 0.095, 0.050])
self.bnext = Button(axnext, 'Next')
self.bnext.on_clicked(self.next)
axprev = self.fig.add_axes([0.79, 0.01, 0.095, 0.050])
self.bprev = Button(axprev, 'Previous')
self.bprev.on_clicked(self.prev)
self.axe.set_xlabel("X, pixels")
self.axe.set_ylabel("Y, pixels")
self.axim = None
self.sca = list()
self.txt = list()
self.prev_extent = dict()
def load(self, i):
subject_id = self.subject[i][0]
log.info(f"Searching for image whose subject id is {subject_id}")
filename, image_id = get_image(self.conn, subject_id)
if not filename:
raise Exception(f"No image for subject-id {subject_id}")
img = PIL.Image.open(filename)
width, height = img.size
if self.axim is None:
self.axim = self.axe.imshow(img, alpha=0.5, zorder=-1, aspect='equal', origin='upper')
self.prev_extent[(width,height)] = self.axim.get_extent()
else:
self.axim.set_data(img)
ext = self.prev_extent.get((width,height), None)
# Swap exteent components when current image is rotated respect to the previous
if ext is None:
ext = list(self.prev_extent[(height,width)])
tmp = ext[1]
ext[1] = ext[2]
ext[2] = tmp
self.prev_extent[(width,height)] = tuple(ext)
self.axim.set_extent(ext)
self.axe.relim()
self.axe.autoscale_view()
return subject_id, image_id
def update(self, i):
# remove whats drawn in the scatter plots
for sca in self.sca:
sca.remove()
self.sca = list()
for txt in self.txt:
txt.remove()
self.txt = list()
if self.compute:
self.one_compute_step(i)
else:
self.one_database_step(i)
self.fig.canvas.draw_idle()
self.fig.canvas.flush_events()
def next(self, event):
self.i = (self.i +1) % self.N
self.update(self.i)
def prev(self, event):
self.i = (self.i -1 + self.N) % self.N
self.update(self.i)
def one_database_step(self, i):
subject_id, image_id = self.load(i)
self.axe.set_title(f'Subject {subject_id}\nEC5 Id {image_id}\nLight Sources from the database')
cursor = self.conn.cursor()
cursor.execute('''
SELECT DISTINCT cluster_id
FROM spectra_classification_v
WHERE subject_id = :subject_id
ORDER BY cluster_id ASC
''',
{'subject_id': subject_id}
)
cluster_ids = cursor.fetchall()
for (cluster_id,) in cluster_ids:
cursor2 = self.conn.cursor()
cursor2.execute('''
SELECT source_x, source_y, epsilon
FROM spectra_classification_v
WHERE subject_id = :subject_id
AND cluster_id = :cluster_id
''',
{'subject_id': subject_id, 'cluster_id': cluster_id}
)
coordinates = cursor2.fetchall()
N_Classifications = len(coordinates)
log.info(f"Subject {subject_id}: cluster_id {cluster_id} has {N_Classifications} data points")
X, Y, EPS = tuple(zip(*coordinates))
Xc = statistics.mean(X); Yc = statistics.mean(Y);
sca = self.axe.scatter(X, Y, marker='o', zorder=1)
self.sca.append(sca)
txt = self.axe.text(Xc+EPS[0], Yc+EPS[0], cluster_id, fontsize=9, zorder=2)
self.txt.append(txt)
    def one_compute_step(self, i):
        """Cluster the raw classification points with DBSCAN and plot them.

        Points labelled -1 by DBSCAN are noise: when self.fix is true they are
        promoted to fresh single-point cluster ids (continuing after the
        largest real cluster id), otherwise they are all labelled with the
        raw noise id.
        """
        fix = self.fix
        epsilon = self.epsilon
        subject_id, image_id = self.load(i)
        self.axe.set_title(f'Subject {subject_id}\nEC5 Id {image_id}\nDetected light sources by DBSCAN (\u03B5 = {epsilon} px)')
        cursor = self.conn.cursor()
        cursor.execute('''
            SELECT source_x, source_y
            FROM spectra_classification_v
            WHERE subject_id = :subject_id
            ''',
            {'subject_id': subject_id}
        )
        coordinates = cursor.fetchall()
        N_Classifications = len(coordinates)
        coordinates = np.array(coordinates)
        model = cluster.DBSCAN(eps=epsilon, min_samples=2)
        # Fit the model and predict clusters
        yhat = model.fit_predict(coordinates)
        # retrieve unique clusters
        clusters = np.unique(yhat)
        log.info(f"Subject {subject_id}: {len(clusters)} clusters from {N_Classifications} classifications, ids: {clusters}")
        for cl in clusters:
            # get row indexes for samples with this cluster
            row_ix = np.where(yhat == cl)
            X = coordinates[row_ix, 0][0]; Y = coordinates[row_ix, 1][0]
            if(cl != -1):
                # Regular cluster: plot all points and label the centroid
                # with a 1-based cluster number.
                Xc = np.average(X); Yc = np.average(Y)
                sca = self.axe.scatter(X, Y, marker='o', zorder=1)
                self.sca.append(sca)
                txt = self.axe.text(Xc+epsilon, Yc+epsilon, cl+1, fontsize=9, zorder=2)
                self.txt.append(txt)
            elif fix:
                # Promote each noise point to its own new cluster id.
                start = max(clusters)+2 # we will shift also the normal ones ...
                # NOTE(review): this inner loop variable shadows the method
                # parameter i; harmless because i is not used afterwards,
                # but worth renaming.
                for i in range(len(X)) :
                    cluster_id = start + i
                    sca = self.axe.scatter(X[i], Y[i], marker='o', zorder=1)
                    self.sca.append(sca)
                    txt = self.axe.text(X[i]+epsilon, Y[i]+epsilon, cluster_id, fontsize=9, zorder=2)
                    self.txt.append(txt)
            else:
                # Noise points, no fixing: plot them and label each with the
                # raw noise id (cl == -1).
                sca = self.axe.scatter(X, Y, marker='o', zorder=1)
                self.sca.append(sca)
                start = max(clusters)+2 # we will shift also the normal ones ...
                for i in range(len(X)) :
                    txt = self.axe.text(X[i]+epsilon, Y[i]+epsilon, cl, fontsize=9, zorder=2)
                    self.txt.append(txt)
# ========
# COMMANDS
# ========
def purge(connection, options):
    """Reset every computed cluster assignment so clustering can be re-run.

    `options` is accepted for command-dispatch uniformity and is unused.
    """
    log.info("Purging all source ids from the database")
    cur = connection.cursor()
    cur.execute('''
        UPDATE light_sources_t
        SET cluster_id = NULL , aggregated = NULL
        WHERE cluster_id IS NOT NULL;
        '''
    )
    connection.commit()
def duplicates(connection, options):
    """List classification points assigned to more than one cluster."""
    cur = connection.cursor()
    # Self-join: the same (x, y) point of a subject appearing under two
    # distinct cluster ids.
    cur.execute('''
        SELECT a.subject_id, a.cluster_id, b.cluster_id, a.source_x, a.source_y
        FROM spectra_classification_v AS a
        JOIN spectra_classification_v AS b
        ON a.subject_id = b.subject_id AND a.source_x = b.source_x AND a.source_y = b.source_y
        WHERE a.cluster_id < b.cluster_id
        '''
    )
    column_titles = ("Subject Id", "Source Id A", "Source Id B", "X", "Y")
    paging(iterable=cur, headers=column_titles)
def plot(connection, options):
    '''Perform clustering analysis over source light selection'''
    subject_id = options.subject_id
    ec5_id = options.ec5_id
    # Resolve the list of subject ids to cycle through; the three CLI
    # alternatives only differed in how that list was obtained, so the
    # previously-triplicated Cycler construction is now done once.
    if subject_id is not None:
        subject_ids = [(subject_id,)]
    elif ec5_id is not None:
        cursor = connection.cursor()
        cursor.execute("SELECT subject_id FROM spectra_classification_t WHERE image_id = :image_id", {'image_id': ec5_id})
        subject_ids = cursor.fetchall()
    else:
        cursor = connection.cursor()
        cursor.execute("SELECT DISTINCT subject_id FROM spectra_classification_t ORDER BY subject_id")
        subject_ids = cursor.fetchall()
    Cycler(connection, subject_ids,
        compute = options.compute,
        epsilon = options.epsilon,
        fix     = options.fix
    )
    plt.show()
def view(connection, options):
    """Display classification statistics for one or all subjects.

    Exactly one of options.all / options.summary / options.normal /
    options.detail selects the report; all but --all require --subject-id.
    NOTE(review): if none of the four flags is set, `header` is unbound and
    the paging() call raises NameError — confirm the CLI guarantees one flag.
    """
    subject_id = options.subject_id
    cursor = connection.cursor()
    if options.all:
        # Per-subject totals over the whole table.
        cursor.execute('''
            SELECT subject_id, count(*), count(DISTINCT cluster_id)
            FROM spectra_classification_t
            GROUP BY subject_id
            ''',
        )
        header = ("Subject Id", "# Classif.", "# Source Ids")
    elif options.summary:
        if not subject_id:
            raise ValueError("missing --subject-id")
        # Totals for a single subject.
        cursor.execute('''
            SELECT subject_id, count(*), count(DISTINCT cluster_id)
            FROM spectra_classification_t
            WHERE subject_id = :subject_id
            ''',
            {'subject_id': subject_id}
        )
        header = ("Subject Id", "# Classif.", "# Source Ids")
    elif options.normal:
        if not subject_id:
            raise ValueError("missing --subject-id")
        # Per-cluster classification counts for one subject.
        cursor.execute('''
            SELECT subject_id, cluster_id, count(*)
            FROM spectra_classification_t
            WHERE subject_id = :subject_id
            GROUP BY cluster_id
            ''',
            {'subject_id': subject_id}
        )
        header = ("Subject Id", "Source Id", "# Classif.")
    elif options.detail:
        if not subject_id:
            raise ValueError("missing --subject-id")
        # Every classification row for one subject.
        cursor.execute('''
            SELECT subject_id, cluster_id, source_x, source_y, spectrum_type
            FROM spectra_classification_t
            WHERE subject_id = :subject_id
            ''',
            {'subject_id': subject_id}
        )
        header = ("Subject Id", "Source Id", "X", "Y", "Spectrum")
    paging(
        iterable = cursor,
        headers = header,
    ) | [
"logging.getLogger",
"streetool.utils.paging",
"statistics.mean",
"PIL.Image.open",
"numpy.unique",
"numpy.average",
"numpy.where",
"matplotlib.widgets.Button",
"streetool.utils.get_image",
"numpy.array",
"matplotlib.pyplot.subplots",
"sklearn.cluster.DBSCAN",
"matplotlib.pyplot.show"
] | [((780, 811), 'logging.getLogger', 'logging.getLogger', (['"""streetoool"""'], {}), "('streetoool')\n", (797, 811), False, 'import logging\n'), ((8498, 8538), 'streetool.utils.paging', 'paging', ([], {'iterable': 'cursor', 'headers': 'headers'}), '(iterable=cursor, headers=headers)\n', (8504, 8538), False, 'from streetool.utils import get_image, paging\n'), ((9682, 9692), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9690, 9692), True, 'import matplotlib.pyplot as plt\n'), ((11361, 11400), 'streetool.utils.paging', 'paging', ([], {'iterable': 'cursor', 'headers': 'header'}), '(iterable=cursor, headers=header)\n', (11367, 11400), False, 'from streetool.utils import get_image, paging\n'), ((1430, 1444), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1442, 1444), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1680), 'matplotlib.widgets.Button', 'Button', (['axnext', '"""Next"""'], {}), "(axnext, 'Next')\n", (1664, 1680), False, 'from matplotlib.widgets import Button\n'), ((1806, 1832), 'matplotlib.widgets.Button', 'Button', (['axprev', '"""Previous"""'], {}), "(axprev, 'Previous')\n", (1812, 1832), False, 'from matplotlib.widgets import Button\n'), ((2241, 2273), 'streetool.utils.get_image', 'get_image', (['self.conn', 'subject_id'], {}), '(self.conn, subject_id)\n', (2250, 2273), False, 'from streetool.utils import get_image, paging\n'), ((2382, 2406), 'PIL.Image.open', 'PIL.Image.open', (['filename'], {}), '(filename)\n', (2396, 2406), False, 'import PIL\n'), ((5963, 5984), 'numpy.array', 'np.array', (['coordinates'], {}), '(coordinates)\n', (5971, 5984), True, 'import numpy as np\n'), ((6001, 6043), 'sklearn.cluster.DBSCAN', 'cluster.DBSCAN', ([], {'eps': 'epsilon', 'min_samples': '(2)'}), '(eps=epsilon, min_samples=2)\n', (6015, 6043), True, 'import sklearn.cluster as cluster\n'), ((6189, 6204), 'numpy.unique', 'np.unique', (['yhat'], {}), '(yhat)\n', (6198, 6204), True, 'import numpy as np\n'), ((5063, 5081), 
'statistics.mean', 'statistics.mean', (['X'], {}), '(X)\n', (5078, 5081), False, 'import statistics\n'), ((5088, 5106), 'statistics.mean', 'statistics.mean', (['Y'], {}), '(Y)\n', (5103, 5106), False, 'import statistics\n'), ((6440, 6460), 'numpy.where', 'np.where', (['(yhat == cl)'], {}), '(yhat == cl)\n', (6448, 6460), True, 'import numpy as np\n'), ((6581, 6594), 'numpy.average', 'np.average', (['X'], {}), '(X)\n', (6591, 6594), True, 'import numpy as np\n'), ((6601, 6614), 'numpy.average', 'np.average', (['Y'], {}), '(Y)\n', (6611, 6614), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import argparse
import copy
from collections import defaultdict
from pathlib import Path
import os
import sys
import time
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score, precision_recall_fscore_support, log_loss, average_precision_score
import torch
import torch.optim
from torch.utils.data import DataLoader
import src.configuration as C
from src.models import get_img_model
import src.utils as utils
from src.utils import get_logger
from src.criterion import ImgLoss
from src.datasets import RsnaDataset, RsnaDataset3D
import src.factory as factory
def get_args():
    """Parse and return the command-line arguments for this script."""
    p = argparse.ArgumentParser()
    p.add_argument("mode", choices=['train', 'valid', 'test'], help="train valid")
    p.add_argument("config", help="Config file path")
    p.add_argument("--fold", type=int, default=0, help="fold")
    p.add_argument("--apex", action='store_true', default=False, help="apex")
    p.add_argument("--output", "-o", help="output path for validation")
    p.add_argument("--snapshot", "-s", help="snapshot weight path")
    # parser.add_argument("--resume-from", help="snapshot to resume train")
    return p.parse_args()
# Module-level setup: parse CLI args, read SETTINGS.json, create the output
# directory and configure per-fold logging.
args = get_args()
if args.apex:
    from apex import amp
EXP_ID = os.path.splitext(os.path.basename(args.config))[0]
SEED = 42 + 1
DEVICE = "cuda"
import json
# Fixed: the settings file handle was opened and never closed; `setting_json`
# still ends up holding the parsed dict, exactly as before.
with open('SETTINGS.json', 'r') as _settings_file:
    setting_json = json.load(_settings_file)
output_dir = Path(setting_json["OUTPUT"]) / EXP_ID
output_dir.mkdir(exist_ok=True, parents=True)
_logger = get_logger(output_dir / f"fold{args.fold}_output.log")
def log(msg): _logger.info(msg)
def log_w(msg): _logger.warn(msg)
log(f'EXP {EXP_ID} start')
def main():
    """Entry point: build the model for one fold and dispatch on mode."""
    config = utils.load_config(args.config)
    config["weighted"] = "weighted" in config.keys()
    # Mirror the CLI arguments into the config dict.
    for key in ("mode", "fold", "apex", "output", "snapshot"):
        config[key] = getattr(args, key)
    # config["resume_from"] = args.resume_from if args.resume_from
    utils.set_seed(SEED)
    device = torch.device(DEVICE)
    log(f"Fold {args.fold}")
    model = factory.get_model(config).to(device)
    log(f"Model type: {model.__class__.__name__}")
    mode = config["mode"]
    if mode == 'train':
        train(config, model)
        valid(config, model)
    elif mode == 'valid':
        valid(config, model)
    elif mode == 'test':
        # Test mode validates over every exam, not just positive ones.
        valid(config, model, all_exam=True)
def valid(_cfg, model, all_exam=False):
    """Validate `model` on its fold and pickle the run_nn results.

    When `all_exam` is true the dataset restriction to positive exams is
    lifted so every slice is evaluated (used by the `test` mode).
    """
    cfg = copy.deepcopy(_cfg)
    if all_exam:
        # validation for all slices
        cfg["dataset"]["param"]["posexam_only"] = False
    assert cfg["output"]
    assert not os.path.exists(cfg["output"])
    criterion = factory.get_criterion(cfg)
    ckpt_path = os.path.join(output_dir, 'fold%d_ep0.pt' % (cfg['fold']))
    print(f'best path: {str(ckpt_path)}')
    utils.load_model(str(ckpt_path), model)
    valid_loader = factory.get_loader_valid(cfg)
    with torch.no_grad():
        results = run_nn(cfg, 'valid', model, valid_loader, criterion=criterion)
    utils.save_pickle(results, cfg["output"])
    log('saved to %s' % cfg["output"])
def train(cfg, model):
    """Train `model` for cfg["epoch"] epochs, validating and checkpointing
    after every epoch.

    Supports resuming from cfg["resume_from"] (restores best loss/score/epoch
    and resets the lr from the config) and mixed precision via apex when
    cfg["apex"] is set. The best checkpoint is the one with the lowest
    validation loss.
    """
    criterion = factory.get_criterion(cfg)
    # optim = torch.optim.Adam(model.parameters(), lr=1e-3)
    optim = factory.get_optimizer(cfg, model.parameters())
    # Running record of the best validation result seen so far.
    best = {
        'loss': float('inf'),
        'score': 0.0,
        'epoch': -1,
    }
    if "resume_from" in cfg.keys() and cfg["resume_from"]:
        detail = utils.load_model(cfg["resume_from"], model, optim=optim)
        best.update({
            'loss': detail['loss'],
            'score': detail['score'],
            'epoch': detail['epoch'],
        })
        # to set lr manually after resumed
        for param_group in optim.param_groups:
            param_group['lr'] = cfg["optimizer"]["param"]["lr"]
        log(f"initial lr {utils.get_lr(optim)}")
    scheduler, is_reduce_lr = factory.get_scheduler(cfg, optim)
    log(f"is_reduce_lr: {is_reduce_lr}")
    loader_train = factory.get_loader_train(cfg)
    loader_valid = factory.get_loader_valid(cfg)
    log('train data: loaded %d records' % len(loader_train.dataset))
    log('valid data: loaded %d records' % len(loader_valid.dataset))
    log('apex %s' % cfg["apex"])
    if cfg["apex"]:
        amp.initialize(model, optim, opt_level='O1')
    # Resume counting epochs after the restored best epoch (or from 0).
    for epoch in range(best['epoch']+1, cfg["epoch"]):
        log(f'\n----- epoch {epoch} -----')
        run_nn(cfg, 'train', model, loader_train, criterion=criterion, optim=optim, apex=cfg["apex"])
        with torch.no_grad():
            val = run_nn(cfg, 'valid', model, loader_valid, criterion=criterion)
        detail = {
            'score': val['score'],
            'loss': val['loss'],
            'epoch': epoch,
        }
        # Save a "best" checkpoint on improvement, and always a rolling one.
        if val['loss'] <= best['loss']:
            best.update(detail)
            utils.save_model(model, optim, detail, cfg["fold"], output_dir, best=True)
        utils.save_model(model, optim, detail, cfg["fold"], output_dir)
        log('[best] ep:%d loss:%.4f score:%.4f' % (best['epoch'], best['loss'], best['score']))
        if is_reduce_lr:
            scheduler.step(val['loss']) # reducelronplateau
        else:
            scheduler.step()
def run_nn(cfg, mode, model, loader, criterion=None, optim=None, scheduler=None, apex=None):
    """Run one epoch over `loader` in 'train', 'valid' or 'test' mode.

    In 'train' mode performs backprop with gradient accumulation every
    cfg["n_grad_acc"] batches (optionally via apex AMP). Returns a dict with
    the sample ids, per-key target/output arrays, the mean loss and — outside
    'test' mode — the computed metrics, with 'score' holding the metric used
    for model selection.
    NOTE(review): the `scheduler` parameter is accepted but never used here.
    """
    print('weighted:', cfg['weighted'])
    if mode in ['train']:
        model.train()
    elif mode in ['valid', 'test']:
        model.eval()
    else:
        raise RuntimeError('Unexpected mode %s' % mode)
    t1 = time.time()
    losses = []
    ids_all = []
    targets_all = defaultdict(list)
    outputs_all = defaultdict(list)
    for i, (inputs, targets, ids, weights) in enumerate(loader):
        batch_size = len(inputs)  # NOTE(review): unused
        inputs, weights = inputs.cuda(), weights.cuda()
        for k in targets.keys():
            targets[k] = targets[k].cuda()
        outputs = model(inputs)
        if mode in ['train', 'valid']:
            non_weight_losses = criterion(outputs, targets)
            weights = weights * 2 # Set the average of the weight to 1
            if not cfg['weighted']:
                # Unweighted training: collapse the weights to a scalar 1.
                weights = 1
            loss = torch.mean(non_weight_losses*weights)
            with torch.no_grad():
                losses.append(loss.item())
        if mode in ['train']:
            if apex:
                with amp.scale_loss(loss, optim) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward() # accumulate loss
            # Gradient accumulation: only step/flush every n_grad_acc batches.
            if (i+1) % cfg["n_grad_acc"] == 0:
                optim.step() # update
                optim.zero_grad() # flush
        # Collect predictions (sigmoid probabilities) and targets on CPU.
        with torch.no_grad():
            ids_all.extend(ids)
            for _k in outputs.keys(): # iter over output keys
                outputs_all[_k].extend(torch.sigmoid(outputs[_k]).cpu().numpy())
            if mode != 'test':
                for _k in list(outputs.keys()) + ['pe_present_portion']:
                    targets_all[_k].extend(targets[_k].cpu().numpy())
        #outputs_all.extend(torch.sigmoid(outputs["pe_present_on_image"]).cpu().numpy())
        #outputs_all.append(torch.softmax(outputs, dim=1).cpu().numpy())
        elapsed = int(time.time() - t1)
        eta = int(elapsed / (i+1) * (len(loader)-(i+1)))
        progress = f'\r[{mode}] {i+1}/{len(loader)} {elapsed}(s) eta:{eta}(s) loss:{(np.sum(losses)/(i+1)):.6f} loss200:{(np.sum(losses[-200:])/(min(i+1,200))):.6f} lr:{utils.get_lr(optim):.2e} '
        print(progress, end='')
        sys.stdout.flush()
    # NOTE(review): `i` is the last loop index; an empty loader would raise
    # NameError here.
    result = {
        'ids': ids_all,
        'targets': dict([(k, np.array(v)) for k, v in targets_all.items()]),
        'outputs': dict([(k, np.array(v)) for k, v in outputs_all.items()]),
        'loss': np.sum(losses) / (i+1),
    }
    if mode in ['train', 'valid']:
        # Metric keys follow the model's output heads; the first head's
        # logloss is the selection score.
        KEYS = list(result["outputs"].keys())
        SCORE_KEY = "logloss_" + KEYS[0]
        ### indeterminate
        # SCORE_KEY = "logloss_indeterminate"
        # KEYS = ["indeterminate", "qa_contrast", "qa_motion"]
        # ### PE+pe_position
        # SCORE_KEY = "logloss_pe_present_on_image"
        # KEYS = ["pe_present_on_image"] + ["rightsided_pe", "leftsided_pe", "central_pe"]
        ### PE+pe_type(acute/choronic)
        # SCORE_KEY = "logloss_pe_present_on_image"
        # KEYS = ["pe_present_on_image"] # + ["chronic_pe", "acute_and_chronic_pe"] # + ["acute_pe"]
        result.update(calc_acc(result['targets'], result['outputs'], KEYS))
        result.update(calc_f1(result['targets'], result['outputs'], KEYS))
        result.update(calc_map(result['targets'], result['outputs'], KEYS))
        result.update(calc_logloss(result['targets'], result['outputs'], KEYS))
        if "pe_present_on_image" in KEYS:
            result.update(calc_map_dummy(result['targets'], result['outputs'], KEYS)) # debug
            result.update(calc_logloss_weighted_present(result['targets'], result['outputs']))
        result['score'] = result[SCORE_KEY]
        # SHOW_KEYS = ["acc_indeterminate", "acc_qa_contrast", "acc_qa_motion"]
        SHOW_KEYS = [k for k in result.keys() if not k in ['ids', 'targets', 'outputs', 'loss']]
        # log(progress + ' '.join([k+':%.4f ' % result[k] for k in SHOW_KEYS]))
        # Group the metric printout one line per metric family.
        _metric_str = ""
        for index in range(0, len(SHOW_KEYS), len(KEYS)):
            _metric_str += ' ' + ' '.join([k+':%.4f ' % result[k] for k in SHOW_KEYS[index:index+len(KEYS)]]) + '\n'
        log(progress + "\n" + _metric_str)
        log('ave_loss:%.6f' % (result['loss']))
    else:
        log('')
    return result
# metric functions. return {"metric_name": val}
def calc_acc_nokey(targets, outputs): # not used now
    """Accuracy of rounded predictions against targets, without metric keys."""
    hits = np.sum(targets == np.round(outputs))
    return {"acc": hits / float(len(targets))}
def calc_acc(targets, outputs, keys):
    """Per-key accuracy of rounded predictions, as {"acc_<key>": value}."""
    return {
        "acc_" + k: np.sum(np.round(targets[k]) == np.round(outputs[k])) / float(len(targets[k]))
        for k in keys
    }
def calc_f1(targets, outputs, keys):
    """Binary precision / recall / F1 per key.

    Metrics are returned grouped by family (all pre_*, then all rec_*, then
    all f1_*) to match the ordering of the other calc_* helpers.
    """
    pre_d, rec_d, f1_d = {}, {}, {}
    for k in keys:
        pre, rec, f1, _ = precision_recall_fscore_support(np.round(targets[k]), np.round(outputs[k]), average='binary')
        pre_d["pre_" + k] = pre
        rec_d["rec_" + k] = rec
        f1_d["f1_" + k] = f1
    # keep same order as other metrics: pre_key1,pre_key2,... ,rec_key1,rec_key2,...
    return {**pre_d, **rec_d, **f1_d}
def calc_map(targets, outputs, keys):
    """Average precision (area under the PR curve) per key."""
    return {"ap_" + k: average_precision_score(np.round(targets[k]), outputs[k]) for k in keys}
def calc_map_dummy(targets, outputs, keys): # use pe_present_on_image as prediction
    """Baseline AP per key, scoring every target with the pe_present_on_image
    prediction — a sanity check of how well plain PE presence ranks each
    auxiliary target.
    """
    ret = {}
    for k in keys:
        # Fixed: the key was missing the underscore separator, producing
        # names like "ap_dummype_present_on_image" instead of
        # "ap_dummy_pe_present_on_image".
        ret["ap_dummy_" + k] = average_precision_score(np.round(targets[k]), outputs['pe_present_on_image'])
    return ret
def calc_logloss(targets, outputs, keys):
    """Binary cross-entropy per key (targets rounded to {0, 1})."""
    return {
        "logloss_" + k: log_loss(np.round(targets[k]), outputs[k], labels=[0,1], eps=1e-7)
        for k in keys
    }
def calc_logloss_weighted_present(targets, outputs):
    """Log loss of pe_present_on_image weighted by each sample's
    pe_present_portion (competition-style image weighting)."""
    sample_w = targets['pe_present_portion']
    score = log_loss(
        np.round(targets['pe_present_on_image']),
        outputs['pe_present_on_image'],
        sample_weight=sample_w, labels=[0,1], eps=1e-7)
    return {'logloss_pe_present_weighted': score}
# Script entry point: run training/validation only when executed directly.
if __name__ == "__main__":
    main()
| [
"apex.amp.scale_loss",
"src.factory.get_model",
"src.utils.load_model",
"numpy.array",
"apex.amp.initialize",
"copy.deepcopy",
"src.factory.get_loader_train",
"os.path.exists",
"argparse.ArgumentParser",
"pathlib.Path",
"src.utils.save_pickle",
"torch.mean",
"src.utils.get_logger",
"src.ut... | [((1436, 1459), 'json.load', 'json.load', (['setting_json'], {}), '(setting_json)\n', (1445, 1459), False, 'import json\n'), ((1568, 1622), 'src.utils.get_logger', 'get_logger', (["(output_dir / f'fold{args.fold}_output.log')"], {}), "(output_dir / f'fold{args.fold}_output.log')\n", (1578, 1622), False, 'from src.utils import get_logger\n'), ((637, 662), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (660, 662), False, 'import argparse\n'), ((1474, 1502), 'pathlib.Path', 'Path', (["setting_json['OUTPUT']"], {}), "(setting_json['OUTPUT'])\n", (1478, 1502), False, 'from pathlib import Path\n'), ((1742, 1772), 'src.utils.load_config', 'utils.load_config', (['args.config'], {}), '(args.config)\n', (1759, 1772), True, 'import src.utils as utils\n'), ((2092, 2112), 'src.utils.set_seed', 'utils.set_seed', (['SEED'], {}), '(SEED)\n', (2106, 2112), True, 'import src.utils as utils\n'), ((2126, 2146), 'torch.device', 'torch.device', (['DEVICE'], {}), '(DEVICE)\n', (2138, 2146), False, 'import torch\n'), ((2567, 2586), 'copy.deepcopy', 'copy.deepcopy', (['_cfg'], {}), '(_cfg)\n', (2580, 2586), False, 'import copy\n'), ((2775, 2801), 'src.factory.get_criterion', 'factory.get_criterion', (['cfg'], {}), '(cfg)\n', (2796, 2801), True, 'import src.factory as factory\n'), ((2814, 2869), 'os.path.join', 'os.path.join', (['output_dir', "('fold%d_ep0.pt' % cfg['fold'])"], {}), "(output_dir, 'fold%d_ep0.pt' % cfg['fold'])\n", (2826, 2869), False, 'import os\n'), ((2968, 2997), 'src.factory.get_loader_valid', 'factory.get_loader_valid', (['cfg'], {}), '(cfg)\n', (2992, 2997), True, 'import src.factory as factory\n'), ((3109, 3150), 'src.utils.save_pickle', 'utils.save_pickle', (['results', "cfg['output']"], {}), "(results, cfg['output'])\n", (3126, 3150), True, 'import src.utils as utils\n'), ((3231, 3257), 'src.factory.get_criterion', 'factory.get_criterion', (['cfg'], {}), '(cfg)\n', (3252, 3257), True, 'import src.factory as factory\n'), ((3983, 
4016), 'src.factory.get_scheduler', 'factory.get_scheduler', (['cfg', 'optim'], {}), '(cfg, optim)\n', (4004, 4016), True, 'import src.factory as factory\n'), ((4078, 4107), 'src.factory.get_loader_train', 'factory.get_loader_train', (['cfg'], {}), '(cfg)\n', (4102, 4107), True, 'import src.factory as factory\n'), ((4127, 4156), 'src.factory.get_loader_valid', 'factory.get_loader_valid', (['cfg'], {}), '(cfg)\n', (4151, 4156), True, 'import src.factory as factory\n'), ((5618, 5629), 'time.time', 'time.time', ([], {}), '()\n', (5627, 5629), False, 'import time\n'), ((5681, 5698), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5692, 5698), False, 'from collections import defaultdict\n'), ((5717, 5734), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5728, 5734), False, 'from collections import defaultdict\n'), ((1302, 1331), 'os.path.basename', 'os.path.basename', (['args.config'], {}), '(args.config)\n', (1318, 1331), False, 'import os\n'), ((2729, 2758), 'os.path.exists', 'os.path.exists', (["cfg['output']"], {}), "(cfg['output'])\n", (2743, 2758), False, 'import os\n'), ((3007, 3022), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3020, 3022), False, 'import torch\n'), ((3546, 3602), 'src.utils.load_model', 'utils.load_model', (["cfg['resume_from']", 'model'], {'optim': 'optim'}), "(cfg['resume_from'], model, optim=optim)\n", (3562, 3602), True, 'import src.utils as utils\n'), ((4358, 4402), 'apex.amp.initialize', 'amp.initialize', (['model', 'optim'], {'opt_level': '"""O1"""'}), "(model, optim, opt_level='O1')\n", (4372, 4402), False, 'from apex import amp\n'), ((5013, 5076), 'src.utils.save_model', 'utils.save_model', (['model', 'optim', 'detail', "cfg['fold']", 'output_dir'], {}), "(model, optim, detail, cfg['fold'], output_dir)\n", (5029, 5076), True, 'import src.utils as utils\n'), ((7612, 7630), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7628, 7630), False, 'import sys\n'), ((11395, 11435), 
'numpy.round', 'np.round', (["targets['pe_present_on_image']"], {}), "(targets['pe_present_on_image'])\n", (11403, 11435), True, 'import numpy as np\n'), ((2190, 2215), 'src.factory.get_model', 'factory.get_model', (['config'], {}), '(config)\n', (2207, 2215), True, 'import src.factory as factory\n'), ((4621, 4636), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4634, 4636), False, 'import torch\n'), ((4929, 5003), 'src.utils.save_model', 'utils.save_model', (['model', 'optim', 'detail', "cfg['fold']", 'output_dir'], {'best': '(True)'}), "(model, optim, detail, cfg['fold'], output_dir, best=True)\n", (4945, 5003), True, 'import src.utils as utils\n'), ((6256, 6295), 'torch.mean', 'torch.mean', (['(non_weight_losses * weights)'], {}), '(non_weight_losses * weights)\n', (6266, 6295), False, 'import torch\n'), ((6740, 6755), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6753, 6755), False, 'import torch\n'), ((7841, 7855), 'numpy.sum', 'np.sum', (['losses'], {}), '(losses)\n', (7847, 7855), True, 'import numpy as np\n'), ((9811, 9828), 'numpy.round', 'np.round', (['outputs'], {}), '(outputs)\n', (9819, 9828), True, 'import numpy as np\n'), ((10211, 10231), 'numpy.round', 'np.round', (['targets[k]'], {}), '(targets[k])\n', (10219, 10231), True, 'import numpy as np\n'), ((10233, 10253), 'numpy.round', 'np.round', (['outputs[k]'], {}), '(outputs[k])\n', (10241, 10253), True, 'import numpy as np\n'), ((10801, 10821), 'numpy.round', 'np.round', (['targets[k]'], {}), '(targets[k])\n', (10809, 10821), True, 'import numpy as np\n'), ((11021, 11041), 'numpy.round', 'np.round', (['targets[k]'], {}), '(targets[k])\n', (11029, 11041), True, 'import numpy as np\n'), ((11203, 11223), 'numpy.round', 'np.round', (['targets[k]'], {}), '(targets[k])\n', (11211, 11223), True, 'import numpy as np\n'), ((6311, 6326), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6324, 6326), False, 'import torch\n'), ((7301, 7312), 'time.time', 'time.time', ([], {}), '()\n', (7310, 
7312), False, 'import time\n'), ((7545, 7564), 'src.utils.get_lr', 'utils.get_lr', (['optim'], {}), '(optim)\n', (7557, 7564), True, 'import src.utils as utils\n'), ((9968, 9988), 'numpy.round', 'np.round', (['targets[k]'], {}), '(targets[k])\n', (9976, 9988), True, 'import numpy as np\n'), ((9992, 10012), 'numpy.round', 'np.round', (['outputs[k]'], {}), '(outputs[k])\n', (10000, 10012), True, 'import numpy as np\n'), ((3929, 3948), 'src.utils.get_lr', 'utils.get_lr', (['optim'], {}), '(optim)\n', (3941, 3948), True, 'import src.utils as utils\n'), ((6444, 6471), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optim'], {}), '(loss, optim)\n', (6458, 6471), False, 'from apex import amp\n'), ((7461, 7475), 'numpy.sum', 'np.sum', (['losses'], {}), '(losses)\n', (7467, 7475), True, 'import numpy as np\n'), ((7498, 7519), 'numpy.sum', 'np.sum', (['losses[-200:]'], {}), '(losses[-200:])\n', (7504, 7519), True, 'import numpy as np\n'), ((7700, 7711), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (7708, 7711), True, 'import numpy as np\n'), ((7777, 7788), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (7785, 7788), True, 'import numpy as np\n'), ((6891, 6917), 'torch.sigmoid', 'torch.sigmoid', (['outputs[_k]'], {}), '(outputs[_k])\n', (6904, 6917), False, 'import torch\n')] |
# coding=UTF-8
import numpy as np
import videoseam as vs
class weights_delegate(object):
    """Delegate class to manage the weighting for the graph construction.

    Each ``weights_*`` method returns a dictionary mapping a *structure
    vector* (a tuple describing the orientation of a graph link) to a numpy
    array of link weights shaped like the input skeleton ``I``.
    """
    def __init__(self, parent, fill_with=np.inf, ndim=3):
        # parent: owner providing the weighting multipliers (alpha, beta,
        #         gamma, delta) consumed by the weights_* methods.
        # fill_with: weight used to forbid a link (defaults to +inf).
        # ndim: target dimensionality for the structure vectors.
        super(weights_delegate, self).__init__()
        self.parent = parent
        self.fill_with = fill_with
        self.ndim = ndim
    # Given a list of vectors and a list of links, creates an appropriate dictionary
    # @vectors a list of tuples, that represents a structure type
    # @links a list (or numpy array) of numpy arrays, that contains weights to be assigned to a specific structure
    #
    # returns: a dictionary
    #
    # Example:
    # @vectors [(2, 1, 1), (1, 0, 1)]
    # @links [[[1, 2], [1, 0]], [[0, 1], [1, 1]]]
    # returns: {(2, 1, 1): [[1, 2], [1, 0]], (1, 0, 1): [[0, 1], [1, 1]]}
    def to_hash(self, vectors, links):
        return {k: v for k, v in zip(vectors, links)}
    # Given an n-dimensional tuple, resizes its dimensions to fit the class settings (self.ndim)
    # @tupleval a tuple
    # returns: another tuple with the correct dimension
    #
    # Example:
    # @listval (2, 1, 1)
    # returns (assuming self.ndim = 2): (1, 1)
    def adjust_dim(self, tupleval):
        if len(tupleval) == self.ndim:
            return tupleval
        # Drop the leading components so only the trailing ndim remain.
        resize = len(tupleval) - self.ndim
        return tupleval[resize:]
    # Given a list of n-dimensional tuple, resizes the dimension of each tuple to fit the class settings (self.ndim)
    # @listval a list a tuple
    # returns: a list of tuple with correct dimensions
    #
    # Example:
    # @listval [(2, 1, 1), (1, 0, 1)]
    # returns (assuming self.ndim = 2): [(1, 1), (0, 1)]
    def adjust_list(self, listval):
        return [self.adjust_dim(t) for t in listval]
    # Given I, it creates look-forward energies for that I, associated to the correct structure (1, 1, 2)
    # @I: An image skeleton (or a list of them)
    # returns: a dictionary that associates a structure key to an array of weights
    #
    # Example:
    # @I [[2, 1, 0, 3], [1, 0, 2, 4], [5, 2, 1, 3], [6, 2, 4, 3]]
    # returns {(1, 1, 2): [[inf, ?, ?, inf], [inf, ?, ?, inf], [inf, ?, ?, inf], [inf, ?, ?, inf]]}
    def weights_structure(self, I):
        vectors = self.adjust_list([(1, 1, 2)]) # left to right
        links = np.zeros((1,) + I.shape)
        # Formula: ((past_left - future_left)^2 + (past_right - future_right)^2) / 2
        pastleft = I[..., 1:] - I[..., 0:-1]
        futureleft = ((I[..., 1:-1] + I[..., 2:]) * 0.5) - I[..., 0:-2]
        pastright = -pastleft # I[:, 0:-1] - I[:, 1:] = me - left
        futureright = ((I[..., 0:-2] + I[..., 1:-1]) * 0.5) - I[..., 2:]
        left = (pastleft[..., 0:-1] - futureleft) ** 2
        right = (pastright[..., 0:-1] - futureright) ** 2
        links[0, ..., 1:-2] = (left[..., 0:-1] + right[..., 1:]) * 0.5
        links = links * self.parent.alpha
        # Border columns get an infinite weight so seams stay inside the frame.
        links[0, ..., -2] = self.fill_with
        links[0, ..., 0] = self.fill_with
        return self.to_hash(vectors, links)
    # Given I, it creates look-forward energies for that I, associated to the correct structure (2, 1, 1)
    # @I: An image skeleton (or a list of them)
    # returns: a dictionary that associates a structure key to an array of weights
    #
    # Example:
    # @I [[2, 1, 0, 3], [1, 0, 2, 4], [5, 2, 1, 3], [6, 2, 4, 3]]
    # returns {(1, 1, 2): [[inf, ?, ?, inf], [inf, ?, ?, inf], [inf, ?, ?, inf], [inf, ?, ?, inf]]}
    def weights_structure_time(self, I):
        # Same look-forward energy as weights_structure, but computed along
        # the time (first) axis instead of the horizontal one.
        vectors = [(2, 1, 1)]
        links = np.zeros((1,) + I.shape)
        pastleft = I[1:, :, :] - I[0:-1, :, :]
        futureleft = ((I[1:-1, :, :] + I[2:, :, :]) * 0.5) - I[0:-2, :, :]
        pastright = -pastleft # I[:, 0:-1] - I[:, 1:] = me - left
        futureright = ((I[0:-2, :, :] + I[1:-1, :, :]) * 0.5) - I[2:, :, :]
        left = (pastleft[0:-1, :, :] - futureleft) ** 2
        right = (pastright[0:-1, :, :] - futureright) ** 2
        links[0, 1:-2, :, :] = (left[0:-1, :, :] + right[1:, :, :]) * 0.5
        # NOTE(review): unlike weights_structure this is not scaled by
        # parent.alpha — confirm intended.
        links = links
        links[0, -2, :, :] = self.fill_with
        links[0, 0, :, :] = self.fill_with
        return self.to_hash(vectors, links)
    # A generic method to apply an energy function to a certain structure key
    # @I: The referring image
    # @A: The energy function
    # returns: a dictionary that associates a structure key to an array of weights
    #
    # Example:
    # @I [[2, 1, 3], [1, 0, 5], [5, 2, 3]]
    # @A [[2, 1], [1, 0], [5, 2]]
    # returns {(1, 1, 2): [[2, 1, 0], [1, 0, 0], [5, 2, 0]]}
    def weights_standard(self, I, A):
        vectors = self.adjust_list([(1, 1, 2)]) # left to right
        links = np.zeros((1,) + I.shape)
        links[0, ..., 0:-1] = A
        return self.to_hash(vectors, links)
    # Applies the importance map to a structure key. It applies also it's appropriate multiplier
    # @I: The referring image
    # @imp: importance map
    # returns: a dictionary that associates a structure key to an array of weights
    def weights_importance(self, I, imp):
        return self.weights_standard(I, imp * self.parent.gamma)
    # Applies the iterations count to a structure key. It applies also it's appropriate multiplier
    # @I: The referring image
    # @imp: The iteration counter energy function
    # returns: a dictionary that associates a structure key to an array of weights
    def weights_iterations(self, I, ite):
        return self.weights_standard(I, ite * self.parent.beta)
    # Applies the vector map to a structure key. It applies also it's appropriate multiplier
    # @I: The referring image
    # @vector: The vector tracking enegy function
    # returns: a dictionary that associates a structure key to an array of weights
    def weights_vector(self, I, V):
        return self.weights_standard(I, V * self.parent.delta)
    # Applies the iteration counter along the time (first) axis, scaled by beta.
    def weights_frame_iterations(self, I, ite):
        vectors = [(2, 1, 1)] # left to right
        links = np.zeros((1,) + I.shape)
        links[0, 0:-1, :, :] = ite * self.parent.beta
        return self.to_hash(vectors, links)
    # Depth-wise (time-axis) energies for the two diagonal temporal links.
    def weights_deepness(self, I):
        vectors = [(2, 1, 1), (0, 1, 1)]
        links = np.zeros((2,) + I.shape)
        # Depth-wise, left side
        links[0, 0:-1, :, 1:-1] = np.abs(((I[0:-1, :, 1:-1] + I[0:-1, :, 2:]) * 0.5) - ((I[1:, :, 0:-2] + I[1:, :, 1:-1]) * 0.5))
        # Depth-wise, right side
        links[1, 1:, :, 1:-1] = np.abs(((I[0:-1, :, 0:-2] + I[0:-1, :, 1:-1]) * 0.5) - ((I[1:, :, 1:-1] + I[1:, :, 2:]) * 0.5))
        return self.to_hash(vectors, links)
    # Diagonal link energies (structures (0, 1, 2) and (2, 1, 2)).
    def weights_diagonal(self, I):
        vectors = [(0, 1, 2), (2, 1, 2)]
        energy = (I[:, :, 0:-1] - (I[:, :, 0:-1] + I[:, :, 1:]) * 0.5) ** 2
        links = np.zeros((2,) + I.shape)
        links[0, 1:, :, 0:-1] = energy[0:-1]
        links[1, :, :, 0:-1] = energy
        # NOTE(review): divides by alpha while weights_structure multiplies
        # by it — confirm intended.
        links = links / self.parent.alpha
        return self.to_hash(vectors, links)
    # Given a bitmask list of methods and all the useful energy functions, generates a tuple of dictionaries,
    # that create an associations between a structure key and it's own energy function
    # @I: The referring image (skeleton)
    # @Imp: The importance map
    # @ite: The iteration counter energy function
    # @V: The vector tracking enegy function
    # @methods: A bit mask to identify which method should be actived
    # returns: a dictionary that associates a structure key to an array of weights
    #
    # Example:
    # @I [[2, 1, 0], [0, 1, 3], [2, 2, 2]]
    # @Imp [[2, 2], [1, 3], [0, 0]]
    # @ite [[2, 1], [1, 1], [2, 4]]
    # @V [[0, 0], [0, 0], [0, 0]]
    # @methods vs.IMP | vs.ITE
    # returns ({(1, 1, 2): [[2, 2, 0], [1, 3, 0], [0, 0, 0]]}, {(1, 1, 2): [[2, 1, 0], [1, 1, 0], [2, 4, 0]]})
    def select_methods(self, I, Imp, ite, V, methods):
        all_weights = ()
        if (vs.STR & methods) != 0:
            all_weights += (self.weights_structure(I),)
        if (vs.IMP & methods) != 0:
            all_weights += (self.weights_importance(I, Imp),)
        if (vs.ITE & methods) != 0:
            all_weights += (self.weights_iterations(I, ite),)
        if (vs.FIT & methods) != 0:
            all_weights += (self.weights_frame_iterations(I, ite),)
        if (vs.DEE & methods) != 0:
            all_weights += (self.weights_deepness(I),)
        if (vs.DIA & methods) != 0:
            all_weights += (self.weights_diagonal(I),)
        if (vs.VEC & methods) != 0:
            all_weights += (self.weights_vector(I, V),)
        if (vs.TIM & methods) != 0:
            all_weights += (self.weights_structure_time(I),)
        return all_weights
| [
"numpy.abs",
"numpy.zeros"
] | [((2218, 2242), 'numpy.zeros', 'np.zeros', (['((1,) + I.shape)'], {}), '((1,) + I.shape)\n', (2226, 2242), True, 'import numpy as np\n'), ((3384, 3408), 'numpy.zeros', 'np.zeros', (['((1,) + I.shape)'], {}), '((1,) + I.shape)\n', (3392, 3408), True, 'import numpy as np\n'), ((4446, 4470), 'numpy.zeros', 'np.zeros', (['((1,) + I.shape)'], {}), '((1,) + I.shape)\n', (4454, 4470), True, 'import numpy as np\n'), ((5669, 5693), 'numpy.zeros', 'np.zeros', (['((1,) + I.shape)'], {}), '((1,) + I.shape)\n', (5677, 5693), True, 'import numpy as np\n'), ((5867, 5891), 'numpy.zeros', 'np.zeros', (['((2,) + I.shape)'], {}), '((2,) + I.shape)\n', (5875, 5891), True, 'import numpy as np\n'), ((5945, 6040), 'numpy.abs', 'np.abs', (['((I[0:-1, :, 1:-1] + I[0:-1, :, 2:]) * 0.5 - (I[1:, :, 0:-2] + I[1:, :, 1:-\n 1]) * 0.5)'], {}), '((I[0:-1, :, 1:-1] + I[0:-1, :, 2:]) * 0.5 - (I[1:, :, 0:-2] + I[1:,\n :, 1:-1]) * 0.5)\n', (5951, 6040), True, 'import numpy as np\n'), ((6092, 6187), 'numpy.abs', 'np.abs', (['((I[0:-1, :, 0:-2] + I[0:-1, :, 1:-1]) * 0.5 - (I[1:, :, 1:-1] + I[1:, :, 2\n :]) * 0.5)'], {}), '((I[0:-1, :, 0:-2] + I[0:-1, :, 1:-1]) * 0.5 - (I[1:, :, 1:-1] + I[1:,\n :, 2:]) * 0.5)\n', (6098, 6187), True, 'import numpy as np\n'), ((6383, 6407), 'numpy.zeros', 'np.zeros', (['((2,) + I.shape)'], {}), '((2,) + I.shape)\n', (6391, 6407), True, 'import numpy as np\n')] |
import os
import random
from typing import List
import numpy as np
import pandas as pd
from tqdm import tqdm
def fix_random_seed(seed: int = 42) -> None:
    """Seed every random source used by the pipeline for reproducibility.

    Seeds Python's `random` module, NumPy's global RNG, and sets
    PYTHONHASHSEED in the environment.

    Parameters
    ----------
    seed : int
        The random seed to apply everywhere.
    """
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
def load_input_csv(input_filepath: str, usecols: List[str]) -> pd.DataFrame:
    """Read the input csv file, keeping only the requested columns.

    Parameters
    ----------
    input_filepath : str
        Path to the input csv file.
    usecols : list of str
        Names of the columns to keep.

    Returns
    -------
    df_orig : pandas.DataFrame
        Table holding the selected columns of the input csv file, with
        NaN rows dropped, `objectid` cast to int64 and a fresh index.
    """
    # FIX: the separator used to be the non-raw string '\s+|\s+' -- an
    # invalid escape sequence with the same regex alternative twice.
    # r'\s+' matches exactly the same input.
    # skiprows=[1] skips the second line of the file; skipfooter=1 drops
    # the last line (skipfooter requires engine='python').
    df_orig = pd.read_csv(input_filepath, sep=r'\s+', usecols=usecols,
                          engine='python', skipinitialspace=True,
                          skiprows=[1], skipfooter=1)
    df_orig.dropna(inplace=True)
    df_orig = df_orig.astype({'objectid': np.int64})
    df_orig.reset_index(inplace=True, drop=True)
    return df_orig
def get_unique_list(df: pd.DataFrame, col: str) -> np.ndarray:
    """Return the unique values of a DataFrame column, sorted ascending.

    Parameters
    ----------
    df : pandas.DataFrame
        The table to inspect.
    col : str
        Name of the column of interest.

    Returns
    -------
    unique_list : numpy.ndarray
        The unique values of `df[col]`, sorted in ascending order.
    """
    return np.sort(df[col].unique())
def get_ididx_mjdcols_dataframe(
        df: pd.DataFrame, df_source: pd.DataFrame) -> pd.DataFrame:
    """Pivot m_ap30 values into an objectid-by-mjd table.

    Builds a DataFrame indexed by the unique `objectid` values of
    `df_source`, with the unique `mjd` values of `df_source` as columns,
    and fills it with the `m_ap30` values of `df` (rounded to 3 decimals).
    Cells with no matching row in `df` stay NaN.

    Parameters
    ----------
    df : pandas.DataFrame
        Table holding the m_ap30 values to fill in.
    df_source : pandas.DataFrame
        Table supplying the objectid index and the mjd columns.

    Returns
    -------
    pandas.DataFrame
        The filled objectid-by-mjd table.
    """
    row_index = get_unique_list(df_source, 'objectid')
    col_index = get_unique_list(df_source, 'mjd')
    table = pd.DataFrame(index=row_index, columns=col_index)
    records = df.reset_index(drop=True)
    print('\nget_ididx_mjdcols_dataframe\n')
    for row in tqdm(range(len(records))):
        obj_id = records.at[row, 'objectid']
        mjd = records.at[row, 'mjd']
        table.at[obj_id, mjd] = records.at[row, 'm_ap30'].round(3)
    return table
| [
"pandas.DataFrame",
"numpy.random.seed",
"random.seed",
"pandas.read_csv"
] | [((310, 327), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (321, 327), False, 'import random\n'), ((332, 352), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (346, 352), True, 'import numpy as np\n'), ((733, 867), 'pandas.read_csv', 'pd.read_csv', (['input_filepath'], {'sep': '"""\\\\s+|\\\\s+"""', 'usecols': 'usecols', 'engine': '"""python"""', 'skipinitialspace': '(True)', 'skiprows': '[1]', 'skipfooter': '(1)'}), "(input_filepath, sep='\\\\s+|\\\\s+', usecols=usecols, engine=\n 'python', skipinitialspace=True, skiprows=[1], skipfooter=1)\n", (744, 867), True, 'import pandas as pd\n'), ((2245, 2290), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'list_id', 'columns': 'list_mjd'}), '(index=list_id, columns=list_mjd)\n', (2257, 2290), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
from psychopy import locale_setup, visual, core
import numpy as np
from psychopy.hardware import keyboard
from psychopy import misc
def createPalette(size):
    """Build a square hue/saturation color palette.

    Hue runs from 0 to 360 along the columns and saturation from 0 to 1
    down the rows (value fixed at 1). The palette is built in HSV,
    converted to RGB and rescaled into the [0, 1] range expected by
    ImageStim.

    Parameters
    ----------
    size : int
        Width and height of the square palette, in pixels.

    Returns
    -------
    numpy.ndarray
        (size, size, 3) RGB array with components in [0, 1].
    """
    hsv = np.ones([size, size, 3], dtype=float)
    # Hue gradient broadcast across every row.
    hsv[:, :, 0] = np.linspace(0, 360, size, endpoint=False)
    # Saturation gradient down the rows: computed once and broadcast over
    # all columns instead of recomputing the same linspace per column.
    hsv[:, :, 1] = np.linspace(0, 1, size, endpoint=False)[:, np.newaxis]
    rgb = misc.hsv2rgb(hsv)
    # Map from hsv2rgb's [-1, 1] output into [0, 1] for the image stim.
    rgb = (rgb + 1) / 2
    return rgb
def createValue(size):
    """Build a horizontal value (brightness) strip.

    A 20-pixel-tall bar whose value channel runs from 0 to 1 left to
    right, with hue and saturation fixed at 0. The strip is built in HSV,
    converted to RGB and rescaled into the [0, 1] range expected by
    ImageStim.

    Parameters
    ----------
    size : int
        Width of the strip, in pixels.

    Returns
    -------
    numpy.ndarray
        (20, size, 3) RGB array with components in [0, 1].
    """
    hsv = np.zeros([20, size, 3], dtype=float)
    # Brightness gradient broadcast across every row.
    hsv[:, :, 2] = np.linspace(0, 1, size, endpoint=False)
    rgb = misc.hsv2rgb(hsv)
    # Map from hsv2rgb's [-1, 1] output into [0, 1] for the image stim.
    rgb = (rgb + 1) / 2
    return rgb
# Setup the Window
# Main application window; 'height' units make 1.0 equal the window height.
win = visual.Window(size=[1920, 1080], fullscr=False, units='height')
# Square hue/saturation palette image (its texture is set further below).
colorPalette = visual.ImageStim(win=win,name='colorPalette', units='pix',
    image=None, mask=None,
    texRes=64, depth=0.0)
# Horizontal brightness strip image (its texture is set further below).
valuePalette = visual.ImageStim(win=win, name='valuePalette', units='pix',
    pos=(0, -250), depth=-1.0)
# One slider per HSV component: hue (0-360), saturation (0-1), value (0-1).
hueSlider = visual.Slider(win=win, name='hueSlider',
    size=(.37, .02), pos=(0, 0.2),
    labels=None, ticks=(0, 360), style=['rating'])
satSlider = visual.Slider(win=win, name='satSlider',
    size=(.02, .37), pos=(0.2, 0),
    labels=None, ticks=(0, 1), style=['rating'])
valSlider = visual.Slider(win=win, name='valSlider',
    size=(.37, .02), pos=(0, -0.25),
    labels=None, ticks=(0,1), style=['rating'])
# Swatch previewing the currently selected HSV color.
visualFeedback = visual.Rect(win=win, name='visualFeedback',
    width=(0.15, 0.15)[0], height=(0.15, 0.15)[1],
    pos=(0, 0.35),fillColor=[0,0,0], fillColorSpace='hsv',
    depth=-6.0)
# Numeric read-out of the current H/S/V values (text set in the loop).
hsvText = visual.TextStim(win=win, name='hsvText',
    text=None, font='Arial',
    pos=(.4, 0), height=0.03)
instText = visual.TextStim(win=win, name='instText',
    text=("Use the sliders to change:\n---hue (top)\n---"
          "saturation (right)\n---value (bottom)"),
    font='Arial',
    pos=(-.3, 0), height=0.03, wrapWidth=.4,
    alignText='left', anchorHoriz='right')
quitText = visual.TextStim(win=win, name='quitText',
    text='Press escape to quit to continue',
    font='Arial',
    pos=(0, -.35), height=0.025, depth=-8.0,
    wrapWidth=.4)
paletteSize = 400  # in pixels
# Generate the palette textures and attach them to the image stims.
valRGB = createValue(paletteSize)
colPalRGB = createPalette(paletteSize)
hueSlider.reset()
satSlider.reset()
valSlider.reset()
colorPalette.setSize([paletteSize,paletteSize])
colorPalette.setImage(colPalRGB)
valuePalette.setSize((paletteSize, 20))
valuePalette.setImage(valRGB)
key_resp = keyboard.Keyboard()
# Event loop: read the sliders, update the preview, redraw, poll keyboard.
while True:
    # Sliders return None until first clicked; fall back to defaults then.
    h = hueSlider.getRating() or 0
    s = satSlider.getRating() or 0
    v = valSlider.getRating() or 0.5
    visualFeedback.fillColor = [h,s,v]
    hsvText.text = "Hue: {h:.0f}\nSat: {s:.2f}\nVal: {v:.2f}".format(h=h, s=s, v=v)
    colorPalette.draw()
    valuePalette.draw()
    hueSlider.draw()
    satSlider.draw()
    valSlider.draw()
    visualFeedback.draw()
    instText.draw()
    hsvText.draw()
    quitText.draw()
    theseKeys = key_resp.getKeys(keyList=['escape'], waitRelease=False)
    if len(theseKeys):
        theseKeys = theseKeys[0]  # at least one key was pressed
        # check for quit:
        if "escape" == theseKeys:
            win.close()
            core.quit()
win.flip() | [
"psychopy.visual.Rect",
"psychopy.core.quit",
"numpy.ones",
"psychopy.misc.hsv2rgb",
"psychopy.visual.Slider",
"psychopy.visual.TextStim",
"numpy.linspace",
"numpy.zeros",
"psychopy.hardware.keyboard.Keyboard",
"psychopy.visual.Window",
"psychopy.visual.ImageStim"
] | [((1159, 1222), 'psychopy.visual.Window', 'visual.Window', ([], {'size': '[1920, 1080]', 'fullscr': '(False)', 'units': '"""height"""'}), "(size=[1920, 1080], fullscr=False, units='height')\n", (1172, 1222), False, 'from psychopy import locale_setup, visual, core\n'), ((1239, 1347), 'psychopy.visual.ImageStim', 'visual.ImageStim', ([], {'win': 'win', 'name': '"""colorPalette"""', 'units': '"""pix"""', 'image': 'None', 'mask': 'None', 'texRes': '(64)', 'depth': '(0.0)'}), "(win=win, name='colorPalette', units='pix', image=None,\n mask=None, texRes=64, depth=0.0)\n", (1255, 1347), False, 'from psychopy import locale_setup, visual, core\n'), ((1428, 1518), 'psychopy.visual.ImageStim', 'visual.ImageStim', ([], {'win': 'win', 'name': '"""valuePalette"""', 'units': '"""pix"""', 'pos': '(0, -250)', 'depth': '(-1.0)'}), "(win=win, name='valuePalette', units='pix', pos=(0, -250),\n depth=-1.0)\n", (1444, 1518), False, 'from psychopy import locale_setup, visual, core\n'), ((1565, 1689), 'psychopy.visual.Slider', 'visual.Slider', ([], {'win': 'win', 'name': '"""hueSlider"""', 'size': '(0.37, 0.02)', 'pos': '(0, 0.2)', 'labels': 'None', 'ticks': '(0, 360)', 'style': "['rating']"}), "(win=win, name='hueSlider', size=(0.37, 0.02), pos=(0, 0.2),\n labels=None, ticks=(0, 360), style=['rating'])\n", (1578, 1689), False, 'from psychopy import locale_setup, visual, core\n'), ((1753, 1875), 'psychopy.visual.Slider', 'visual.Slider', ([], {'win': 'win', 'name': '"""satSlider"""', 'size': '(0.02, 0.37)', 'pos': '(0.2, 0)', 'labels': 'None', 'ticks': '(0, 1)', 'style': "['rating']"}), "(win=win, name='satSlider', size=(0.02, 0.37), pos=(0.2, 0),\n labels=None, ticks=(0, 1), style=['rating'])\n", (1766, 1875), False, 'from psychopy import locale_setup, visual, core\n'), ((1939, 2063), 'psychopy.visual.Slider', 'visual.Slider', ([], {'win': 'win', 'name': '"""valSlider"""', 'size': '(0.37, 0.02)', 'pos': '(0, -0.25)', 'labels': 'None', 'ticks': '(0, 1)', 'style': "['rating']"}), 
"(win=win, name='valSlider', size=(0.37, 0.02), pos=(0, -0.25),\n labels=None, ticks=(0, 1), style=['rating'])\n", (1952, 2063), False, 'from psychopy import locale_setup, visual, core\n'), ((2131, 2301), 'psychopy.visual.Rect', 'visual.Rect', ([], {'win': 'win', 'name': '"""visualFeedback"""', 'width': '(0.15, 0.15)[0]', 'height': '(0.15, 0.15)[1]', 'pos': '(0, 0.35)', 'fillColor': '[0, 0, 0]', 'fillColorSpace': '"""hsv"""', 'depth': '(-6.0)'}), "(win=win, name='visualFeedback', width=(0.15, 0.15)[0], height=(\n 0.15, 0.15)[1], pos=(0, 0.35), fillColor=[0, 0, 0], fillColorSpace=\n 'hsv', depth=-6.0)\n", (2142, 2301), False, 'from psychopy import locale_setup, visual, core\n'), ((2387, 2483), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': '"""hsvText"""', 'text': 'None', 'font': '"""Arial"""', 'pos': '(0.4, 0)', 'height': '(0.03)'}), "(win=win, name='hsvText', text=None, font='Arial', pos=(0.4,\n 0), height=0.03)\n", (2402, 2483), False, 'from psychopy import locale_setup, visual, core\n'), ((2543, 2787), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': '"""instText"""', 'text': '"""Use the sliders to change:\n---hue (top)\n---saturation (right)\n---value (bottom)"""', 'font': '"""Arial"""', 'pos': '(-0.3, 0)', 'height': '(0.03)', 'wrapWidth': '(0.4)', 'alignText': '"""left"""', 'anchorHoriz': '"""right"""'}), '(win=win, name=\'instText\', text=\n """Use the sliders to change:\n---hue (top)\n---saturation (right)\n---value (bottom)"""\n , font=\'Arial\', pos=(-0.3, 0), height=0.03, wrapWidth=0.4, alignText=\n \'left\', anchorHoriz=\'right\')\n', (2558, 2787), False, 'from psychopy import locale_setup, visual, core\n'), ((2927, 3089), 'psychopy.visual.TextStim', 'visual.TextStim', ([], {'win': 'win', 'name': '"""quitText"""', 'text': '"""Press escape to quit to continue"""', 'font': '"""Arial"""', 'pos': '(0, -0.35)', 'height': '(0.025)', 'depth': '(-8.0)', 'wrapWidth': '(0.4)'}), "(win=win, name='quitText', 
text=\n 'Press escape to quit to continue', font='Arial', pos=(0, -0.35),\n height=0.025, depth=-8.0, wrapWidth=0.4)\n", (2942, 3089), False, 'from psychopy import locale_setup, visual, core\n'), ((3513, 3532), 'psychopy.hardware.keyboard.Keyboard', 'keyboard.Keyboard', ([], {}), '()\n', (3530, 3532), False, 'from psychopy.hardware import keyboard\n'), ((363, 400), 'numpy.ones', 'np.ones', (['[size, size, 3]'], {'dtype': 'float'}), '([size, size, 3], dtype=float)\n', (370, 400), True, 'import numpy as np\n'), ((435, 476), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', 'size'], {'endpoint': '(False)'}), '(0, 360, size, endpoint=False)\n', (446, 476), True, 'import numpy as np\n'), ((626, 643), 'psychopy.misc.hsv2rgb', 'misc.hsv2rgb', (['hsv'], {}), '(hsv)\n', (638, 643), False, 'from psychopy import misc\n'), ((874, 910), 'numpy.zeros', 'np.zeros', (['[20, size, 3]'], {'dtype': 'float'}), '([20, size, 3], dtype=float)\n', (882, 910), True, 'import numpy as np\n'), ((942, 981), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'size'], {'endpoint': '(False)'}), '(0, 1, size, endpoint=False)\n', (953, 981), True, 'import numpy as np\n'), ((1012, 1029), 'psychopy.misc.hsv2rgb', 'misc.hsv2rgb', (['hsv'], {}), '(hsv)\n', (1024, 1029), False, 'from psychopy import misc\n'), ((550, 589), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'size'], {'endpoint': '(False)'}), '(0, 1, size, endpoint=False)\n', (561, 589), True, 'import numpy as np\n'), ((4249, 4260), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (4258, 4260), False, 'from psychopy import locale_setup, visual, core\n')] |
from scipy.stats import multivariate_normal # 生成多维概率分布的方法
import numpy as np
class GaussianMixture:
    """Gaussian mixture model clustering fitted with the EM algorithm.

    Parameters
    ----------
    n_components : int
        Number of mixture components (clusters).
    covariance_type : str
        Accepted for API compatibility; only full covariances are
        implemented, so this argument is currently ignored.
    tol : float
        Accepted for API compatibility; convergence is currently governed
        solely by ``max_iter``.
    reg_covar : float
        Regularization added to the covariance diagonals to avoid
        singular covariance matrices.
    max_iter : int
        Number of EM iterations to run.
    """

    def __init__(self, n_components: int = 1, covariance_type: str = 'full',
                 tol: float = 0.001, reg_covar: float = 1e-06, max_iter: int = 100):
        self.n_components = n_components
        self.means_ = None          # (n_components, n_feature) component means
        self.covariances_ = None    # (n_components, n_feature, n_feature)
        self.weights_ = None        # (n_components,) mixing proportions
        self.reg_covar = reg_covar  # promoted to reg_covar * I in fit()
        self.max_iter = max_iter

    def fit(self, X_train):
        """Estimate means, covariances and weights from X_train via EM."""
        n_samples, n_feature = X_train.shape
        # Promote the scalar regularizer to a diagonal matrix; it is added
        # to the covariances to keep them positive definite.
        self.reg_covar = self.reg_covar * np.identity(n_feature)
        # Initialize the parameters: means, covariances, weights.
        # BUG FIX: np.random.randint returns an *int* array; assigning the
        # float M-step updates into it silently truncated the means.  The
        # initial draws are unchanged, only the dtype is now float.
        self.means_ = np.random.randint(
            X_train.min() / 2, X_train.max() / 2,
            size=(self.n_components, n_feature)).astype(np.float64)
        self.covariances_ = np.zeros((self.n_components, n_feature, n_feature))
        for k in range(self.n_components):
            np.fill_diagonal(self.covariances_[k], 1)
        self.weights_ = np.ones(self.n_components) / self.n_components
        P_mat = np.zeros((n_samples, self.n_components))  # responsibilities
        for _ in range(self.max_iter):
            #### E-step: compute responsibilities ####
            for k in range(self.n_components):
                self.covariances_ += self.reg_covar  # guard against singular covariances
                g = multivariate_normal(mean=self.means_[k], cov=self.covariances_[k])
                P_mat[:, k] = self.weights_[k] * g.pdf(X_train)  # likelihood under component k
            totol_N = P_mat.sum(axis=1)  # total likelihood per sample
            # Samples with zero likelihood under every component get a
            # uniform responsibility (1 / n_components each).
            totol_N[totol_N == 0] = self.n_components
            P_mat /= totol_N.reshape(-1, 1)
            #### M-step: update parameters ####
            for k in range(self.n_components):
                N_k = np.sum(P_mat[:, k], axis=0)  # effective count of component k
                self.means_[k] = (1 / N_k) * np.sum(
                    X_train * P_mat[:, k].reshape(-1, 1), axis=0)
                self.covariances_[k] = (1 / N_k) * np.dot(
                    (P_mat[:, k].reshape(-1, 1) * (X_train - self.means_[k])).T,
                    (X_train - self.means_[k])) + self.reg_covar
                self.weights_[k] = N_k / n_samples

    def predict(self, X_test):
        """Return the index of the most likely component for each sample."""
        #### E-step on the test data ####
        P_mat = np.zeros((X_test.shape[0], self.n_components))
        for k in range(self.n_components):
            g = multivariate_normal(mean=self.means_[k], cov=self.covariances_[k])
            P_mat[:, k] = self.weights_[k] * g.pdf(X_test)
        totol_N = P_mat.sum(axis=1)
        totol_N[totol_N == 0] = self.n_components
        P_mat /= totol_N.reshape(-1, 1)
        return np.argmax(P_mat, axis=1)
if __name__ == '__main__':
    # FIX: sklearn.datasets.samples_generator was deprecated in
    # scikit-learn 0.22 and removed in 0.24; prefer the modern path and
    # fall back to the legacy one for old installations.
    try:
        from sklearn.datasets import make_blobs
    except ImportError:
        from sklearn.datasets.samples_generator import make_blobs
    from model_selection.train_test_split import train_test_split
    # Generate three clusters, then shear them with a fixed random matrix
    # so the clusters are elongated rather than spherical.
    X, _ = make_blobs(cluster_std=1.5, random_state=42, n_samples=1000, centers=3)
    X = np.dot(X, np.random.RandomState(0).randn(2, 2))
    import matplotlib.pyplot as plt
    # Show the raw, unlabeled data.
    plt.clf()
    plt.scatter(X[:, 0], X[:, 1], alpha=0.3)
    plt.show()
    X_train, X_test = train_test_split(X, test_size=0.2)
    n_samples, n_feature = X_train.shape
    gmm = GaussianMixture(n_components=6)
    gmm.fit(X_train)
    Y_pred = gmm.predict(X_test)
    # Show the held-out points colored by their predicted component.
    plt.clf()
    plt.scatter(X_test[:, 0], X_test[:, 1], c=Y_pred, alpha=0.3)
    plt.show()
| [
"numpy.identity",
"numpy.ones",
"scipy.stats.multivariate_normal",
"matplotlib.pyplot.clf",
"numpy.argmax",
"numpy.fill_diagonal",
"numpy.sum",
"numpy.zeros",
"sklearn.datasets.samples_generator.make_blobs",
"model_selection.train_test_split.train_test_split",
"matplotlib.pyplot.scatter",
"num... | [((3113, 3184), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', ([], {'cluster_std': '(1.5)', 'random_state': '(42)', 'n_samples': '(1000)', 'centers': '(3)'}), '(cluster_std=1.5, random_state=42, n_samples=1000, centers=3)\n', (3123, 3184), False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((3293, 3302), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3300, 3302), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3347), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'alpha': '(0.3)'}), '(X[:, 0], X[:, 1], alpha=0.3)\n', (3318, 3347), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3362), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3360, 3362), True, 'import matplotlib.pyplot as plt\n'), ((3386, 3420), 'model_selection.train_test_split.train_test_split', 'train_test_split', (['X'], {'test_size': '(0.2)'}), '(X, test_size=0.2)\n', (3402, 3420), False, 'from model_selection.train_test_split import train_test_split\n'), ((3564, 3573), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3571, 3573), True, 'import matplotlib.pyplot as plt\n'), ((3578, 3638), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_test[:, 0]', 'X_test[:, 1]'], {'c': 'Y_pred', 'alpha': '(0.3)'}), '(X_test[:, 0], X_test[:, 1], c=Y_pred, alpha=0.3)\n', (3589, 3638), True, 'import matplotlib.pyplot as plt\n'), ((3643, 3653), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3651, 3653), True, 'import matplotlib.pyplot as plt\n'), ((861, 912), 'numpy.zeros', 'np.zeros', (['(self.n_components, n_feature, n_feature)'], {}), '((self.n_components, n_feature, n_feature))\n', (869, 912), True, 'import numpy as np\n'), ((1099, 1139), 'numpy.zeros', 'np.zeros', (['(n_samples, self.n_components)'], {}), '((n_samples, self.n_components))\n', (1107, 1139), True, 'import numpy as np\n'), ((2515, 2561), 'numpy.zeros', 'np.zeros', (['(X_test.shape[0], self.n_components)'], {}), '((X_test.shape[0], 
self.n_components))\n', (2523, 2561), True, 'import numpy as np\n'), ((2919, 2943), 'numpy.argmax', 'np.argmax', (['P_mat'], {'axis': '(1)'}), '(P_mat, axis=1)\n', (2928, 2943), True, 'import numpy as np\n'), ((622, 644), 'numpy.identity', 'np.identity', (['n_feature'], {}), '(n_feature)\n', (633, 644), True, 'import numpy as np\n'), ((968, 1009), 'numpy.fill_diagonal', 'np.fill_diagonal', (['self.covariances_[k]', '(1)'], {}), '(self.covariances_[k], 1)\n', (984, 1009), True, 'import numpy as np\n'), ((1035, 1061), 'numpy.ones', 'np.ones', (['self.n_components'], {}), '(self.n_components)\n', (1042, 1061), True, 'import numpy as np\n'), ((2621, 2687), 'scipy.stats.multivariate_normal', 'multivariate_normal', ([], {'mean': 'self.means_[k]', 'cov': 'self.covariances_[k]'}), '(mean=self.means_[k], cov=self.covariances_[k])\n', (2640, 2687), False, 'from scipy.stats import multivariate_normal\n'), ((1344, 1410), 'scipy.stats.multivariate_normal', 'multivariate_normal', ([], {'mean': 'self.means_[k]', 'cov': 'self.covariances_[k]'}), '(mean=self.means_[k], cov=self.covariances_[k])\n', (1363, 1410), False, 'from scipy.stats import multivariate_normal\n'), ((1873, 1900), 'numpy.sum', 'np.sum', (['P_mat[:, k]'], {'axis': '(0)'}), '(P_mat[:, k], axis=0)\n', (1879, 1900), True, 'import numpy as np\n'), ((3203, 3227), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (3224, 3227), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
import matplotlib as mb
# Raw data location; the folder name doubles as the measurement name.
path = r'D:\data\20200213\100602_awg_sweep'
# path[16:] re-appends the measurement name, giving <dir>\<name>.dat
data_name = path+path[16:]+r'.dat'
data = np.loadtxt(data_name, unpack=True)
# Number of sweep traces stored back to back in the file.
n = 701
# print(len(data[0]))
# print(len(data[0])/601.0)
# Column 0: AWG voltage per trace; column 1: frequency axis (identical for
# every trace, so only the first chunk is kept); column 2: magnitude.
curr = np.array_split(data[0],n)
freq = np.array_split(data[1],n)[0]
absol = np.array_split(data[2],n)
# plt.plot(freq, absol[37])
# plt.plot(freq, absol[-1])
fig, ax = plt.subplots()
# fig.title(path[8:])
# 2-D color map: x axis in mV (voltage / 1e-3), y axis in GHz (freq / 1e9).
img = ax.imshow(np.rot90(absol), aspect='auto',extent=[curr[0][0]/1e-3, curr[-1][0]/1e-3, freq[0]/1e9, freq[-1]/1e9], cmap = 'RdBu')
ax.set_xlabel(r'AWG Voltage (mV)')#, fontsize=24)
ax.set_ylabel('Frequency (GHz)')#, fontsize=18)
# ticks_font = mb.font_manager.FontProperties(family='times new roman', style='normal', size=18, weight='normal', stretch='normal')
# for label in ax.get_xticklabels():
#     label.set_fontproperties(ticks_font)
# for label in ax.get_yticklabels():
#     label.set_fontproperties(ticks_font)
fig.colorbar(img, ax=ax)
plt.title(path[8:])
plt.show()
# print(data_name)
| [
"numpy.array_split",
"numpy.rot90",
"matplotlib.pyplot.title",
"numpy.loadtxt",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((207, 241), 'numpy.loadtxt', 'np.loadtxt', (['data_name'], {'unpack': '(True)'}), '(data_name, unpack=True)\n', (217, 241), True, 'import numpy as np\n'), ((313, 339), 'numpy.array_split', 'np.array_split', (['data[0]', 'n'], {}), '(data[0], n)\n', (327, 339), True, 'import numpy as np\n'), ((385, 411), 'numpy.array_split', 'np.array_split', (['data[2]', 'n'], {}), '(data[2], n)\n', (399, 411), True, 'import numpy as np\n'), ((484, 498), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (496, 498), True, 'import matplotlib.pyplot as plt\n'), ((1084, 1103), 'matplotlib.pyplot.title', 'plt.title', (['path[8:]'], {}), '(path[8:])\n', (1093, 1103), True, 'import matplotlib.pyplot as plt\n'), ((1105, 1115), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1113, 1115), True, 'import matplotlib.pyplot as plt\n'), ((347, 373), 'numpy.array_split', 'np.array_split', (['data[1]', 'n'], {}), '(data[1], n)\n', (361, 373), True, 'import numpy as np\n'), ((539, 554), 'numpy.rot90', 'np.rot90', (['absol'], {}), '(absol)\n', (547, 554), True, 'import numpy as np\n')] |
import click
from numpy import argmax
from achilles.model import AchillesModel
from achilles.utils import get_dataset_labels
from colorama import Fore
from pathlib import Path
# Short aliases for the colorama ANSI color codes used in status messages.
Y = Fore.YELLOW
G = Fore.GREEN
RE = Fore.RESET
@click.command()
@click.option(
    "--model",
    "-m",
    default=None,
    help="Model file HD5.",
    show_default=True,
    metavar="",
)
@click.option(
    "--evaluation",
    "-e",
    default=None,
    help="Evaluation file HD5 sampled from AchillesModel.",
    show_default=True,
    metavar="",
)
@click.option(
    "--batch_size",
    "-b",
    default=500,
    help="Evaluation batch size.",
    show_default=True,
    metavar="",
)
def evaluate(model, evaluation, batch_size):
    """ Evaluate a model against a data set from PoreMongo """

    achilles = AchillesModel(evaluation)
    achilles.load_model(model_file=model)

    print(f'{Y}Evaluating model: {G}{Path(model).name}{RE}')
    print(f'{Y}Using evaluation data from: {G}{Path(evaluation).name}{RE}')

    predicted = achilles.predict_generator(
        data_type="data", batch_size=batch_size
    )
    print(predicted)

    # Collapse the class-probability rows into predicted label indices.
    predicted = argmax(predicted, -1)
    labels = get_dataset_labels(evaluation)

    correct_labels = 0
    false_labels = 0
    for i, label in enumerate(predicted):
        if int(label) == int(argmax(labels[i])):
            correct_labels += 1
        else:
            false_labels += 1

    # BUG FIX: the original printed correct_labels / false_labels -- a
    # ratio, not a percentage, and a ZeroDivisionError whenever every
    # prediction was correct.  Report the share of false predictions.
    total = correct_labels + false_labels
    false_pct = (false_labels / total * 100) if total else 0.0
    print(f'False predictions in evaluation data: '
          f'{false_pct:.2f}%')
| [
"achilles.model.AchillesModel",
"pathlib.Path",
"click.option",
"numpy.argmax",
"achilles.utils.get_dataset_labels",
"click.command"
] | [((227, 242), 'click.command', 'click.command', ([], {}), '()\n', (240, 242), False, 'import click\n'), ((244, 346), 'click.option', 'click.option', (['"""--model"""', '"""-m"""'], {'default': 'None', 'help': '"""Model file HD5."""', 'show_default': '(True)', 'metavar': '""""""'}), "('--model', '-m', default=None, help='Model file HD5.',\n show_default=True, metavar='')\n", (256, 346), False, 'import click\n'), ((371, 515), 'click.option', 'click.option', (['"""--evaluation"""', '"""-e"""'], {'default': 'None', 'help': '"""Evaluation file HD5 sampled from AchillesModel."""', 'show_default': '(True)', 'metavar': '""""""'}), "('--evaluation', '-e', default=None, help=\n 'Evaluation file HD5 sampled from AchillesModel.', show_default=True,\n metavar='')\n", (383, 515), False, 'import click\n'), ((535, 649), 'click.option', 'click.option', (['"""--batch_size"""', '"""-b"""'], {'default': '(500)', 'help': '"""Evaluation batch size."""', 'show_default': '(True)', 'metavar': '""""""'}), "('--batch_size', '-b', default=500, help=\n 'Evaluation batch size.', show_default=True, metavar='')\n", (547, 649), False, 'import click\n'), ((797, 822), 'achilles.model.AchillesModel', 'AchillesModel', (['evaluation'], {}), '(evaluation)\n', (810, 822), False, 'from achilles.model import AchillesModel\n'), ((1141, 1162), 'numpy.argmax', 'argmax', (['predicted', '(-1)'], {}), '(predicted, -1)\n', (1147, 1162), False, 'from numpy import argmax\n'), ((1177, 1207), 'achilles.utils.get_dataset_labels', 'get_dataset_labels', (['evaluation'], {}), '(evaluation)\n', (1195, 1207), False, 'from achilles.utils import get_dataset_labels\n'), ((1324, 1341), 'numpy.argmax', 'argmax', (['labels[i]'], {}), '(labels[i])\n', (1330, 1341), False, 'from numpy import argmax\n'), ((903, 914), 'pathlib.Path', 'Path', (['model'], {}), '(model)\n', (907, 914), False, 'from pathlib import Path\n'), ((974, 990), 'pathlib.Path', 'Path', (['evaluation'], {}), '(evaluation)\n', (978, 990), False, 'from pathlib 
import Path\n')] |
import numpy as np
import numpy.random as random
import matplotlib.pyplot as plt
# Read the noise parameters from the user.  FIX: float() replaces eval();
# eval() executes arbitrary code typed at the prompt and is unsafe on
# untrusted input, while float() parses exactly the expected number.
amplitude = float(input("Enter amplitude of impulse noise: "))
probability = float(input("Enter probability of impulse noise(%): "))
t = np.linspace(0, 1, 200, endpoint=False)  # time axis (seconds)
x = 10 * np.cos(2 * np.pi * 5 * t)  # original signal: 5 Hz cosine
noise = np.zeros(x.size)  # impulse noise
for i in range(x.size):
    p1 = random.uniform(0, 1)
    if p1 < probability / 100:  # this sample is hit by an impulse
        p2 = random.uniform(0, 1)
        if p2 < 0.5:  # positive or negative impulse with equal chance
            noise[i] = amplitude
        else:
            noise[i] = -amplitude
y = x + noise  # noisy signal
# Figure 1: original signal
plt.figure(1)
plt.plot(t, x)
plt.xlabel('t (second)')
plt.ylabel('Amplitude')
plt.axis([0, 1, -12, 12])
# Figure 2: impulse noise alone
plt.figure(2)
plt.stem(t, noise)
plt.xlabel('t (second)')
plt.ylabel('Amplitude')
# Figure 3: noisy signal
plt.figure(3)
plt.plot(t, y)
plt.xlabel('t (second)')
plt.ylabel('Amplitude')
plt.axis([0, 1, -15, 15])
plt.show( ) | [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.stem",
"numpy.cos",
"numpy.random.uniform",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((226, 264), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(200)'], {'endpoint': '(False)'}), '(0, 1, 200, endpoint=False)\n', (237, 264), True, 'import numpy as np\n'), ((335, 351), 'numpy.zeros', 'np.zeros', (['x.size'], {}), '(x.size)\n', (343, 351), True, 'import numpy as np\n'), ((573, 586), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (583, 586), True, 'import matplotlib.pyplot as plt\n'), ((599, 613), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x'], {}), '(t, x)\n', (607, 613), True, 'import matplotlib.pyplot as plt\n'), ((616, 640), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t (second)"""'], {}), "('t (second)')\n", (626, 640), True, 'import matplotlib.pyplot as plt\n'), ((643, 666), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (653, 666), True, 'import matplotlib.pyplot as plt\n'), ((669, 694), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 1, -12, 12]'], {}), '([0, 1, -12, 12])\n', (677, 694), True, 'import matplotlib.pyplot as plt\n'), ((700, 713), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (710, 713), True, 'import matplotlib.pyplot as plt\n'), ((716, 734), 'matplotlib.pyplot.stem', 'plt.stem', (['t', 'noise'], {}), '(t, noise)\n', (724, 734), True, 'import matplotlib.pyplot as plt\n'), ((737, 761), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t (second)"""'], {}), "('t (second)')\n", (747, 761), True, 'import matplotlib.pyplot as plt\n'), ((764, 787), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (774, 787), True, 'import matplotlib.pyplot as plt\n'), ((791, 804), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {}), '(3)\n', (801, 804), True, 'import matplotlib.pyplot as plt\n'), ((807, 821), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'y'], {}), '(t, y)\n', (815, 821), True, 'import matplotlib.pyplot as plt\n'), ((824, 848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t (second)"""'], {}), "('t 
(second)')\n", (834, 848), True, 'import matplotlib.pyplot as plt\n'), ((851, 874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (861, 874), True, 'import matplotlib.pyplot as plt\n'), ((877, 902), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 1, -15, 15]'], {}), '([0, 1, -15, 15])\n', (885, 902), True, 'import matplotlib.pyplot as plt\n'), ((908, 918), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (916, 918), True, 'import matplotlib.pyplot as plt\n'), ((288, 313), 'numpy.cos', 'np.cos', (['(2 * np.pi * 5 * t)'], {}), '(2 * np.pi * 5 * t)\n', (294, 313), True, 'import numpy as np\n'), ((399, 419), 'numpy.random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (413, 419), True, 'import numpy.random as random\n'), ((457, 477), 'numpy.random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (471, 477), True, 'import numpy.random as random\n')] |
from collections import namedtuple
import numpy as np
from scipy.interpolate import Akima1DInterpolator as Akima
import openmdao.api as om
"""United States standard atmosphere 1976 tables, data obtained from http://www.digitaldutch.com/atmoscalc/index.htm"""
# NOTE(review): USatm1976Data is used as a plain namespace, not instantiated —
# the table arrays below are attached as class attributes. The declared field
# 'speed_of_sound' is never assigned; the 'a' attribute is used instead (and
# read by a_interp further down).
USatm1976Data = namedtuple("USatm1976Data", ["alt", "T", "P", "rho", "speed_of_sound", "viscosity"])
USatm1976Data.alt = np.array(
[
-1000,
0,
1000,
2000,
3000,
4000,
5000,
6000,
7000,
8000,
9000,
10000,
11000,
12000,
13000,
14000,
15000,
16000,
17000,
18000,
19000,
20000,
21000,
22000,
23000,
24000,
25000,
26000,
27000,
28000,
29000,
30000,
31000,
32000,
33000,
34000,
35000,
36000,
37000,
38000,
39000,
40000,
41000,
42000,
43000,
44000,
45000,
46000,
47000,
48000,
49000,
50000,
51000,
52000,
53000,
54000,
55000,
56000,
57000,
58000,
59000,
60000,
61000,
62000,
63000,
64000,
65000,
66000,
67000,
68000,
69000,
70000,
71000,
72000,
73000,
74000,
75000,
76000,
77000,
78000,
79000,
80000,
81000,
82000,
83000,
84000,
85000,
86000,
87000,
88000,
89000,
90000,
91000,
92000,
93000,
94000,
95000,
96000,
97000,
98000,
99000,
100000,
105000,
110000,
115000,
120000,
125000,
130000,
135000,
140000,
145000,
150000,
]
) # units='ft'
USatm1976Data.T = np.array(
[
522.236,
518.67,
515.104,
511.538,
507.972,
504.405,
500.839,
497.273,
493.707,
490.141,
486.575,
483.008,
479.442,
475.876,
472.31,
468.744,
465.178,
461.611,
458.045,
454.479,
450.913,
447.347,
443.781,
440.214,
436.648,
433.082,
429.516,
425.95,
422.384,
418.818,
415.251,
411.685,
408.119,
404.553,
400.987,
397.421,
393.854,
390.288,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
389.97,
390.18,
390.729,
391.278,
391.826,
392.375,
392.923,
393.472,
394.021,
394.569,
395.118,
395.667,
396.215,
396.764,
397.313,
397.861,
398.41,
398.958,
399.507,
400.056,
400.604,
401.153,
401.702,
402.25,
402.799,
403.348,
403.896,
404.445,
404.994,
405.542,
406.091,
406.639,
407.188,
407.737,
408.285,
408.834,
411.59,
419.271,
426.952,
434.633,
442.314,
449.995,
457.676,
465.357,
473.038,
480.719,
]
) # units='degR'
# US standard atmosphere pressure table (psi), one entry per altitude in
# USatm1976Data.alt. Fix: the 19,000 ft entry read 7.4123, which broke the
# strictly decreasing pressure profile (7.33889 at 18,000 ft directly above
# it in the table); the 1976 standard-atmosphere value is ~7.04123 psi — the
# original entry dropped the leading zero of the fractional part.
USatm1976Data.P = np.array(
    [
        15.2348,
        14.6959,
        14.1726,
        13.6644,
        13.1711,
        12.6923,
        12.2277,
        11.777,
        11.3398,
        10.9159,
        10.5049,
        10.1065,
        9.7204,
        9.34636,
        8.98405,
        8.63321,
        8.29354,
        7.96478,
        7.64665,
        7.33889,
        7.04123,  # was 7.4123 — non-monotonic typo, corrected
        6.75343,
        6.47523,
        6.20638,
        5.94664,
        5.69578,
        5.45355,
        5.21974,
        4.9941,
        4.77644,
        4.56651,
        4.36413,
        4.16906,
        3.98112,
        3.8001,
        3.6258,
        3.45803,
        3.29661,
        3.14191,
        2.99447,
        2.85395,
        2.72003,
        2.59239,
        2.47073,
        2.35479,
        2.24429,
        2.13897,
        2.0386,
        1.94293,
        1.85176,
        1.76486,
        1.68204,
        1.60311,
        1.52788,
        1.45618,
        1.38785,
        1.32272,
        1.26065,
        1.20149,
        1.14511,
        1.09137,
        1.04016,
        0.991347,
        0.944827,
        0.900489,
        0.858232,
        0.817958,
        0.779578,
        0.743039,
        0.708261,
        0.675156,
        0.643641,
        0.613638,
        0.585073,
        0.557875,
        0.531976,
        0.507313,
        0.483825,
        0.461455,
        0.440148,
        0.419853,
        0.400519,
        0.382101,
        0.364553,
        0.347833,
        0.331902,
        0.31672,
        0.302253,
        0.288464,
        0.275323,
        0.262796,
        0.250856,
        0.239473,
        0.228621,
        0.218275,
        0.20841,
        0.199003,
        0.190032,
        0.181478,
        0.173319,
        0.165537,
        0.158114,
        0.12582,
        0.10041,
        0.08046,
        0.064729,
        0.0522725,
        0.0423688,
        0.0344637,
        0.0281301,
        0.0230369,
        0.0189267,
    ]
)  # units='psi'
USatm1976Data.rho = np.array(
[
0.00244752,
0.00237717,
0.00230839,
0.00224114,
0.00217539,
0.00211114,
0.00204834,
0.00198698,
0.00192704,
0.0018685,
0.00181132,
0.00175549,
0.00170099,
0.00164779,
0.00159588,
0.00154522,
0.00149581,
0.00144761,
0.00140061,
0.00135479,
0.00131012,
0.00126659,
0.00122417,
0.00118285,
0.0011426,
0.00110341,
0.00106526,
0.00102812,
0.000991984,
0.000956827,
0.000922631,
0.000889378,
0.00085705,
0.000825628,
0.000795096,
0.000765434,
0.000736627,
0.000708657,
0.000675954,
0.000644234,
0.000614002,
0.000585189,
0.000557728,
0.000531556,
0.000506612,
0.000482838,
0.00046018,
0.000438586,
0.000418004,
0.000398389,
0.000379694,
0.000361876,
0.000344894,
0.000328709,
0.000313284,
0.000298583,
0.000284571,
0.000271217,
0.00025849,
0.00024636,
0.000234799,
0.000223781,
0.000213279,
0.000203271,
0.000193732,
0.000184641,
0.000175976,
0.000167629,
0.000159548,
0.000151867,
0.000144566,
0.000137625,
0.000131026,
0.000124753,
0.000118788,
0.000113116,
0.000107722,
0.000102592,
9.77131e-05,
9.30725e-05,
8.86582e-05,
0.000084459,
8.04641e-05,
7.66632e-05,
7.30467e-05,
6.96054e-05,
6.63307e-05,
6.32142e-05,
6.02481e-05,
5.74249e-05,
5.47376e-05,
5.21794e-05,
4.97441e-05,
4.74254e-05,
4.52178e-05,
4.31158e-05,
0.000041114,
3.92078e-05,
3.73923e-05,
3.56632e-05,
3.40162e-05,
3.24473e-05,
2.56472e-05,
2.00926e-05,
1.58108e-05,
1.24948e-05,
9.9151e-06,
7.89937e-06,
6.3177e-06,
5.07154e-06,
4.08586e-06,
3.30323e-06,
]
) # units='slug/ft**3'
USatm1976Data.a = np.array(
[
1120.28,
1116.45,
1112.61,
1108.75,
1104.88,
1100.99,
1097.09,
1093.18,
1089.25,
1085.31,
1081.36,
1077.39,
1073.4,
1069.4,
1065.39,
1061.36,
1057.31,
1053.25,
1049.18,
1045.08,
1040.97,
1036.85,
1032.71,
1028.55,
1024.38,
1020.19,
1015.98,
1011.75,
1007.51,
1003.24,
998.963,
994.664,
990.347,
986.01,
981.655,
977.28,
972.885,
968.471,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.076,
968.337,
969.017,
969.698,
970.377,
971.056,
971.735,
972.413,
973.091,
973.768,
974.445,
975.121,
975.797,
976.472,
977.147,
977.822,
978.496,
979.169,
979.842,
980.515,
981.187,
981.858,
982.53,
983.2,
983.871,
984.541,
985.21,
985.879,
986.547,
987.215,
987.883,
988.55,
989.217,
989.883,
990.549,
991.214,
994.549,
1003.79,
1012.94,
1022.01,
1031,
1039.91,
1048.75,
1057.52,
1066.21,
1074.83,
]
) # units='ft/s'
USatm1976Data.viscosity = np.array(
[
3.81e-07,
3.78e-07,
3.76e-07,
3.74e-07,
3.72e-07,
3.70e-07,
3.68e-07,
3.66e-07,
3.64e-07,
3.62e-07,
3.60e-07,
3.57e-07,
3.55e-07,
3.53e-07,
3.51e-07,
3.49e-07,
3.47e-07,
3.45e-07,
3.42e-07,
3.40e-07,
3.38e-07,
3.36e-07,
3.34e-07,
3.31e-07,
3.29e-07,
3.27e-07,
3.25e-07,
3.22e-07,
3.20e-07,
3.18e-07,
3.16e-07,
3.13e-07,
3.11e-07,
3.09e-07,
3.06e-07,
3.04e-07,
3.02e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
2.99e-07,
3.00e-07,
3.00e-07,
3.00e-07,
3.01e-07,
3.01e-07,
3.01e-07,
3.02e-07,
3.02e-07,
3.03e-07,
3.03e-07,
3.03e-07,
3.04e-07,
3.04e-07,
3.04e-07,
3.05e-07,
3.05e-07,
3.05e-07,
3.06e-07,
3.06e-07,
3.06e-07,
3.07e-07,
3.07e-07,
3.08e-07,
3.08e-07,
3.08e-07,
3.09e-07,
3.09e-07,
3.09e-07,
3.10e-07,
3.10e-07,
3.10e-07,
3.11e-07,
3.11e-07,
3.11e-07,
3.13e-07,
3.18e-07,
3.23e-07,
3.28e-07,
3.33e-07,
3.37e-07,
3.42e-07,
3.47e-07,
3.51e-07,
3.56e-07,
]
) # units='lbf*s/ft**2'
# One Akima spline per atmospheric property, keyed on altitude (ft).
# Note that the speed-of-sound spline reads the 'a' attribute, not the
# namedtuple field 'speed_of_sound'.
T_interp = Akima(USatm1976Data.alt, USatm1976Data.T)
P_interp = Akima(USatm1976Data.alt, USatm1976Data.P)
rho_interp = Akima(USatm1976Data.alt, USatm1976Data.rho)
a_interp = Akima(USatm1976Data.alt, USatm1976Data.a)
viscosity_interp = Akima(USatm1976Data.alt, USatm1976Data.viscosity)
# First-derivative splines, used for the analytic partials in AtmosComp.
T_interp_deriv = T_interp.derivative(1)
P_interp_deriv = P_interp.derivative(1)
rho_interp_deriv = rho_interp.derivative(1)
a_interp_deriv = a_interp.derivative(1)
viscosity_interp_deriv = viscosity_interp.derivative(1)
class AtmosComp(om.ExplicitComponent):
    """OpenMDAO explicit component wrapping the 1976 US standard atmosphere.

    Interpolates temperature, pressure, density, speed of sound and dynamic
    viscosity from the module-level Akima splines as a function of altitude,
    and computes airspeed ``v = speed_of_sound * Mach_number``.
    """

    def setup(self):
        # Inputs: altitude drives every table lookup; Mach number only
        # scales the airspeed output.
        self.add_input("altitude", val=1.0, units="ft")
        self.add_input("Mach_number", val=1.0)
        self.add_output("T", val=1.0, units="degR")
        self.add_output("P", val=1.0, units="psi")
        self.add_output("rho", val=1.0, units="slug/ft**3")
        self.add_output("speed_of_sound", val=1.0, units="ft/s")
        self.add_output("mu", val=1.0, units="lbf*s/ft**2")
        self.add_output("v", val=1.0, units="ft/s")
        # All outputs depend on altitude; only v also depends on Mach.
        self.declare_partials(["T", "P", "rho", "speed_of_sound", "mu", "v"], "altitude")
        self.declare_partials("v", "Mach_number")

    def compute(self, inputs, outputs):
        # Direct spline evaluation at the requested altitude.
        outputs["T"] = T_interp(inputs["altitude"])
        outputs["P"] = P_interp(inputs["altitude"])
        outputs["rho"] = rho_interp(inputs["altitude"])
        outputs["speed_of_sound"] = a_interp(inputs["altitude"])
        outputs["mu"] = viscosity_interp(inputs["altitude"])
        outputs["v"] = outputs["speed_of_sound"] * inputs["Mach_number"]

    def compute_partials(self, inputs, partials):
        # Analytic derivatives from the pre-built derivative splines.
        partials["T", "altitude"] = T_interp_deriv(inputs["altitude"])
        partials["P", "altitude"] = P_interp_deriv(inputs["altitude"])
        partials["rho", "altitude"] = rho_interp_deriv(inputs["altitude"])
        partials["speed_of_sound", "altitude"] = a_interp_deriv(inputs["altitude"])
        partials["mu", "altitude"] = viscosity_interp_deriv(inputs["altitude"])
        # Chain rule: v = a(alt) * M.
        partials["v", "altitude"] = a_interp_deriv(inputs["altitude"]) * inputs["Mach_number"]
        partials["v", "Mach_number"] = a_interp(inputs["altitude"])
| [
"numpy.array",
"collections.namedtuple",
"scipy.interpolate.Akima1DInterpolator"
] | [((288, 376), 'collections.namedtuple', 'namedtuple', (['"""USatm1976Data"""', "['alt', 'T', 'P', 'rho', 'speed_of_sound', 'viscosity']"], {}), "('USatm1976Data', ['alt', 'T', 'P', 'rho', 'speed_of_sound',\n 'viscosity'])\n", (298, 376), False, 'from collections import namedtuple\n'), ((398, 1244), 'numpy.array', 'np.array', (['[-1000, 0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, \n 11000, 12000, 13000, 14000, 15000, 16000, 17000, 18000, 19000, 20000, \n 21000, 22000, 23000, 24000, 25000, 26000, 27000, 28000, 29000, 30000, \n 31000, 32000, 33000, 34000, 35000, 36000, 37000, 38000, 39000, 40000, \n 41000, 42000, 43000, 44000, 45000, 46000, 47000, 48000, 49000, 50000, \n 51000, 52000, 53000, 54000, 55000, 56000, 57000, 58000, 59000, 60000, \n 61000, 62000, 63000, 64000, 65000, 66000, 67000, 68000, 69000, 70000, \n 71000, 72000, 73000, 74000, 75000, 76000, 77000, 78000, 79000, 80000, \n 81000, 82000, 83000, 84000, 85000, 86000, 87000, 88000, 89000, 90000, \n 91000, 92000, 93000, 94000, 95000, 96000, 97000, 98000, 99000, 100000, \n 105000, 110000, 115000, 120000, 125000, 130000, 135000, 140000, 145000,\n 150000]'], {}), '([-1000, 0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, \n 10000, 11000, 12000, 13000, 14000, 15000, 16000, 17000, 18000, 19000, \n 20000, 21000, 22000, 23000, 24000, 25000, 26000, 27000, 28000, 29000, \n 30000, 31000, 32000, 33000, 34000, 35000, 36000, 37000, 38000, 39000, \n 40000, 41000, 42000, 43000, 44000, 45000, 46000, 47000, 48000, 49000, \n 50000, 51000, 52000, 53000, 54000, 55000, 56000, 57000, 58000, 59000, \n 60000, 61000, 62000, 63000, 64000, 65000, 66000, 67000, 68000, 69000, \n 70000, 71000, 72000, 73000, 74000, 75000, 76000, 77000, 78000, 79000, \n 80000, 81000, 82000, 83000, 84000, 85000, 86000, 87000, 88000, 89000, \n 90000, 91000, 92000, 93000, 94000, 95000, 96000, 97000, 98000, 99000, \n 100000, 105000, 110000, 115000, 120000, 125000, 130000, 135000, 140000,\n 145000, 150000])\n', (406, 1244), True, 
'import numpy as np\n'), ((2250, 3291), 'numpy.array', 'np.array', (['[522.236, 518.67, 515.104, 511.538, 507.972, 504.405, 500.839, 497.273, \n 493.707, 490.141, 486.575, 483.008, 479.442, 475.876, 472.31, 468.744, \n 465.178, 461.611, 458.045, 454.479, 450.913, 447.347, 443.781, 440.214,\n 436.648, 433.082, 429.516, 425.95, 422.384, 418.818, 415.251, 411.685, \n 408.119, 404.553, 400.987, 397.421, 393.854, 390.288, 389.97, 389.97, \n 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97,\n 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97,\n 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97,\n 390.18, 390.729, 391.278, 391.826, 392.375, 392.923, 393.472, 394.021, \n 394.569, 395.118, 395.667, 396.215, 396.764, 397.313, 397.861, 398.41, \n 398.958, 399.507, 400.056, 400.604, 401.153, 401.702, 402.25, 402.799, \n 403.348, 403.896, 404.445, 404.994, 405.542, 406.091, 406.639, 407.188,\n 407.737, 408.285, 408.834, 411.59, 419.271, 426.952, 434.633, 442.314, \n 449.995, 457.676, 465.357, 473.038, 480.719]'], {}), '([522.236, 518.67, 515.104, 511.538, 507.972, 504.405, 500.839, \n 497.273, 493.707, 490.141, 486.575, 483.008, 479.442, 475.876, 472.31, \n 468.744, 465.178, 461.611, 458.045, 454.479, 450.913, 447.347, 443.781,\n 440.214, 436.648, 433.082, 429.516, 425.95, 422.384, 418.818, 415.251, \n 411.685, 408.119, 404.553, 400.987, 397.421, 393.854, 390.288, 389.97, \n 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97,\n 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97,\n 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97,\n 389.97, 390.18, 390.729, 391.278, 391.826, 392.375, 392.923, 393.472, \n 394.021, 394.569, 395.118, 395.667, 396.215, 396.764, 397.313, 397.861,\n 398.41, 398.958, 399.507, 400.056, 400.604, 401.153, 401.702, 402.25, \n 402.799, 403.348, 403.896, 404.445, 404.994, 405.542, 406.091, 406.639,\n 407.188, 407.737, 408.285, 
408.834, 411.59, 419.271, 426.952, 434.633, \n 442.314, 449.995, 457.676, 465.357, 473.038, 480.719])\n', (2258, 3291), True, 'import numpy as np\n'), ((4294, 5429), 'numpy.array', 'np.array', (['[15.2348, 14.6959, 14.1726, 13.6644, 13.1711, 12.6923, 12.2277, 11.777, \n 11.3398, 10.9159, 10.5049, 10.1065, 9.7204, 9.34636, 8.98405, 8.63321, \n 8.29354, 7.96478, 7.64665, 7.33889, 7.4123, 6.75343, 6.47523, 6.20638, \n 5.94664, 5.69578, 5.45355, 5.21974, 4.9941, 4.77644, 4.56651, 4.36413, \n 4.16906, 3.98112, 3.8001, 3.6258, 3.45803, 3.29661, 3.14191, 2.99447, \n 2.85395, 2.72003, 2.59239, 2.47073, 2.35479, 2.24429, 2.13897, 2.0386, \n 1.94293, 1.85176, 1.76486, 1.68204, 1.60311, 1.52788, 1.45618, 1.38785,\n 1.32272, 1.26065, 1.20149, 1.14511, 1.09137, 1.04016, 0.991347, \n 0.944827, 0.900489, 0.858232, 0.817958, 0.779578, 0.743039, 0.708261, \n 0.675156, 0.643641, 0.613638, 0.585073, 0.557875, 0.531976, 0.507313, \n 0.483825, 0.461455, 0.440148, 0.419853, 0.400519, 0.382101, 0.364553, \n 0.347833, 0.331902, 0.31672, 0.302253, 0.288464, 0.275323, 0.262796, \n 0.250856, 0.239473, 0.228621, 0.218275, 0.20841, 0.199003, 0.190032, \n 0.181478, 0.173319, 0.165537, 0.158114, 0.12582, 0.10041, 0.08046, \n 0.064729, 0.0522725, 0.0423688, 0.0344637, 0.0281301, 0.0230369, 0.0189267]'], {}), '([15.2348, 14.6959, 14.1726, 13.6644, 13.1711, 12.6923, 12.2277, \n 11.777, 11.3398, 10.9159, 10.5049, 10.1065, 9.7204, 9.34636, 8.98405, \n 8.63321, 8.29354, 7.96478, 7.64665, 7.33889, 7.4123, 6.75343, 6.47523, \n 6.20638, 5.94664, 5.69578, 5.45355, 5.21974, 4.9941, 4.77644, 4.56651, \n 4.36413, 4.16906, 3.98112, 3.8001, 3.6258, 3.45803, 3.29661, 3.14191, \n 2.99447, 2.85395, 2.72003, 2.59239, 2.47073, 2.35479, 2.24429, 2.13897,\n 2.0386, 1.94293, 1.85176, 1.76486, 1.68204, 1.60311, 1.52788, 1.45618, \n 1.38785, 1.32272, 1.26065, 1.20149, 1.14511, 1.09137, 1.04016, 0.991347,\n 0.944827, 0.900489, 0.858232, 0.817958, 0.779578, 0.743039, 0.708261, \n 0.675156, 0.643641, 0.613638, 0.585073, 
0.557875, 0.531976, 0.507313, \n 0.483825, 0.461455, 0.440148, 0.419853, 0.400519, 0.382101, 0.364553, \n 0.347833, 0.331902, 0.31672, 0.302253, 0.288464, 0.275323, 0.262796, \n 0.250856, 0.239473, 0.228621, 0.218275, 0.20841, 0.199003, 0.190032, \n 0.181478, 0.173319, 0.165537, 0.158114, 0.12582, 0.10041, 0.08046, \n 0.064729, 0.0522725, 0.0423688, 0.0344637, 0.0281301, 0.0230369, 0.0189267]\n )\n', (4302, 5429), True, 'import numpy as np\n'), ((6419, 7949), 'numpy.array', 'np.array', (['[0.00244752, 0.00237717, 0.00230839, 0.00224114, 0.00217539, 0.00211114, \n 0.00204834, 0.00198698, 0.00192704, 0.0018685, 0.00181132, 0.00175549, \n 0.00170099, 0.00164779, 0.00159588, 0.00154522, 0.00149581, 0.00144761,\n 0.00140061, 0.00135479, 0.00131012, 0.00126659, 0.00122417, 0.00118285,\n 0.0011426, 0.00110341, 0.00106526, 0.00102812, 0.000991984, 0.000956827,\n 0.000922631, 0.000889378, 0.00085705, 0.000825628, 0.000795096, \n 0.000765434, 0.000736627, 0.000708657, 0.000675954, 0.000644234, \n 0.000614002, 0.000585189, 0.000557728, 0.000531556, 0.000506612, \n 0.000482838, 0.00046018, 0.000438586, 0.000418004, 0.000398389, \n 0.000379694, 0.000361876, 0.000344894, 0.000328709, 0.000313284, \n 0.000298583, 0.000284571, 0.000271217, 0.00025849, 0.00024636, \n 0.000234799, 0.000223781, 0.000213279, 0.000203271, 0.000193732, \n 0.000184641, 0.000175976, 0.000167629, 0.000159548, 0.000151867, \n 0.000144566, 0.000137625, 0.000131026, 0.000124753, 0.000118788, \n 0.000113116, 0.000107722, 0.000102592, 9.77131e-05, 9.30725e-05, \n 8.86582e-05, 8.4459e-05, 8.04641e-05, 7.66632e-05, 7.30467e-05, \n 6.96054e-05, 6.63307e-05, 6.32142e-05, 6.02481e-05, 5.74249e-05, \n 5.47376e-05, 5.21794e-05, 4.97441e-05, 4.74254e-05, 4.52178e-05, \n 4.31158e-05, 4.1114e-05, 3.92078e-05, 3.73923e-05, 3.56632e-05, \n 3.40162e-05, 3.24473e-05, 2.56472e-05, 2.00926e-05, 1.58108e-05, \n 1.24948e-05, 9.9151e-06, 7.89937e-06, 6.3177e-06, 5.07154e-06, \n 4.08586e-06, 3.30323e-06]'], {}), '([0.00244752, 
0.00237717, 0.00230839, 0.00224114, 0.00217539, \n 0.00211114, 0.00204834, 0.00198698, 0.00192704, 0.0018685, 0.00181132, \n 0.00175549, 0.00170099, 0.00164779, 0.00159588, 0.00154522, 0.00149581,\n 0.00144761, 0.00140061, 0.00135479, 0.00131012, 0.00126659, 0.00122417,\n 0.00118285, 0.0011426, 0.00110341, 0.00106526, 0.00102812, 0.000991984,\n 0.000956827, 0.000922631, 0.000889378, 0.00085705, 0.000825628, \n 0.000795096, 0.000765434, 0.000736627, 0.000708657, 0.000675954, \n 0.000644234, 0.000614002, 0.000585189, 0.000557728, 0.000531556, \n 0.000506612, 0.000482838, 0.00046018, 0.000438586, 0.000418004, \n 0.000398389, 0.000379694, 0.000361876, 0.000344894, 0.000328709, \n 0.000313284, 0.000298583, 0.000284571, 0.000271217, 0.00025849, \n 0.00024636, 0.000234799, 0.000223781, 0.000213279, 0.000203271, \n 0.000193732, 0.000184641, 0.000175976, 0.000167629, 0.000159548, \n 0.000151867, 0.000144566, 0.000137625, 0.000131026, 0.000124753, \n 0.000118788, 0.000113116, 0.000107722, 0.000102592, 9.77131e-05, \n 9.30725e-05, 8.86582e-05, 8.4459e-05, 8.04641e-05, 7.66632e-05, \n 7.30467e-05, 6.96054e-05, 6.63307e-05, 6.32142e-05, 6.02481e-05, \n 5.74249e-05, 5.47376e-05, 5.21794e-05, 4.97441e-05, 4.74254e-05, \n 4.52178e-05, 4.31158e-05, 4.1114e-05, 3.92078e-05, 3.73923e-05, \n 3.56632e-05, 3.40162e-05, 3.24473e-05, 2.56472e-05, 2.00926e-05, \n 1.58108e-05, 1.24948e-05, 9.9151e-06, 7.89937e-06, 6.3177e-06, \n 5.07154e-06, 4.08586e-06, 3.30323e-06])\n', (6427, 7949), True, 'import numpy as np\n'), ((8917, 9985), 'numpy.array', 'np.array', (['[1120.28, 1116.45, 1112.61, 1108.75, 1104.88, 1100.99, 1097.09, 1093.18, \n 1089.25, 1085.31, 1081.36, 1077.39, 1073.4, 1069.4, 1065.39, 1061.36, \n 1057.31, 1053.25, 1049.18, 1045.08, 1040.97, 1036.85, 1032.71, 1028.55,\n 1024.38, 1020.19, 1015.98, 1011.75, 1007.51, 1003.24, 998.963, 994.664,\n 990.347, 986.01, 981.655, 977.28, 972.885, 968.471, 968.076, 968.076, \n 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 
968.076,\n 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076,\n 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076,\n 968.076, 968.076, 968.076, 968.337, 969.017, 969.698, 970.377, 971.056,\n 971.735, 972.413, 973.091, 973.768, 974.445, 975.121, 975.797, 976.472,\n 977.147, 977.822, 978.496, 979.169, 979.842, 980.515, 981.187, 981.858,\n 982.53, 983.2, 983.871, 984.541, 985.21, 985.879, 986.547, 987.215, \n 987.883, 988.55, 989.217, 989.883, 990.549, 991.214, 994.549, 1003.79, \n 1012.94, 1022.01, 1031, 1039.91, 1048.75, 1057.52, 1066.21, 1074.83]'], {}), '([1120.28, 1116.45, 1112.61, 1108.75, 1104.88, 1100.99, 1097.09, \n 1093.18, 1089.25, 1085.31, 1081.36, 1077.39, 1073.4, 1069.4, 1065.39, \n 1061.36, 1057.31, 1053.25, 1049.18, 1045.08, 1040.97, 1036.85, 1032.71,\n 1028.55, 1024.38, 1020.19, 1015.98, 1011.75, 1007.51, 1003.24, 998.963,\n 994.664, 990.347, 986.01, 981.655, 977.28, 972.885, 968.471, 968.076, \n 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076,\n 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076,\n 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076,\n 968.076, 968.076, 968.076, 968.076, 968.337, 969.017, 969.698, 970.377,\n 971.056, 971.735, 972.413, 973.091, 973.768, 974.445, 975.121, 975.797,\n 976.472, 977.147, 977.822, 978.496, 979.169, 979.842, 980.515, 981.187,\n 981.858, 982.53, 983.2, 983.871, 984.541, 985.21, 985.879, 986.547, \n 987.215, 987.883, 988.55, 989.217, 989.883, 990.549, 991.214, 994.549, \n 1003.79, 1012.94, 1022.01, 1031, 1039.91, 1048.75, 1057.52, 1066.21, \n 1074.83])\n', (8925, 9985), True, 'import numpy as np\n'), ((10993, 12187), 'numpy.array', 'np.array', (['[3.81e-07, 3.78e-07, 3.76e-07, 3.74e-07, 3.72e-07, 3.7e-07, 3.68e-07, \n 3.66e-07, 3.64e-07, 3.62e-07, 3.6e-07, 3.57e-07, 3.55e-07, 3.53e-07, \n 3.51e-07, 3.49e-07, 3.47e-07, 3.45e-07, 3.42e-07, 3.4e-07, 3.38e-07, \n 3.36e-07, 3.34e-07, 3.31e-07, 3.29e-07, 3.27e-07, 
3.25e-07, 3.22e-07, \n 3.2e-07, 3.18e-07, 3.16e-07, 3.13e-07, 3.11e-07, 3.09e-07, 3.06e-07, \n 3.04e-07, 3.02e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, \n 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, \n 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, \n 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, \n 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 3e-07, 3e-07, 3e-07, \n 3.01e-07, 3.01e-07, 3.01e-07, 3.02e-07, 3.02e-07, 3.03e-07, 3.03e-07, \n 3.03e-07, 3.04e-07, 3.04e-07, 3.04e-07, 3.05e-07, 3.05e-07, 3.05e-07, \n 3.06e-07, 3.06e-07, 3.06e-07, 3.07e-07, 3.07e-07, 3.08e-07, 3.08e-07, \n 3.08e-07, 3.09e-07, 3.09e-07, 3.09e-07, 3.1e-07, 3.1e-07, 3.1e-07, \n 3.11e-07, 3.11e-07, 3.11e-07, 3.13e-07, 3.18e-07, 3.23e-07, 3.28e-07, \n 3.33e-07, 3.37e-07, 3.42e-07, 3.47e-07, 3.51e-07, 3.56e-07]'], {}), '([3.81e-07, 3.78e-07, 3.76e-07, 3.74e-07, 3.72e-07, 3.7e-07, \n 3.68e-07, 3.66e-07, 3.64e-07, 3.62e-07, 3.6e-07, 3.57e-07, 3.55e-07, \n 3.53e-07, 3.51e-07, 3.49e-07, 3.47e-07, 3.45e-07, 3.42e-07, 3.4e-07, \n 3.38e-07, 3.36e-07, 3.34e-07, 3.31e-07, 3.29e-07, 3.27e-07, 3.25e-07, \n 3.22e-07, 3.2e-07, 3.18e-07, 3.16e-07, 3.13e-07, 3.11e-07, 3.09e-07, \n 3.06e-07, 3.04e-07, 3.02e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, \n 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, \n 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, \n 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, \n 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 3e-07, \n 3e-07, 3e-07, 3.01e-07, 3.01e-07, 3.01e-07, 3.02e-07, 3.02e-07, \n 3.03e-07, 3.03e-07, 3.03e-07, 3.04e-07, 3.04e-07, 3.04e-07, 3.05e-07, \n 3.05e-07, 3.05e-07, 3.06e-07, 3.06e-07, 3.06e-07, 3.07e-07, 3.07e-07, \n 3.08e-07, 3.08e-07, 3.08e-07, 3.09e-07, 3.09e-07, 3.09e-07, 3.1e-07, \n 3.1e-07, 3.1e-07, 3.11e-07, 3.11e-07, 3.11e-07, 3.13e-07, 3.18e-07, \n 3.23e-07, 3.28e-07, 3.33e-07, 
3.37e-07, 3.42e-07, 3.47e-07, 3.51e-07, \n 3.56e-07])\n', (11001, 12187), True, 'import numpy as np\n'), ((13185, 13226), 'scipy.interpolate.Akima1DInterpolator', 'Akima', (['USatm1976Data.alt', 'USatm1976Data.T'], {}), '(USatm1976Data.alt, USatm1976Data.T)\n', (13190, 13226), True, 'from scipy.interpolate import Akima1DInterpolator as Akima\n'), ((13239, 13280), 'scipy.interpolate.Akima1DInterpolator', 'Akima', (['USatm1976Data.alt', 'USatm1976Data.P'], {}), '(USatm1976Data.alt, USatm1976Data.P)\n', (13244, 13280), True, 'from scipy.interpolate import Akima1DInterpolator as Akima\n'), ((13295, 13338), 'scipy.interpolate.Akima1DInterpolator', 'Akima', (['USatm1976Data.alt', 'USatm1976Data.rho'], {}), '(USatm1976Data.alt, USatm1976Data.rho)\n', (13300, 13338), True, 'from scipy.interpolate import Akima1DInterpolator as Akima\n'), ((13351, 13392), 'scipy.interpolate.Akima1DInterpolator', 'Akima', (['USatm1976Data.alt', 'USatm1976Data.a'], {}), '(USatm1976Data.alt, USatm1976Data.a)\n', (13356, 13392), True, 'from scipy.interpolate import Akima1DInterpolator as Akima\n'), ((13413, 13462), 'scipy.interpolate.Akima1DInterpolator', 'Akima', (['USatm1976Data.alt', 'USatm1976Data.viscosity'], {}), '(USatm1976Data.alt, USatm1976Data.viscosity)\n', (13418, 13462), True, 'from scipy.interpolate import Akima1DInterpolator as Akima\n')] |
import os
import string
from ast import literal_eval
import numpy as np
import operator
from random import shuffle
import gc
import EmbeddingsManager as em
from nltk import sent_tokenize
import re
import random
from collections import OrderedDict
SOS_TOKEN = 0 # Start of sentence token
EOS_TOKEN = 1 # End of sentence token and padding
UNK_TOKEN = 2 # Unknown word token
class CorpusProcessor(object):
    def __init__(self, movie_lines_file, movie_conversations_file):
        """Store the raw corpus file paths; no parsing happens here.

        Args:
            movie_lines_file: path to the raw movie-lines file
                (line code -> utterance text records).
            movie_conversations_file: path to the raw conversations file
                (each record lists the line codes of one conversation).
        """
        self.movie_lines_file = movie_lines_file
        self.movie_conversations_file = movie_conversations_file
# RAW CORPUS operations ------------------------------------------------
def get_hashed_movie_lines(self):
line2content = {}
max_elements_per_line = 5
delimitator = ' +++$+++ '
line_code_index = 0
line_content_index = -1
with open(self.movie_lines_file, 'r', encoding='iso-8859-1') as f:
while True:
line = f.readline()
if not line:
break
elements = line.split(delimitator)
if len(elements) < max_elements_per_line:
continue
line_code = elements[line_code_index]
line_content = elements[line_content_index]
line2content[line_code] = line_content
return line2content
def get_coded_conversations(self):
coded_conversations = []
max_elements_per_line = 4
delimitator = ' +++$+++ '
coded_conversation_index = -1
with open(self.movie_conversations_file, 'r', encoding='iso-8859-1') as f:
while True:
line = f.readline()
if not line:
break
elements = line.split(delimitator)
if len(elements) < max_elements_per_line:
continue
coded_conversation_string = elements[coded_conversation_index]
coded_conversation_list = literal_eval(coded_conversation_string)
coded_conversations.append(coded_conversation_list)
return coded_conversations
def get_question_answer_set(self, line2content, coded_conversations):
questions = []
answers = []
for conversation in coded_conversations:
if len(conversation) < 2:
continue
for index in range(0, len(conversation), 2):
if index + 1 < len(conversation):
question_code = conversation[index]
answer_code = conversation[index + 1]
questions.append(line2content[question_code])
answers.append(line2content[answer_code])
return questions, answers
def save_clean_corpus(self, questions, answers, path):
train_data_filepath = os.path.join(path, 'cornell_train_data.txt')
train_data = open(train_data_filepath, 'w', encoding='utf8')
for i in range(len(questions)):
train_data.write(questions[i])
train_data.write(answers[i])
train_data.close()
    def compute_clean_corpus(self, path):
        """End-to-end pass over the raw corpus: parse, pair and persist.

        Populates ``self.questions`` / ``self.answers`` with paired
        utterances and writes them interleaved to ``cornell_train_data.txt``
        under *path* (see save_clean_corpus).
        """
        line2content = self.get_hashed_movie_lines()
        coded_conversations = self.get_coded_conversations()
        questions, answers = self.get_question_answer_set(line2content, coded_conversations)
        self.questions = questions
        self.answers = answers
        self.save_clean_corpus(questions, answers, path)
# -------------------------------------------------------------------------------------------------
# CLEAN CORPUS operations -------------------------------------------------------------------------
def load_clean_corpus(self, path, file_names):
questions = []
answers = []
for file_name in file_names:
file_path = os.path.join(path, file_name)
file = open(file_path, 'r', encoding='utf8')
for index, line in enumerate(file):
if index % 2 == 0:
questions.append(line)
else:
answers.append(line)
file.close()
self.questions = questions
self.answers = answers
def get_word_sentences(self):
word_questions = []
word_answers = []
vocabulary = {}
remove_punctuation = str.maketrans('', '', string.punctuation)
remove_digits = str.maketrans('', '', string.digits)
for index in range(len(self.questions)):
self.questions[index] = self.questions[index].lower().translate(remove_digits).translate(remove_punctuation)
q_words = self.questions[index].split()
self.answers[index] = self.answers[index].lower().translate(remove_digits).translate(remove_punctuation)
a_words = self.answers[index].split()
if len(q_words) > 0 and len(a_words) > 0:
word_questions.append([w for w in q_words])
word_answers.append([w for w in a_words])
for w in q_words:
if w not in vocabulary:
vocabulary[w] = 1
else:
vocabulary[w] += 1
for w in a_words:
if w not in vocabulary:
vocabulary[w] = 1
else:
vocabulary[w] += 1
self.word_questions = word_questions
self.word_answers = word_answers
self.vocabulary = vocabulary
self.sorted_vocabulary = sorted(vocabulary.items(), key=operator.itemgetter(1), reverse=True)
def filter_unk_sentences(self, sentence, sentence_max_len):
num_unk_tokens = 0
for token in sentence:
if token == UNK_TOKEN:
num_unk_tokens += 1
sentence_len = min(len(sentence), sentence_max_len)
if 100.0*num_unk_tokens/sentence_len > 20:
return False
return True
def get_index_sentences(self, embedding_manager, sentence_max_len):
word_questions = []
word_answers = []
for index in range(len(self.word_questions)):
q_converted_sentence = embedding_manager.sentence_to_word_indexes(self.word_questions[index])
a_converted_sentence = embedding_manager.sentence_to_word_indexes(self.word_answers[index])
if len(q_converted_sentence) > 0 and len(a_converted_sentence) > 0 \
and self.filter_unk_sentences(q_converted_sentence, sentence_max_len) \
and self.filter_unk_sentences(a_converted_sentence, sentence_max_len):
word_questions.append(q_converted_sentence)
word_answers.append(a_converted_sentence)
self.word_questions = word_questions
self.word_answers = word_answers
# Free unused memory
gc.collect()
# -----------------------------------------------------------------------------------------------------------
# Training methods ------------------------------------------------------------------------------------------
def get_train_batch(self, position, batch_size, max_len):
left = position
if position + batch_size - 1 >= len(self.word_questions):
left = len(self.word_questions) - batch_size
def truncate(sentence):
if max_len and len(sentence) > max_len:
return sentence[:max_len]
else:
return sentence
questions = [truncate(self.word_questions[i]) for i in range(left, left + batch_size)]
questions_len = [len(question) for question in questions]
answers = [truncate(self.word_answers[i]) for i in range(left, left + batch_size)]
answers_len =[len(answer) for answer in answers]
questions_len = np.array(questions_len)
answers_len = np.array(answers_len)
maxlen_questions = np.max(questions_len)
maxlen_answers = np.max(answers_len)
padded_questions = np.ones((batch_size, maxlen_questions)).astype('int32') * em.EOS_TOKEN
padded_answers = np.ones((batch_size, maxlen_answers)).astype('int32') * em.EOS_TOKEN
for index, [question, answer] in enumerate(zip(questions, answers)):
padded_questions[index, :questions_len[index]] = question
padded_answers[index, :answers_len[index]] = answer
return padded_questions, questions_len, padded_answers, answers_len
def shuffle_train_data(self):
cumulated_data = list(zip(self.word_questions, self.word_answers))
shuffle(cumulated_data)
self.word_questions = [sentence[0] for sentence in cumulated_data]
self.word_answers = [sentence[1] for sentence in cumulated_data]
# Einstein wikipedia corpus processing ----------------------------------------------
def split_into_sentences(self, path, file_name, data_file_name):
impersonal2personal = [("<NAME> is", 'I am'),
("<NAME>'s", 'my'),
("<NAME>", 'I'),
('Albert is', 'I am'),
('Einstein is', 'I am'),
("Albert's", 'my'),
("Einstein's", 'my'),
('Albert', 'I'),
('Einstein', 'I'),
('he is', 'I am'),
('He is', 'I am'),
('he', 'I'),
('He', 'I'),
('his', 'my'),
('His', 'My'),
('him', 'me'),
('Him', 'Me'),
('theirs', 'ours'),
('Theirs', 'Ours'),
('their', 'our'),
('Their', 'Our'),
]
file_path = os.path.join(path, file_name)
file = open(file_path, 'r', encoding='utf8')
data_file_path = os.path.join(path, data_file_name)
datafile = open(data_file_path, 'w', encoding='utf8')
for index, line in enumerate(file):
sentences = sent_tokenize(line)
for index in range(len(sentences)):
sentences[index] = re.sub("\[\d+\]", "", sentences[index])
for key in impersonal2personal:
sentences[index] = re.sub(r"\b%s\b" % key[0], key[1], sentences[index])
if len(sentences[index]) > 5:
datafile.write(sentences[index])
datafile.write('\n')
file.close()
datafile.close()
def get_personal_answers(self, path, file_name, embedding_manager):
file_path = os.path.join(path, file_name)
file = open(file_path, 'r', encoding='utf8')
personal_answers = []
for index, line in enumerate(file):
personal_answers.append(line)
self.personal_answers = personal_answers
word_personal_answers = []
remove_punctuation = str.maketrans('', '', string.punctuation)
remove_digits = str.maketrans('', '', string.digits)
for index in range(len(self.personal_answers)):
formatted = self.personal_answers[index].lower().translate(remove_digits).translate(remove_punctuation)
personal_words = formatted.split()
word_personal_answers.append([w for w in personal_words])
self.word_personal_answers = []
for index in range(len(word_personal_answers)):
converted_sentence = embedding_manager.sentence_to_word_indexes(word_personal_answers[index])
if len(converted_sentence) > 0:
self.word_personal_answers.append(converted_sentence)
def get_random_batch(self, position, batch_size, max_len):
left = position
if position + batch_size - 1 >= len(self.word_questions):
left = len(self.word_questions) - batch_size
def truncate(sentence):
if len(sentence) > max_len:
return sentence[:max_len]
else:
return sentence
negative_answers = []
for i in range(batch_size):
while True:
negative_index = random.randrange(len(self.word_answers))
if negative_index < left or negative_index >= left + batch_size:
break
negative_answers.append(truncate(self.word_answers[negative_index]))
negative_answers_len = [len(answer) for answer in negative_answers]
negative_answers_len = np.array(negative_answers_len)
maxlen_answers = np.max(negative_answers_len)
padded_answers = np.ones((batch_size, maxlen_answers)).astype('int32') * em.EOS_TOKEN
for index, answer in enumerate(negative_answers):
padded_answers[index, :negative_answers_len[index]] = answer
return padded_answers, negative_answers_len
def get_personal_answers_batch(self):
batch_size = len(self.word_personal_answers)
answers = [self.word_personal_answers[i] for i in range(batch_size)]
answers_len = [len(answer) for answer in answers]
answers_len = np.array(answers_len)
maxlen_answers = np.max(answers_len)
padded_answers = np.ones((batch_size, maxlen_answers)).astype('int32') * em.EOS_TOKEN
for index, answer in enumerate(answers):
padded_answers[index, :answers_len[index]] = answer
return batch_size, padded_answers, answers_len
def process_single_question(self, question, embedding_manager, max_len):
remove_punctuation = str.maketrans('', '', string.punctuation)
remove_digits = str.maketrans('', '', string.digits)
formatted = question.lower().translate(remove_digits).translate(remove_punctuation)
question_words = formatted.split()
question_indexes = embedding_manager.sentence_to_word_indexes(question_words)
def truncate(sentence):
if max_len and len(sentence) > max_len:
return sentence[:max_len]
else:
return sentence
questions = [truncate(question_indexes)]
questions_len = [len(question) for question in questions]
questions_len = np.array(questions_len)
maxlen_questions = np.max(questions_len)
padded_questions = np.ones((1, maxlen_questions)).astype('int32') * em.EOS_TOKEN
for index, question in enumerate(questions):
padded_questions[index, :questions_len[index]] = question
return padded_questions, questions_len
if __name__ == '__main__':
processor = CorpusProcessor("movie_lines.txt", "movie_conversations.txt")
#processor.compute_clean_corpus("./")
processor.split_into_sentences("./train_data/", "raw_einstein.txt", "clean_einstein.txt")
processor.load_clean_corpus("./train_data/", ["cornell_train_data.txt", "twitter_train_data.txt"])
processor.get_word_sentences()
embedding_manager = em.EmbeddingsManager("./embeddings/glove.6B.50d.txt", 50, 10000, processor.vocabulary, processor.sorted_vocabulary)
embedding_manager.load_embeddings()
processor.get_index_sentences(embedding_manager, 10)
processor.get_personal_answers("./train_data/", "clean_einstein.txt", embedding_manager)
| [
"random.shuffle",
"numpy.ones",
"os.path.join",
"EmbeddingsManager.EmbeddingsManager",
"numpy.max",
"ast.literal_eval",
"numpy.array",
"nltk.sent_tokenize",
"gc.collect",
"re.sub",
"operator.itemgetter"
] | [((15227, 15347), 'EmbeddingsManager.EmbeddingsManager', 'em.EmbeddingsManager', (['"""./embeddings/glove.6B.50d.txt"""', '(50)', '(10000)', 'processor.vocabulary', 'processor.sorted_vocabulary'], {}), "('./embeddings/glove.6B.50d.txt', 50, 10000, processor.\n vocabulary, processor.sorted_vocabulary)\n", (15247, 15347), True, 'import EmbeddingsManager as em\n'), ((2882, 2926), 'os.path.join', 'os.path.join', (['path', '"""cornell_train_data.txt"""'], {}), "(path, 'cornell_train_data.txt')\n", (2894, 2926), False, 'import os\n'), ((6929, 6941), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6939, 6941), False, 'import gc\n'), ((7893, 7916), 'numpy.array', 'np.array', (['questions_len'], {}), '(questions_len)\n', (7901, 7916), True, 'import numpy as np\n'), ((7939, 7960), 'numpy.array', 'np.array', (['answers_len'], {}), '(answers_len)\n', (7947, 7960), True, 'import numpy as np\n'), ((7989, 8010), 'numpy.max', 'np.max', (['questions_len'], {}), '(questions_len)\n', (7995, 8010), True, 'import numpy as np\n'), ((8036, 8055), 'numpy.max', 'np.max', (['answers_len'], {}), '(answers_len)\n', (8042, 8055), True, 'import numpy as np\n'), ((8656, 8679), 'random.shuffle', 'shuffle', (['cumulated_data'], {}), '(cumulated_data)\n', (8663, 8679), False, 'from random import shuffle\n'), ((10080, 10109), 'os.path.join', 'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (10092, 10109), False, 'import os\n'), ((10189, 10223), 'os.path.join', 'os.path.join', (['path', 'data_file_name'], {}), '(path, data_file_name)\n', (10201, 10223), False, 'import os\n'), ((10921, 10950), 'os.path.join', 'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (10933, 10950), False, 'import os\n'), ((12786, 12816), 'numpy.array', 'np.array', (['negative_answers_len'], {}), '(negative_answers_len)\n', (12794, 12816), True, 'import numpy as np\n'), ((12842, 12870), 'numpy.max', 'np.max', (['negative_answers_len'], {}), '(negative_answers_len)\n', (12848, 12870), 
True, 'import numpy as np\n'), ((13405, 13426), 'numpy.array', 'np.array', (['answers_len'], {}), '(answers_len)\n', (13413, 13426), True, 'import numpy as np\n'), ((13453, 13472), 'numpy.max', 'np.max', (['answers_len'], {}), '(answers_len)\n', (13459, 13472), True, 'import numpy as np\n'), ((14487, 14510), 'numpy.array', 'np.array', (['questions_len'], {}), '(questions_len)\n', (14495, 14510), True, 'import numpy as np\n'), ((14538, 14559), 'numpy.max', 'np.max', (['questions_len'], {}), '(questions_len)\n', (14544, 14559), True, 'import numpy as np\n'), ((3891, 3920), 'os.path.join', 'os.path.join', (['path', 'file_name'], {}), '(path, file_name)\n', (3903, 3920), False, 'import os\n'), ((10355, 10374), 'nltk.sent_tokenize', 'sent_tokenize', (['line'], {}), '(line)\n', (10368, 10374), False, 'from nltk import sent_tokenize\n'), ((2037, 2076), 'ast.literal_eval', 'literal_eval', (['coded_conversation_string'], {}), '(coded_conversation_string)\n', (2049, 2076), False, 'from ast import literal_eval\n'), ((5639, 5661), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (5658, 5661), False, 'import operator\n'), ((10459, 10501), 're.sub', 're.sub', (['"""\\\\[\\\\d+\\\\]"""', '""""""', 'sentences[index]'], {}), "('\\\\[\\\\d+\\\\]', '', sentences[index])\n", (10465, 10501), False, 'import re\n'), ((8084, 8123), 'numpy.ones', 'np.ones', (['(batch_size, maxlen_questions)'], {}), '((batch_size, maxlen_questions))\n', (8091, 8123), True, 'import numpy as np\n'), ((8180, 8217), 'numpy.ones', 'np.ones', (['(batch_size, maxlen_answers)'], {}), '((batch_size, maxlen_answers))\n', (8187, 8217), True, 'import numpy as np\n'), ((10587, 10640), 're.sub', 're.sub', (["('\\\\b%s\\\\b' % key[0])", 'key[1]', 'sentences[index]'], {}), "('\\\\b%s\\\\b' % key[0], key[1], sentences[index])\n", (10593, 10640), False, 'import re\n'), ((12897, 12934), 'numpy.ones', 'np.ones', (['(batch_size, maxlen_answers)'], {}), '((batch_size, maxlen_answers))\n', (12904, 12934), 
True, 'import numpy as np\n'), ((13498, 13535), 'numpy.ones', 'np.ones', (['(batch_size, maxlen_answers)'], {}), '((batch_size, maxlen_answers))\n', (13505, 13535), True, 'import numpy as np\n'), ((14587, 14617), 'numpy.ones', 'np.ones', (['(1, maxlen_questions)'], {}), '((1, maxlen_questions))\n', (14594, 14617), True, 'import numpy as np\n')] |
import os
import numpy as np
import cv2
import sys
import argparse
import pathlib
import glob
import time
sys.path.append('../../')
from util import env, inverse, project_so, make_dirs
from mesh import Mesh
import scipy.io as sio
"""
Draw a 3 by n point cloud using open3d library
"""
def draw(vertex):
import open3d
pcd = open3d.PointCloud()
pcd.points = open3d.Vector3dVector(vertex.T)
open3d.draw_geometries([pcd])
data_path = env()
print('home directory = %s' % data_path)
PATH_POSE = '%s/dataset/redwood/{}/{}.xf' % data_path
PATH_DEPTH = '%s/dataset/redwood/{}/{}.png' % data_path
PATH_MAT = '%s/processed_dataset/redwood/{}/{}.mat' % data_path
parser = argparse.ArgumentParser(description='Process Redwood Dataset')
parser.add_argument('--shapeid', type=str)
args = parser.parse_args()
def getData(shapeid):
depth_paths = []
poses = []
pose_paths = []
frames = glob.glob(PATH_DEPTH.format(shapeid, '*'))
frames.sort()
for i, frame in enumerate(frames):
frameid = frame.split('/')[-1].split('.')[0]
depth_path = PATH_DEPTH.format(shapeid, frameid)
#tmp = cv2.resize(cv2.imread(imgsPath, 2)/1000., (64,64))
#AuthenticdepthMap.append(tmp.reshape(1,tmp.shape[0],tmp.shape[1],1))
pose_fp = PATH_POSE.format(shapeid, frameid)
flag = True
try:
tmp = np.loadtxt(pose_fp)
assert abs(tmp[3, 3] - 1.0) < 1e-4, 'bottom right corner should be one'
assert (abs(tmp[3, :3]) < 1e-4).all(), '[3, :3] should be zero'
R = tmp[:3, :3]
assert np.linalg.det(R) > 0.01, 'determinant should be 1'
assert np.linalg.norm(R.dot(R.T) - np.eye(3), 'fro') ** 2 < 1e-4, 'should be a rotation matrix'
project_R = project_so(R)
assert np.linalg.norm(R-project_R, 'fro') ** 2 < 1e-4, 'projection onto SO3 should be identical'
tmp[:3, :3] = project_R
tmp = inverse(tmp)
except Exception as e:
print('error on {}: {}'.format(pose_fp, e))
#print(R.dot(R.T))
#print(np.linalg.norm(R.dot(R.T) - np.eye(3), 'fro'))
flag = False
if not flag:
print('ignoring frame {}'.format(frameid))
assert False
poses.append(tmp)
depth_paths.append(depth_path)
pose_paths.append(pose_fp)
T = np.concatenate(poses).reshape(-1,4,4)
return depth_paths, T, pose_paths
def main():
depth_paths, T, pose_paths = getData(args.shapeid)
n = len(depth_paths)
print('found %d clean depth images...' % n)
intrinsic = np.array([[525.0,0,319.5],[0,525.0,239.5],[0,0,1]])
np.random.seed(816)
indices = np.random.permutation(n)
print(indices[:100])
#indices = sorted(indices)
make_dirs(PATH_MAT.format(args.shapeid, 0))
import open3d
pcd_combined = open3d.PointCloud()
for i, idx in enumerate(indices):
import ipdb; ipdb.set_trace()
print('%d / %d' % (i, len(indices)))
mesh = Mesh.read(depth_paths[idx], mode='depth', intrinsic = intrinsic)
pcd = open3d.PointCloud()
pcd.points = open3d.Vector3dVector(mesh.vertex.T)
pcd.transform(inverse(T[idx]))
#pcd = open3d.voxel_down_sample(pcd, voxel_size=0.02)
pcd_combined += pcd
pcd_combined = open3d.voxel_down_sample(pcd_combined, voxel_size=0.02)
sio.savemat(PATH_MAT.format(args.shapeid, i), mdict={
'vertex': mesh.vertex,
'validIdx_rowmajor': mesh.validIdx,
'pose': T[idx],
'depth_path': depth_paths[idx],
'pose_path': pose_paths[idx]})
if i <= 50 and i >= 40:
pcd_combined_down = open3d.voxel_down_sample(pcd_combined, voxel_size=0.02)
open3d.draw_geometries([pcd_combined_down])
pcd_combined_down = open3d.voxel_down_sample(pcd_combined, voxel_size=0.02)
open3d.draw_geometries([pcd_combined_down])
#draw(mesh.vertex)
#sId = np.kron(np.array(range(n)), np.ones([n,1])).astype('int')
#tId = np.kron(np.array(range(n)).reshape(-1,1), np.ones([1,n])).astype('int')
#valId = (sId > tId)
#sId = sId[valId]
#tId = tId[valId]
#numEach = 1
#print('n=%d' % n)
#print('numEach=%d' % numEach)
#left = numEach * args.split
#right = min(numEach * (1 + args.split), len(sId))
#print('computing [%d:%d] out of [%d:%d]' % (left, right, 0, len(sId)))
#sId = sId[left:right]
#tId = tId[left:right]
#
#for i in range(len(sId)):
# sId_this = sId[i]
# tId_this = tId[i]
# print(sId_this, tId_this)
# sys.stdout.flush()
# outpath = os.path.join(outDir, '{}_{}.npy'.format(sId_this,tId_this))
# #if os.path.exists(outpath):
# # continue
#
# start_time = time.time()
# """
# sourceMeshNPY = convertMatlabFormat(DepthPath[sId_this])[np.newaxis,:]
# targetMeshNPY = convertMatlabFormat(DepthPath[tId_this])[np.newaxis,:]
# #import pdb; pdb.set_trace()
# print('convert')
# sys.stdout.flush()
# validId = (sourceMeshNPY.sum(2)!=0).squeeze()
# import util
# util.pc2obj(sourceMeshNPY[0,validId,:].T,'test1.obj')
# util.pc2obj(targetMeshNPY[0,validId,:].T,'test2.obj')
# print('source, target')
# print('time elapsed = %f' % (time.time() - start_time))
# sys.stdout.flush()
# sourceMesh = matlab.double(sourceMeshNPY.tolist())
# targetMesh = matlab.double(targetMeshNPY.tolist())
# #import pdb; pdb.set_trace()
# print('time elapsed = %f' % (time.time() - start_time))
# R_,t_,sigma=eng.pythonMain(sourceMesh,targetMesh,nargout=3)
# R = np.zeros([4,4])
# R[:3,:3] = np.array(R_)
# R[3,3] = 1
# R[:3,3] = np.array(t_).squeeze()
#
# #sourceMeshNPYHomo = np.ones([4,sourceMeshNPY.shape[1]])
# #sourceMeshNPYHomo[:3,:] = sourceMeshNPY[0].copy().T
# #sourceMeshNPYHomo = np.matmul(R, sourceMeshNPYHomo)[:3,:]
# #util.pc2obj(sourceMeshNPYHomo,'test1T.obj')
# """
# sourceMesh = Mesh.read(DepthPath[sId_this],mode='depth',intrinsic=intrinsic)
# print(sourceMesh.vertex.shape)
# print('done loading source')
# sys.stdout.flush()
# targetMesh = Mesh.read(DepthPath[tId_this],mode='depth',intrinsic=intrinsic)
# print('done loading target')
# sys.stdout.flush()
# #np.save('temp.npy', {'R':Pose[tgt][:3, :3].dot, 'src': sourceMesh.vertex, 'tgt': targetMesh.vertex, 'srcValidIdx': sourceMesh.validIdx, tgtValidIdx: targetMesh.validIdx})
# #assert False
# R,sigma = globalRegistration(sourceMesh, targetMesh, optsRGBD())
# print('done registration')
# ##import ipdb
# ##ipdb.set_trace()
#
# print('dumping to %s' % outpath)
# np.save(outpath, {'R':R, 'sigma':sigma})
# end_time = time.time()
# print('time elapsed = %f' % (end_time - start_time))
# sys.stdout.flush()
#snapshot = tracemalloc.take_snapshot()
#display_top(snapshot)
if __name__ == '__main__':
main()
| [
"open3d.PointCloud",
"util.project_so",
"open3d.draw_geometries",
"numpy.array",
"numpy.loadtxt",
"numpy.linalg.norm",
"sys.path.append",
"argparse.ArgumentParser",
"numpy.random.seed",
"numpy.concatenate",
"numpy.random.permutation",
"open3d.voxel_down_sample",
"numpy.eye",
"open3d.Vector... | [((106, 131), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (121, 131), False, 'import sys\n'), ((452, 457), 'util.env', 'env', ([], {}), '()\n', (455, 457), False, 'from util import env, inverse, project_so, make_dirs\n'), ((684, 746), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process Redwood Dataset"""'}), "(description='Process Redwood Dataset')\n", (707, 746), False, 'import argparse\n'), ((336, 355), 'open3d.PointCloud', 'open3d.PointCloud', ([], {}), '()\n', (353, 355), False, 'import open3d\n'), ((373, 404), 'open3d.Vector3dVector', 'open3d.Vector3dVector', (['vertex.T'], {}), '(vertex.T)\n', (394, 404), False, 'import open3d\n'), ((409, 438), 'open3d.draw_geometries', 'open3d.draw_geometries', (['[pcd]'], {}), '([pcd])\n', (431, 438), False, 'import open3d\n'), ((2621, 2680), 'numpy.array', 'np.array', (['[[525.0, 0, 319.5], [0, 525.0, 239.5], [0, 0, 1]]'], {}), '([[525.0, 0, 319.5], [0, 525.0, 239.5], [0, 0, 1]])\n', (2629, 2680), True, 'import numpy as np\n'), ((2677, 2696), 'numpy.random.seed', 'np.random.seed', (['(816)'], {}), '(816)\n', (2691, 2696), True, 'import numpy as np\n'), ((2711, 2735), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2732, 2735), True, 'import numpy as np\n'), ((2877, 2896), 'open3d.PointCloud', 'open3d.PointCloud', ([], {}), '()\n', (2894, 2896), False, 'import open3d\n'), ((3876, 3931), 'open3d.voxel_down_sample', 'open3d.voxel_down_sample', (['pcd_combined'], {'voxel_size': '(0.02)'}), '(pcd_combined, voxel_size=0.02)\n', (3900, 3931), False, 'import open3d\n'), ((3936, 3979), 'open3d.draw_geometries', 'open3d.draw_geometries', (['[pcd_combined_down]'], {}), '([pcd_combined_down])\n', (3958, 3979), False, 'import open3d\n'), ((2956, 2972), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (2970, 2972), False, 'import ipdb\n'), ((3033, 3095), 'mesh.Mesh.read', 'Mesh.read', (['depth_paths[idx]'], {'mode': 
'"""depth"""', 'intrinsic': 'intrinsic'}), "(depth_paths[idx], mode='depth', intrinsic=intrinsic)\n", (3042, 3095), False, 'from mesh import Mesh\n'), ((3112, 3131), 'open3d.PointCloud', 'open3d.PointCloud', ([], {}), '()\n', (3129, 3131), False, 'import open3d\n'), ((3153, 3189), 'open3d.Vector3dVector', 'open3d.Vector3dVector', (['mesh.vertex.T'], {}), '(mesh.vertex.T)\n', (3174, 3189), False, 'import open3d\n'), ((3342, 3397), 'open3d.voxel_down_sample', 'open3d.voxel_down_sample', (['pcd_combined'], {'voxel_size': '(0.02)'}), '(pcd_combined, voxel_size=0.02)\n', (3366, 3397), False, 'import open3d\n'), ((1369, 1388), 'numpy.loadtxt', 'np.loadtxt', (['pose_fp'], {}), '(pose_fp)\n', (1379, 1388), True, 'import numpy as np\n'), ((1779, 1792), 'util.project_so', 'project_so', (['R'], {}), '(R)\n', (1789, 1792), False, 'from util import env, inverse, project_so, make_dirs\n'), ((1956, 1968), 'util.inverse', 'inverse', (['tmp'], {}), '(tmp)\n', (1963, 1968), False, 'from util import env, inverse, project_so, make_dirs\n'), ((2388, 2409), 'numpy.concatenate', 'np.concatenate', (['poses'], {}), '(poses)\n', (2402, 2409), True, 'import numpy as np\n'), ((3212, 3227), 'util.inverse', 'inverse', (['T[idx]'], {}), '(T[idx])\n', (3219, 3227), False, 'from util import env, inverse, project_so, make_dirs\n'), ((3726, 3781), 'open3d.voxel_down_sample', 'open3d.voxel_down_sample', (['pcd_combined'], {'voxel_size': '(0.02)'}), '(pcd_combined, voxel_size=0.02)\n', (3750, 3781), False, 'import open3d\n'), ((3794, 3837), 'open3d.draw_geometries', 'open3d.draw_geometries', (['[pcd_combined_down]'], {}), '([pcd_combined_down])\n', (3816, 3837), False, 'import open3d\n'), ((1596, 1612), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (1609, 1612), True, 'import numpy as np\n'), ((1812, 1848), 'numpy.linalg.norm', 'np.linalg.norm', (['(R - project_R)', '"""fro"""'], {}), "(R - project_R, 'fro')\n", (1826, 1848), True, 'import numpy as np\n'), ((1694, 1703), 'numpy.eye', 
'np.eye', (['(3)'], {}), '(3)\n', (1700, 1703), True, 'import numpy as np\n')] |
import time
import os
import gym
import gym_panda
import reflexxes
import pybullet as p
import math
import numpy as np
import cv2
import pandas as pd
class MovementData:
def __init__(self, id):
self.mov_id=id
self.currentPosition = [0.0, 0.0, 0.0]*2
self.currentVelocity = [0.0, 0.0, 0.0]*2
self.currentAcceleration = [0.0, 0.0, 0.0]*2
self.targetPosition = [0.0, 0.0, 0.0]*2
self.targetVelocity = [0.0, 0.0, 0.0]*2
self.min_sync_time = 0.0 # in rml, min_sync_time <= time_reach_target_pos_and_vel; if min_sync_time is larger than required, then motion will be less greedy and take time of min_sync; Otherwise, if required time is larger than given min_sync, required time is taken to ensure reach target
class Agent:
def __init__(self, hz=240):
self.gen = reflexxes.extra.PositionTrajectoryGenerator(
number_of_dofs=3,
cycle_time=1/float(hz),
max_velocity=[5.0, 5.0, 5.0],
max_acceleration=[10.0, 10.0, 10.0],
max_jerk=[20.0, 20.0, 20.0])
self.grasp_com_offset = [0.0, 0.0, -0.015]
self.iter = 0
def get_obj_position(self, obj_id):
return p.getBasePositionAndOrientation(obj_id)[0]
def get_obj_orientation(self, obj_id):
return p.getBasePositionAndOrientation(obj_id)[1]
def get_tip_position(self, env):
return p.getLinkState(env.pandaUid, 11)[0]
def get_tip_orientation(self, env):
return p.getLinkState(env.pandaUid, 11)[1]
def gen_motion_list(self, motion_data):
self.gen.current_position = motion_data.currentPosition[:]
self.gen.current_velocity = motion_data.currentVelocity[:]
self.gen.current_acceleration = motion_data.currentAcceleration[:]
pos_list = [motion_data.currentPosition[:]]
vel_list = [motion_data.currentVelocity[:]]
acc_list = [motion_data.currentAcceleration[:]]
# generate trajectory
# gen.trajectory(target_pos, target_vel, min_sync_time)
for pos, vel, acc in self.gen.trajectory(motion_data.targetPosition, motion_data.targetVelocity, motion_data.min_sync_time):
pos_list.append(pos)
vel_list.append(vel)
acc_list.append(acc)
return pos_list, vel_list, acc_list
def recording(self, env):
if self.iter == 0:
if not os.path.exists(env.storage_folder+"/"+env.object+"/"):
os.makedirs(env.storage_folder + "/" + env.object + "/")
os.makedirs(env.storage_folder + "/" + env.object + "/" + "color/")
os.makedirs(env.storage_folder + "/" + env.object + "/" + "cad/")
os.makedirs(env.storage_folder + "/" + env.object + "/" + "depth/")
os.makedirs(env.storage_folder + "/" + env.object + "/" + "annotations/")
os.makedirs(env.storage_folder + "/" + env.object + "/" + "mask/")
self.folder = env.storage_folder + "/" + env.object + "/"
self.filenames = []
self.LinkPositions = []
self.LinkOrientations = []
self.LinkR = []
self.ObjectPositions = []
self.ObjectOrientations = []
self.ObjectR = []
self.filenames.append(self.iter)
self.LinkPositions.append(self.get_tip_position(env)) # recording end effector postion
self.LinkOrientations.append(self.get_tip_orientation(env)) # recording end effector orientation
self.LinkR.append(p.getMatrixFromQuaternion(self.LinkOrientations[-1]))
self.ObjectPositions.append(self.get_obj_position(env.objectUid))
self.ObjectOrientations.append(self.get_obj_orientation(env.objectUid))
self.ObjectR.append(p.getMatrixFromQuaternion(self.ObjectOrientations[-1]))
rgb_filename = self.folder + 'color/%s.jpg' % str(self.iter)
cad_filename = self.folder + 'cad/%s.jpg' % str(self.iter)
depth_filename = self.folder + 'depth/%s.png' % str(self.iter)
annotation_filename = self.folder + 'annotations/%s.png' % str(self.iter)
mask_filename = self.folder + 'mask/%s.png' % str(self.iter)
rgb, cad, depth, annotation, mask = env.storage()
rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
cad = cv2.cvtColor(cad, cv2.COLOR_RGB2BGR)
cv2.imwrite(rgb_filename, rgb)
cv2.imwrite(cad_filename, cad)
cv2.imwrite(depth_filename, depth)
cv2.imwrite(annotation_filename, annotation)
cv2.imwrite(mask_filename, mask)
self.iter += 1
if env.record_end:
robot_joints = pd.DataFrame({'filenames': self.filenames,
'LinkPositions': self.LinkPositions,
'LinkOrientations': self.LinkOrientations,
'LinkRotationMatrices': self.LinkR,
'ObjectPositions': self.ObjectPositions,
'ObjectOrientations': self.ObjectOrientations,
'ObjectRotationMatrices': self.ObjectR
})
robot_joints.to_csv(self.folder + '/robot_joints.csv', index=False)
if __name__ == "__main__":
RECORD = True
env = gym.make('panda-v0').env
# object to be grasped
env.object = "YcbCrackerBox"
# prior: grasping position offset w.r.t center of mass of object
grasp_offset_dict = {
"YcbPottedMeatCan": [0, 0.005, 0.015],
"YcbGelatinBox": [0, +0.003, 0.022],
"YcbMustardBottle": [0, 0, 0.08],
"YcbTomatoSoupCan": [0, 0.007, 0.025],
"YcbCrackerBox": [0, -0.01, 0.035],
"YcbSugarBox": [0, 0, 0.0],
"YcbBanana": [0, 0, 0],
"YcbTennisBall": [0, 0, 0.],
}
grasp_offset = grasp_offset_dict[env.object]
agent = Agent()
observation = env.reset()
while not env.is_static():
p.stepSimulation()
time.sleep(0.001)
fingers = 1
obj_position = agent.get_obj_position(env.objectUid)
prepick_position = [x+y for x,y in zip(obj_position, [0, 0, 0.15])]
grasp_position = [x + y for x, y in zip(obj_position, grasp_offset)]
init_tip_pose = agent.get_tip_position(env)
prepick_data = MovementData('prepick')
prepick_data.currentPosition = init_tip_pose
prepick_data.targetPosition = prepick_position
prepick_data.min_sync_time = 2.
pick_data = MovementData('pick')
pick_data.currentPosition = prepick_position
pick_data.targetPosition = grasp_position
pick_data.min_sync_time = 1
prepick_pos, prepick_vel, prepick_acc = agent.gen_motion_list(prepick_data)
pick_pos, pick_vel, pick_acc = agent.gen_motion_list(pick_data)
pick_group_pos = prepick_pos[:-1] + pick_pos # remove duplicated pos
pick_group_vel = prepick_vel[:-1] + pick_vel # remove duplicated vel
pick_group_acc = prepick_acc[:-1] + pick_acc # remove possible duplicated acc to match size
pick_group_traj_time = np.linspace(0, agent.gen.cycle_time * len(pick_group_pos),
len(pick_group_pos)).tolist()
pick_time = len(prepick_pos) * agent.gen.cycle_time
# Compute gripper orientation and rotation increments
init_tip_ori = agent.get_tip_orientation(env) # quartenion
fingers = [0.1, 0.1]
for i in range(len(pick_group_pos)):
action = ['pick', pick_group_pos[i], init_tip_ori, fingers]
observation, reward, done = env.step(action)
fingers = env.activate()
lift_position = [x + y for x, y in zip(grasp_position, [0, 0, 0.4])]
lift_data = MovementData('lift')
lift_data.currentPosition = grasp_position
lift_data.targetPosition = lift_position
lift_data.min_sync_time = 3
lift_pos, lift_vel, lift_acc = agent.gen_motion_list(lift_data)
for i in range(len(lift_pos)):
action = ['lift', lift_pos[i], init_tip_ori, fingers]
observation, reward, done = env.step(action)
if env.object == "YcbTennisBall":
p.createConstraint(env.pandaUid, 11, env.objectUid, -1, jointType=p.JOINT_FIXED,
jointAxis= [0,0,0], parentFramePosition=grasp_offset, childFramePosition=[0,0,0],
parentFrameOrientation=p.getLinkState(env.pandaUid,11)[1],)
# start recording
ee_position = agent.get_tip_position(env)
for j in range(6):
rotate_pos = [ee_position]* 120 # hz=240, lasting 0.5 second.
rotate_group_ori = []
pre_ori = agent.get_tip_orientation(env)
for i in range(len(rotate_pos)):
rotate_group_ori.append(p.getQuaternionFromEuler(
[0., -np.pi, np.pi / 2.+ np.pi/4*i/len(rotate_pos) + np.pi/4*j]
)) # quaternion
for i in range(len(rotate_pos)):
action = ['rotate', rotate_pos[i], rotate_group_ori[i], fingers]
observation, reward, done = env.step(action)
if RECORD: agent.recording(env)
# [0., -np.pi, np.pi / 2.]
for j in range(2):
rotate_pos = [ee_position]* 120 # hz=240, lasting 0.5 second.
rotate_group_ori = []
pre_ori = agent.get_tip_orientation(env)
for i in range(len(rotate_pos)):
rotate_group_ori.append(p.getQuaternionFromEuler(
[0., -np.pi, 2 * np.pi - np.pi / 4 * i / len(rotate_pos) - np.pi / 4 * j]
)) # quaternion
for i in range(len(rotate_pos)):
action = ['rotate', rotate_pos[i], rotate_group_ori[i], fingers]
observation, reward, done = env.step(action)
if RECORD: agent.recording(env)
offset = [
[0.1, 0, 0.1*(np.sqrt(2) - 1) ],
[0.1 * np.sqrt(2), 0, 0.15 * np.sqrt(2)]
]
rotate_position = ee_position
for j in range(2):
pre_position = rotate_position
rotate_position = [x + y for x, y in zip(ee_position, offset[j])]
rotate_data = MovementData('rotate')
rotate_data.currentPosition = pre_position
rotate_data.targetPosition = rotate_position
rotate_data.min_sync_time = 0.5
rotate_pos, rotate_vel, rotate_acc = agent.gen_motion_list(rotate_data)
rotate_group_ori = []
pre_ori = agent.get_tip_orientation(env)
for i in range(len(rotate_pos)):
rotate_group_ori.append(p.multiplyTransforms(positionA=[0, 0, 0], orientationA=p.getQuaternionFromEuler(
[0, -np.pi / 4 * i / len(rotate_pos), 0.]
), positionB=[0, 0, 0], orientationB=pre_ori)[1]
) # quaternion
for i in range(len(rotate_pos)):
action = ['rotate', rotate_pos[i], rotate_group_ori[i], fingers]
observation, reward, done = env.step(action)
if RECORD: agent.recording(env)
env.record_end = True
if RECORD: agent.recording(env)
env.close()
| [
"cv2.imwrite",
"pybullet.getMatrixFromQuaternion",
"os.path.exists",
"numpy.sqrt",
"pandas.DataFrame",
"os.makedirs",
"pybullet.getBasePositionAndOrientation",
"time.sleep",
"cv2.cvtColor",
"pybullet.stepSimulation",
"gym.make",
"pybullet.getLinkState"
] | [((4247, 4283), 'cv2.cvtColor', 'cv2.cvtColor', (['rgb', 'cv2.COLOR_RGB2BGR'], {}), '(rgb, cv2.COLOR_RGB2BGR)\n', (4259, 4283), False, 'import cv2\n'), ((4298, 4334), 'cv2.cvtColor', 'cv2.cvtColor', (['cad', 'cv2.COLOR_RGB2BGR'], {}), '(cad, cv2.COLOR_RGB2BGR)\n', (4310, 4334), False, 'import cv2\n'), ((4343, 4373), 'cv2.imwrite', 'cv2.imwrite', (['rgb_filename', 'rgb'], {}), '(rgb_filename, rgb)\n', (4354, 4373), False, 'import cv2\n'), ((4382, 4412), 'cv2.imwrite', 'cv2.imwrite', (['cad_filename', 'cad'], {}), '(cad_filename, cad)\n', (4393, 4412), False, 'import cv2\n'), ((4421, 4455), 'cv2.imwrite', 'cv2.imwrite', (['depth_filename', 'depth'], {}), '(depth_filename, depth)\n', (4432, 4455), False, 'import cv2\n'), ((4464, 4508), 'cv2.imwrite', 'cv2.imwrite', (['annotation_filename', 'annotation'], {}), '(annotation_filename, annotation)\n', (4475, 4508), False, 'import cv2\n'), ((4517, 4549), 'cv2.imwrite', 'cv2.imwrite', (['mask_filename', 'mask'], {}), '(mask_filename, mask)\n', (4528, 4549), False, 'import cv2\n'), ((5340, 5360), 'gym.make', 'gym.make', (['"""panda-v0"""'], {}), "('panda-v0')\n", (5348, 5360), False, 'import gym\n'), ((6001, 6019), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (6017, 6019), True, 'import pybullet as p\n'), ((6028, 6045), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (6038, 6045), False, 'import time\n'), ((1201, 1240), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['obj_id'], {}), '(obj_id)\n', (1232, 1240), True, 'import pybullet as p\n'), ((1303, 1342), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['obj_id'], {}), '(obj_id)\n', (1334, 1342), True, 'import pybullet as p\n'), ((1399, 1431), 'pybullet.getLinkState', 'p.getLinkState', (['env.pandaUid', '(11)'], {}), '(env.pandaUid, 11)\n', (1413, 1431), True, 'import pybullet as p\n'), ((1491, 1523), 'pybullet.getLinkState', 'p.getLinkState', (['env.pandaUid', '(11)'], {}), 
'(env.pandaUid, 11)\n', (1505, 1523), True, 'import pybullet as p\n'), ((3525, 3577), 'pybullet.getMatrixFromQuaternion', 'p.getMatrixFromQuaternion', (['self.LinkOrientations[-1]'], {}), '(self.LinkOrientations[-1])\n', (3550, 3577), True, 'import pybullet as p\n'), ((3761, 3815), 'pybullet.getMatrixFromQuaternion', 'p.getMatrixFromQuaternion', (['self.ObjectOrientations[-1]'], {}), '(self.ObjectOrientations[-1])\n', (3786, 3815), True, 'import pybullet as p\n'), ((4628, 4933), 'pandas.DataFrame', 'pd.DataFrame', (["{'filenames': self.filenames, 'LinkPositions': self.LinkPositions,\n 'LinkOrientations': self.LinkOrientations, 'LinkRotationMatrices': self\n .LinkR, 'ObjectPositions': self.ObjectPositions, 'ObjectOrientations':\n self.ObjectOrientations, 'ObjectRotationMatrices': self.ObjectR}"], {}), "({'filenames': self.filenames, 'LinkPositions': self.\n LinkPositions, 'LinkOrientations': self.LinkOrientations,\n 'LinkRotationMatrices': self.LinkR, 'ObjectPositions': self.\n ObjectPositions, 'ObjectOrientations': self.ObjectOrientations,\n 'ObjectRotationMatrices': self.ObjectR})\n", (4640, 4933), True, 'import pandas as pd\n'), ((2390, 2449), 'os.path.exists', 'os.path.exists', (["(env.storage_folder + '/' + env.object + '/')"], {}), "(env.storage_folder + '/' + env.object + '/')\n", (2404, 2449), False, 'import os\n'), ((2461, 2517), 'os.makedirs', 'os.makedirs', (["(env.storage_folder + '/' + env.object + '/')"], {}), "(env.storage_folder + '/' + env.object + '/')\n", (2472, 2517), False, 'import os\n'), ((2534, 2601), 'os.makedirs', 'os.makedirs', (["(env.storage_folder + '/' + env.object + '/' + 'color/')"], {}), "(env.storage_folder + '/' + env.object + '/' + 'color/')\n", (2545, 2601), False, 'import os\n'), ((2618, 2683), 'os.makedirs', 'os.makedirs', (["(env.storage_folder + '/' + env.object + '/' + 'cad/')"], {}), "(env.storage_folder + '/' + env.object + '/' + 'cad/')\n", (2629, 2683), False, 'import os\n'), ((2700, 2767), 'os.makedirs', 'os.makedirs', 
(["(env.storage_folder + '/' + env.object + '/' + 'depth/')"], {}), "(env.storage_folder + '/' + env.object + '/' + 'depth/')\n", (2711, 2767), False, 'import os\n'), ((2784, 2857), 'os.makedirs', 'os.makedirs', (["(env.storage_folder + '/' + env.object + '/' + 'annotations/')"], {}), "(env.storage_folder + '/' + env.object + '/' + 'annotations/')\n", (2795, 2857), False, 'import os\n'), ((2874, 2940), 'os.makedirs', 'os.makedirs', (["(env.storage_folder + '/' + env.object + '/' + 'mask/')"], {}), "(env.storage_folder + '/' + env.object + '/' + 'mask/')\n", (2885, 2940), False, 'import os\n'), ((9770, 9780), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9777, 9780), True, 'import numpy as np\n'), ((9792, 9802), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9799, 9802), True, 'import numpy as np\n'), ((8346, 8378), 'pybullet.getLinkState', 'p.getLinkState', (['env.pandaUid', '(11)'], {}), '(env.pandaUid, 11)\n', (8360, 8378), True, 'import pybullet as p\n'), ((9736, 9746), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9743, 9746), True, 'import numpy as np\n')] |
from pathlib import Path
import numpy as np
import pandas as pd
import nibabel as nib
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
color_tables_dir = Path(__file__).parent
class Parcellation:
    """Wraps a parcellation (label-map) volume plus a color table and reports
    which anatomical structures a resection segmentation overlaps.

    Subclasses must set ``self.color_table`` (see GIFParcellation /
    FreeSurferParcellation below).
    """
    def __init__(self, parcellation_path):
        self.parcellation_path = Path(parcellation_path)
        # Loaded lazily by the ``label_map`` property.
        self._label_map = None
    @property
    def label_map(self):
        """Parcellation volume as a uint16 array (lazily loaded and cached)."""
        if self._label_map is None:
            label_map_nii = nib.load(self.parcellation_path)
            # NOTE(review): get_data() is deprecated in recent nibabel
            # (get_fdata() is the replacement) -- confirm the pinned version.
            self._label_map = label_map_nii.get_data().astype(np.uint16)
        return self._label_map
    def get_resected_structures(self, resection_seg_path, ignore=None):
        """Return (names, resected voxel counts, resected fractions) for the
        structures overlapped by the resection mask.

        Structures whose name contains any substring in *ignore* are skipped.
        The fraction is resected voxels / total voxels of that structure in
        the parcellation.
        """
        if ignore is None:
            ignore = []
        resected_dict = self.get_resected_labels_and_counts(resection_seg_path)
        structures = []
        for resected_number, num_voxels_resected in resected_dict.items():
            structure_name = self.color_table.get_structure_from_label_number(
                resected_number)
            ignore_this = False
            for substring_to_ignore in ignore:
                if substring_to_ignore in structure_name:
                    ignore_this = True
                    break
            if ignore_this:
                continue
            num_voxels_parcellation = np.count_nonzero(
                self.label_map == resected_number)
            ratio = num_voxels_resected / num_voxels_parcellation
            structures.append((
                structure_name,
                num_voxels_resected,
                ratio,
            ))
        # Transpose [(name, count, ratio), ...] into three parallel tuples.
        # NOTE(review): returns [] (not three empty tuples) when nothing
        # matched -- callers that unpack three values would then fail.
        return list(zip(*structures))
    def get_resected_labels_and_counts(self, resection_seg_path):
        """Map each parcellation label found under the resection mask
        (voxels where the segmentation is > 0) to its resected voxel count."""
        mask_nii = nib.load(resection_seg_path)
        mask = mask_nii.get_data() > 0
        masked_values = self.label_map[mask]
        unique, counts = np.unique(masked_values, return_counts=True)
        resected_dict = dict(zip(unique, counts))
        return resected_dict
    def print_percentage_of_resected_structures(self,
                                                resection_seg_path,
                                                hide_zeros=True):
        """Print two reports: per-structure resected percentage, and the
        composition of the resection volume, both in descending order.

        Structures that round down to 0% are skipped when *hide_zeros*.
        """
        structures, voxels, ratios = self.get_resected_structures(
            resection_seg_path)
        sort_by_ratio = np.argsort(ratios)
        print('Percentage of each resected structure:')
        for idx in reversed(sort_by_ratio):
            ratio = ratios[idx]
            structure = structures[idx]
            percentage = int(ratio * 100)
            if percentage == 0 and hide_zeros:
                continue
            structure_pretty = structure.replace('-', ' ')
            print(f'{percentage:3}% of {structure_pretty}')
        print()
        sort_by_voxels = np.argsort(voxels)
        total_voxels = sum(voxels)
        print('The resection volume is composed of:')
        for idx in reversed(sort_by_voxels):
            ratio = voxels[idx] / total_voxels
            structure = structures[idx]
            percentage = int(ratio * 100)
            if percentage == 0 and hide_zeros:
                continue
            structure_pretty = structure.replace('-', ' ')
            print(f'{percentage:3}% is {structure_pretty}')
    def plot_pie(
            self,
            resection_seg_path,
            title=None,
            show=True,
            pct_threshold=2,
            output_path=None,
            ignore=None,
            ):
        """Pie chart of the resection volume composition, colored by the
        color table. Slices at or below *pct_threshold* percent lose their
        label and percentage text. Returns the matplotlib figure.
        """
        names, voxels, _ = self.get_resected_structures(
            resection_seg_path, ignore=ignore)
        colors = [
            self.color_table.get_color_from_structure_name(name)
            for name in names
        ]
        fig, ax = plt.subplots()
        sort_by_voxels = np.argsort(voxels)[::-1]  # descending order
        voxels = np.array(voxels)[sort_by_voxels]
        percentages = (voxels / voxels.sum()) * 100
        names = np.array(names)[sort_by_voxels]
        colors = np.array(colors)[sort_by_voxels]
        # Hide percentage text on slices below the threshold.
        def my_autopct(pct):
            return f'{int(pct)}%' if pct > pct_threshold else ''
        # NOTE(review): numpy slicing returns a view, so blanking labels[i]
        # below also blanks names[i]; harmless here since names is not
        # reused afterwards.
        labels = names[:]
        for i, pct in enumerate(percentages):
            if pct <= pct_threshold:
                labels[i] = ''
        ax.pie(
            percentages,
            labels=labels,
            colors=colors,
            shadow=False,
            autopct=my_autopct,
            pctdistance=0.7,
        )
        if title is not None:
            ax.set_title(title)
        plt.tight_layout()
        if output_path is not None:
            fig.savefig(output_path, dpi=400)
        if show:
            plt.show()
        return fig
    def plot_bars(
            self,
            resection_seg_path,
            title=None,
            show=True,
            output_path=None,
            ignore=None,
            ):
        """Horizontal bar chart: percentage of each structure that was
        resected, sorted ascending, colored by the color table.
        Returns the matplotlib figure.
        """
        names, _, ratios = self.get_resected_structures(
            resection_seg_path, ignore=ignore)
        colors = [
            self.color_table.get_color_from_structure_name(name)
            for name in names
        ]
        fig, ax = plt.subplots()
        sort_by_ratios = np.argsort(ratios)
        ratios = np.array(ratios)[sort_by_ratios]
        percentages = ratios * 100
        names = np.array(names)[sort_by_ratios]
        colors = np.array(colors)[sort_by_ratios]
        y_pos = np.arange(len(names))
        ax.barh(
            y_pos,
            percentages,
            align='center',
            color=colors,
            tick_label=names,
        )
        ax.set_axisbelow(True)  # draw grid under bars: https://stackoverflow.com/a/39039520
        ax.grid()
        ax.set_xlim((0, 105))
        ax.xaxis.set_major_formatter(mtick.PercentFormatter())
        if title is not None:
            ax.set_title(title)
        plt.tight_layout()
        if output_path is not None:
            fig.savefig(output_path, dpi=400)
        if show:
            plt.show()
        return fig
    def is_valid_number(self, number):
        """True if *number* is a label defined in the color table."""
        return self.color_table.is_valid_number(number)
class GIFParcellation(Parcellation):
    """Parcellation backed by the GIF (BrainAnatomyLabelsV3_0) color table."""
    def __init__(self, parcellation_path):
        super().__init__(parcellation_path)
        self.color_table = GIFColorTable()
class FreeSurferParcellation(Parcellation):
    """Parcellation backed by the FreeSurfer color table."""
    def __init__(self, parcellation_path):
        super().__init__(parcellation_path)
        self.color_table = FreeSurferColorTable()
class ColorTable:
    """Base lookup table mapping integer label numbers to structure names
    and RGB colors.

    Subclasses populate ``self._data_frame``: a pandas DataFrame indexed by
    label number whose columns match ``self.fieldnames``.
    """
    def __init__(self):
        # Column layout shared by every supported color-table file.
        self.fieldnames = (
            'structure',
            'red',
            'green',
            'blue',
            'alpha',
        )
    def get_value_from_label_number(self, label_number, key):
        """Return column *key* for *label_number*, or a placeholder string
        when the label is not present in the table."""
        try:
            value = self._data_frame.loc[label_number][key]
        except KeyError:
            # Bug fix: corrected "Unkown" -> "Unknown" in the placeholder.
            value = f'[Unknown label: {label_number}]'
        return value
    def get_row_from_structure_name(self, name):
        """Return the (possibly empty) DataFrame rows whose 'structure'
        column equals *name*."""
        mask = self._data_frame['structure'] == name
        row = self._data_frame.loc[mask]
        return row
    def get_value_from_structure_name(self, name, key):
        """Return column *key* of the row(s) matching *name* (a Series)."""
        row = self.get_row_from_structure_name(name)
        value = row[key]
        return value
    def get_structure_from_label_number(self, label_number):
        """Return the structure name for *label_number*."""
        return self.get_value_from_label_number(label_number, 'structure')
    def get_color_from_structure_name(self, name):
        """Return the RGB color for *name* as floats in [0, 1].

        Unknown names map to black.
        """
        row = self.get_row_from_structure_name(name)
        if row.empty:
            color = 0, 0, 0
        else:
            color = [row[c].values for c in ('red', 'green', 'blue')]
            color = np.hstack(color)
        color = np.array(color) / 255
        return color
    def is_valid_number(self, number):
        """True if *number* is an index of the underlying table."""
        return number in self._data_frame.index
class GIFColorTable(ColorTable):
    """Color table for the GIF parcellation (BrainAnatomyLabelsV3_0)."""
    def __init__(self):
        super().__init__()
        self.color_table_path = color_tables_dir / 'BrainAnatomyLabelsV3_0.txt'
        self._data_frame = self.read_color_table()
    def read_color_table(self):
        """Parse the whitespace-separated table into a label-indexed frame."""
        # sep=r'\s+' also tolerates the double spaces present in the file.
        return pd.read_csv(
            self.color_table_path,
            index_col=0,
            names=self.fieldnames,
            sep=r'\s+',
        )
class FreeSurferColorTable(ColorTable):
    """Color table for FreeSurfer parcellations (FreeSurferLabels.ctbl)."""
    def __init__(self):
        super().__init__()
        self.color_table_path = color_tables_dir / 'FreeSurferLabels.ctbl'
        self._data_frame = self.read_color_table()
    def read_color_table(self):
        """Parse the .ctbl file, skipping its two header lines."""
        return pd.read_csv(
            self.color_table_path,
            index_col=0,
            names=self.fieldnames,
            sep=r'\s+',
            skiprows=2,
        )
| [
"numpy.unique",
"pandas.read_csv",
"nibabel.load",
"pathlib.Path",
"matplotlib.ticker.PercentFormatter",
"numpy.hstack",
"numpy.argsort",
"numpy.count_nonzero",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((174, 188), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (178, 188), False, 'from pathlib import Path\n'), ((294, 317), 'pathlib.Path', 'Path', (['parcellation_path'], {}), '(parcellation_path)\n', (298, 317), False, 'from pathlib import Path\n'), ((1696, 1724), 'nibabel.load', 'nib.load', (['resection_seg_path'], {}), '(resection_seg_path)\n', (1704, 1724), True, 'import nibabel as nib\n'), ((1835, 1879), 'numpy.unique', 'np.unique', (['masked_values'], {'return_counts': '(True)'}), '(masked_values, return_counts=True)\n', (1844, 1879), True, 'import numpy as np\n'), ((2271, 2289), 'numpy.argsort', 'np.argsort', (['ratios'], {}), '(ratios)\n', (2281, 2289), True, 'import numpy as np\n'), ((2737, 2755), 'numpy.argsort', 'np.argsort', (['voxels'], {}), '(voxels)\n', (2747, 2755), True, 'import numpy as np\n'), ((3671, 3685), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3683, 3685), True, 'import matplotlib.pyplot as plt\n'), ((4482, 4500), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4498, 4500), True, 'import matplotlib.pyplot as plt\n'), ((5076, 5090), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5088, 5090), True, 'import matplotlib.pyplot as plt\n'), ((5116, 5134), 'numpy.argsort', 'np.argsort', (['ratios'], {}), '(ratios)\n', (5126, 5134), True, 'import numpy as np\n'), ((5766, 5784), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5782, 5784), True, 'import matplotlib.pyplot as plt\n'), ((7973, 8060), 'pandas.read_csv', 'pd.read_csv', (['self.color_table_path'], {'index_col': '(0)', 'names': 'self.fieldnames', 'sep': '"""\\\\s+"""'}), "(self.color_table_path, index_col=0, names=self.fieldnames, sep=\n '\\\\s+')\n", (7984, 8060), True, 'import pandas as pd\n'), ((8444, 8543), 'pandas.read_csv', 'pd.read_csv', (['self.color_table_path'], {'index_col': '(0)', 'names': 'self.fieldnames', 'sep': '"""\\\\s+"""', 'skiprows': '(2)'}), 
"(self.color_table_path, index_col=0, names=self.fieldnames, sep=\n '\\\\s+', skiprows=2)\n", (8455, 8543), True, 'import pandas as pd\n'), ((453, 485), 'nibabel.load', 'nib.load', (['self.parcellation_path'], {}), '(self.parcellation_path)\n', (461, 485), True, 'import nibabel as nib\n'), ((1298, 1349), 'numpy.count_nonzero', 'np.count_nonzero', (['(self.label_map == resected_number)'], {}), '(self.label_map == resected_number)\n', (1314, 1349), True, 'import numpy as np\n'), ((3711, 3729), 'numpy.argsort', 'np.argsort', (['voxels'], {}), '(voxels)\n', (3721, 3729), True, 'import numpy as np\n'), ((3773, 3789), 'numpy.array', 'np.array', (['voxels'], {}), '(voxels)\n', (3781, 3789), True, 'import numpy as np\n'), ((3874, 3889), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (3882, 3889), True, 'import numpy as np\n'), ((3923, 3939), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (3931, 3939), True, 'import numpy as np\n'), ((4612, 4622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4620, 4622), True, 'import matplotlib.pyplot as plt\n'), ((5152, 5168), 'numpy.array', 'np.array', (['ratios'], {}), '(ratios)\n', (5160, 5168), True, 'import numpy as np\n'), ((5236, 5251), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (5244, 5251), True, 'import numpy as np\n'), ((5285, 5301), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (5293, 5301), True, 'import numpy as np\n'), ((5668, 5692), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', ([], {}), '()\n', (5690, 5692), True, 'import matplotlib.ticker as mtick\n'), ((5896, 5906), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5904, 5906), True, 'import matplotlib.pyplot as plt\n'), ((7535, 7551), 'numpy.hstack', 'np.hstack', (['color'], {}), '(color)\n', (7544, 7551), True, 'import numpy as np\n'), ((7572, 7587), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (7580, 7587), True, 'import numpy as np\n')] |
#-*-coding:utf-8-*-
'''
Created on Nov 14, 2018
@author: pengzhiliang
'''
import time
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from torch.utils.data import Dataset,DataLoader
from torch.optim import lr_scheduler,Adam,SGD
from torchvision import datasets, models, transforms
from torchsummary import summary
from model.unet import UNet
from model.fcn import fcn
from utils.metrics import Score,averageMeter
from utils.crf import dense_crf
from dataloader.MRBrain_loader import MRBrainSDataset
from dataloader.augmentation import *
from dataloader.coder import merge_classes
# Select execution device: first CUDA GPU if available, otherwise CPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Parameter settings (NOTE(review): "defualt_path" typo is a variable name,
# kept to avoid breaking references).
defualt_path = osp.join('/home/cv_xfwang/data/', 'MRBrainS')
batch_size = 1
num_workers = 4
resume_path = '/home/cv_xfwang/MRBrainS_seg/checkpoint/best_unet_model.pkl'
# Validation data loader: MRBrainS validation split, images scaled to 224.
val_loader = DataLoader(MRBrainSDataset(defualt_path, split='val', is_transform=True, \
    img_norm=True, augmentations=Compose([Scale(224)])), \
    batch_size=1,num_workers=num_workers,pin_memory=True,shuffle=False)
# Set up the segmentation model.
model = UNet().to(device)
# summary(model,(3,224,224),batch_size) # network parameter summary
# running_metrics = Score(n_classes=9)
# Evaluation metrics over the 4 merged classes (see merge_classes below).
running_metrics = Score(n_classes=4) # label_test=[0,2,2,3,3,1,1,0,0]
# Resume model weights from checkpoint; abort if the file is missing.
if osp.isfile(resume_path):
    checkpoint = torch.load(resume_path)
    model.load_state_dict(checkpoint["model_state"])
    best_iou = checkpoint['best_iou']
    print("=====>",
        "Loaded checkpoint '{}' (iter {})".format(
            resume_path, checkpoint["epoch"]
        ))
    # mean dice derived from IoU as 2*IoU/(IoU+1).
    print("=====> best mIoU: %.4f best mean dice: %.4f"%(best_iou,(best_iou*2)/(best_iou+1)))
else:
    raise ValueError("can't find model")
print(">>>Test After Dense CRF: ")
model.eval()
running_metrics.reset()
with torch.no_grad():
    for i, (img, mask) in tqdm(enumerate(val_loader)):
        img = img.to(device)
        output = model(img) #[-1, 9, 256, 256]
        probs = F.softmax(output, dim=1)
        # Per-image class probabilities and ground truth (batch size is 1).
        pred = probs.cpu().data[0].numpy()
        label = mask.cpu().data[0].numpy()
        # Refine the softmax output with a dense CRF over the input image.
        img = img.cpu().data[0].numpy()
        pred = dense_crf(img*255, pred)
        # print(pred.shape)
        # _, pred = torch.max(torch.tensor(pred), dim=-1)
        # NOTE(review): np.int is removed in modern NumPy -- confirm pinned version.
        pred = np.asarray(pred, dtype=np.int)
        label = np.asarray(label, dtype=np.int)
        # Merge the fine-grained labels into the 4 evaluated class groups.
        pred = merge_classes(pred)
        label = merge_classes(label)
        # print(pred.shape,label.shape)
        running_metrics.update(label,pred)
score, class_iou = running_metrics.get_scores()
for k, v in score.items():
    print(k,':',v)
print(i, class_iou)
"utils.crf.dense_crf",
"torch.load",
"os.path.join",
"numpy.asarray",
"utils.metrics.Score",
"model.unet.UNet",
"os.path.isfile",
"torch.cuda.is_available",
"dataloader.coder.merge_classes",
"torch.no_grad",
"torch.nn.functional.softmax"
] | [((855, 900), 'os.path.join', 'osp.join', (['"""/home/cv_xfwang/data/"""', '"""MRBrainS"""'], {}), "('/home/cv_xfwang/data/', 'MRBrainS')\n", (863, 900), True, 'import os.path as osp\n'), ((1430, 1448), 'utils.metrics.Score', 'Score', ([], {'n_classes': '(4)'}), '(n_classes=4)\n', (1435, 1448), False, 'from utils.metrics import Score, averageMeter\n'), ((1495, 1518), 'os.path.isfile', 'osp.isfile', (['resume_path'], {}), '(resume_path)\n', (1505, 1518), True, 'import os.path as osp\n'), ((1537, 1560), 'torch.load', 'torch.load', (['resume_path'], {}), '(resume_path)\n', (1547, 1560), False, 'import torch\n'), ((1999, 2014), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2012, 2014), False, 'import torch\n'), ((795, 820), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (818, 820), False, 'import torch\n'), ((1299, 1305), 'model.unet.UNet', 'UNet', ([], {}), '()\n', (1303, 1305), False, 'from model.unet import UNet\n'), ((2163, 2187), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (2172, 2187), True, 'import torch.nn.functional as F\n'), ((2343, 2369), 'utils.crf.dense_crf', 'dense_crf', (['(img * 255)', 'pred'], {}), '(img * 255, pred)\n', (2352, 2369), False, 'from utils.crf import dense_crf\n'), ((2469, 2499), 'numpy.asarray', 'np.asarray', (['pred'], {'dtype': 'np.int'}), '(pred, dtype=np.int)\n', (2479, 2499), True, 'import numpy as np\n'), ((2516, 2547), 'numpy.asarray', 'np.asarray', (['label'], {'dtype': 'np.int'}), '(label, dtype=np.int)\n', (2526, 2547), True, 'import numpy as np\n'), ((2578, 2597), 'dataloader.coder.merge_classes', 'merge_classes', (['pred'], {}), '(pred)\n', (2591, 2597), False, 'from dataloader.coder import merge_classes\n'), ((2614, 2634), 'dataloader.coder.merge_classes', 'merge_classes', (['label'], {}), '(label)\n', (2627, 2634), False, 'from dataloader.coder import merge_classes\n')] |
import numpy
import numpy as np
from skimage.metrics import structural_similarity as ssim, peak_signal_noise_ratio
from sklearn.metrics import mean_absolute_error, mean_squared_error, accuracy_score
import torch
from torch.nn import MSELoss,L1Loss
# PSNR and SSIM calculation for inputting a 3d array (amount, height, width)
def PSNR_SSIM(y, x):
    """Average PSNR and SSIM over a stack of 2-D frames.

    Both *y* and *x* are (amount, height, width) arrays. The metrics are
    computed frame by frame with an intensity range of 255 and averaged.
    Returns a ``(mean_psnr, mean_ssim)`` tuple.
    """
    n_frames, height, width = y.shape
    psnr_total = 0.0
    ssim_total = 0.0
    for idx in range(n_frames):
        reference = y[idx].reshape((height, width))
        estimate = x[idx].reshape((height, width))
        psnr_total += peak_signal_noise_ratio(estimate, reference, data_range=255.0)
        ssim_total += ssim(estimate, reference, data_range=255.0)
    return (psnr_total / n_frames, ssim_total / n_frames)
# MAE and MSE calculation for inputting a 3d array (amount, height, width)
def PSNR_SSIM_pytorch(x, y):
    """Average PSNR and SSIM over (amount, channel, height, width) tensors.

    Tensors are moved to the CPU and converted to numpy; only channel 0 of
    each frame is evaluated, with an intensity range of 255.
    Returns a ``(mean_psnr, mean_ssim)`` tuple.
    """
    n_frames, _, height, width = y.shape
    # Detach from autograd and move back to numpy for the metric functions.
    with torch.no_grad():
        y = np.array(torch.Tensor.cpu(y))
        x = np.array(torch.Tensor.cpu(x))
    psnr_total = 0.0
    ssim_total = 0.0
    for idx in range(n_frames):
        reference = y[idx, 0, :, :].reshape((height, width))
        estimate = x[idx, 0, :, :].reshape((height, width))
        psnr_total += peak_signal_noise_ratio(estimate, reference, data_range=255.0)
        ssim_total += ssim(estimate, reference, data_range=255.0)
    return (psnr_total / n_frames, ssim_total / n_frames)
def MAE_MSE(y, x):
    """Mean absolute and mean squared error between two image stacks.

    Both arguments are (amount, height, width) arrays; the errors are
    computed per frame with torch's L1/MSE losses and averaged over frames.
    Returns a ``(mae, mse)`` tuple of floats.
    """
    l1_criterion = L1Loss()
    l2_criterion = MSELoss()
    n_frames, height, width = y.shape
    mae_total = 0.0
    mse_total = 0.0
    with torch.no_grad():
        for idx in range(n_frames):
            reference = torch.FloatTensor(y[idx].reshape((height, width)))
            estimate = torch.FloatTensor(x[idx].reshape((height, width)))
            mae_total += l1_criterion(reference, estimate).item()
            mse_total += l2_criterion(reference, estimate).item()
    return (mae_total / n_frames, mse_total / n_frames)
# Check the performance given two sets of arrays with the specified height, width and amount.
def check_performance(x_set, y_set, HEIGHT, WIDTH, AMOUNT, without_padding):
    """Compute (PSNR, SSIM, MAE, MSE) between two 4-D image sets.

    Both sets are (AMOUNT, 1, HEIGHT, WIDTH) shaped and are flattened to
    (AMOUNT, H, W) before the metrics are computed. When *without_padding*
    is true a 2-pixel border is cropped from every frame first.

    Returns a ``(psnr, ssim, mae, mse)`` tuple.
    """
    pad = 2
    if without_padding:  # idiom fix: was `without_padding == True`
        cropped_shape = (AMOUNT, HEIGHT - 2 * pad, WIDTH - 2 * pad)
        x = numpy.reshape(x_set[:, :, pad:HEIGHT - pad, pad:WIDTH - pad], cropped_shape)
        y = numpy.reshape(y_set[:, :, pad:HEIGHT - pad, pad:WIDTH - pad], cropped_shape)
    else:
        x = numpy.reshape(x_set, (AMOUNT, HEIGHT, WIDTH))
        y = numpy.reshape(y_set, (AMOUNT, HEIGHT, WIDTH))
    psnr, ssim = PSNR_SSIM(y, x)
    mae, mse = MAE_MSE(y, x)
    return (psnr, ssim, mae, mse)
def check_performance_3d(x_set, y_set, HEIGHT, WIDTH, AMOUNT):
    """Compute (PSNR, SSIM, MAE, MSE) for 5-D (3-D model) outputs.

    The center frame (index 2 of axis 2) of *x_set* is cropped by a 2-pixel
    border and compared against *y_set*; both are reshaped to
    (AMOUNT - 4, H - 4, W - 4) stacks before scoring.
    """
    cropped_shape = (AMOUNT - 4, HEIGHT - 2 * 2, WIDTH - 2 * 2)
    x = numpy.reshape(x_set[:, :, 2, 2:HEIGHT - 2, 2:WIDTH - 2], cropped_shape)
    y = numpy.reshape(y_set[:, :, :, :], cropped_shape)
    psnr, ssim = PSNR_SSIM(y, x)
    mae, mse = MAE_MSE(y, x)
    return (psnr, ssim, mae, mse)
| [
"numpy.reshape",
"skimage.metrics.structural_similarity",
"torch.Tensor.cpu",
"torch.nn.L1Loss",
"torch.nn.MSELoss",
"torch.no_grad",
"skimage.metrics.peak_signal_noise_ratio",
"torch.FloatTensor"
] | [((2048, 2057), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (2055, 2057), False, 'from torch.nn import MSELoss, L1Loss\n'), ((2071, 2079), 'torch.nn.L1Loss', 'L1Loss', ([], {}), '()\n', (2077, 2079), False, 'from torch.nn import MSELoss, L1Loss\n'), ((3383, 3489), 'numpy.reshape', 'numpy.reshape', (['x_set[:, :, 2, 2:HEIGHT - 2, 2:WIDTH - 2]', '(AMOUNT - 4, HEIGHT - 2 * 2, WIDTH - 2 * 2)'], {}), '(x_set[:, :, 2, 2:HEIGHT - 2, 2:WIDTH - 2], (AMOUNT - 4, \n HEIGHT - 2 * 2, WIDTH - 2 * 2))\n', (3396, 3489), False, 'import numpy\n'), ((3493, 3570), 'numpy.reshape', 'numpy.reshape', (['y_set[:, :, :, :]', '(AMOUNT - 4, HEIGHT - 2 * 2, WIDTH - 2 * 2)'], {}), '(y_set[:, :, :, :], (AMOUNT - 4, HEIGHT - 2 * 2, WIDTH - 2 * 2))\n', (3506, 3570), False, 'import numpy\n'), ((818, 873), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['x_tmp', 'y_tmp'], {'data_range': '(255.0)'}), '(x_tmp, y_tmp, data_range=255.0)\n', (841, 873), False, 'from skimage.metrics import structural_similarity as ssim, peak_signal_noise_ratio\n'), ((894, 930), 'skimage.metrics.structural_similarity', 'ssim', (['x_tmp', 'y_tmp'], {'data_range': '(255.0)'}), '(x_tmp, y_tmp, data_range=255.0)\n', (898, 930), True, 'from skimage.metrics import structural_similarity as ssim, peak_signal_noise_ratio\n'), ((1260, 1275), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1273, 1275), False, 'import torch\n'), ((2345, 2369), 'torch.FloatTensor', 'torch.FloatTensor', (['x_tmp'], {}), '(x_tmp)\n', (2362, 2369), False, 'import torch\n'), ((2386, 2410), 'torch.FloatTensor', 'torch.FloatTensor', (['y_tmp'], {}), '(y_tmp)\n', (2403, 2410), False, 'import torch\n'), ((2858, 2956), 'numpy.reshape', 'numpy.reshape', (['x_set[:, :, 2:HEIGHT - 2, 2:WIDTH - 2]', '(AMOUNT, HEIGHT - 2 * 2, WIDTH - 2 * 2)'], {}), '(x_set[:, :, 2:HEIGHT - 2, 2:WIDTH - 2], (AMOUNT, HEIGHT - 2 *\n 2, WIDTH - 2 * 2))\n', (2871, 2956), False, 'import numpy\n'), ((2965, 3063), 'numpy.reshape', 'numpy.reshape', 
(['y_set[:, :, 2:HEIGHT - 2, 2:WIDTH - 2]', '(AMOUNT, HEIGHT - 2 * 2, WIDTH - 2 * 2)'], {}), '(y_set[:, :, 2:HEIGHT - 2, 2:WIDTH - 2], (AMOUNT, HEIGHT - 2 *\n 2, WIDTH - 2 * 2))\n', (2978, 3063), False, 'import numpy\n'), ((3082, 3139), 'numpy.reshape', 'numpy.reshape', (['x_set[:, :, :, :]', '(AMOUNT, HEIGHT, WIDTH)'], {}), '(x_set[:, :, :, :], (AMOUNT, HEIGHT, WIDTH))\n', (3095, 3139), False, 'import numpy\n'), ((3152, 3209), 'numpy.reshape', 'numpy.reshape', (['y_set[:, :, :, :]', '(AMOUNT, HEIGHT, WIDTH)'], {}), '(y_set[:, :, :, :], (AMOUNT, HEIGHT, WIDTH))\n', (3165, 3209), False, 'import numpy\n'), ((1298, 1317), 'torch.Tensor.cpu', 'torch.Tensor.cpu', (['y'], {}), '(y)\n', (1314, 1317), False, 'import torch\n'), ((1340, 1359), 'torch.Tensor.cpu', 'torch.Tensor.cpu', (['x'], {}), '(x)\n', (1356, 1359), False, 'import torch\n'), ((1805, 1860), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['x_tmp', 'y_tmp'], {'data_range': '(255.0)'}), '(x_tmp, y_tmp, data_range=255.0)\n', (1828, 1860), False, 'from skimage.metrics import structural_similarity as ssim, peak_signal_noise_ratio\n'), ((1885, 1921), 'skimage.metrics.structural_similarity', 'ssim', (['x_tmp', 'y_tmp'], {'data_range': '(255.0)'}), '(x_tmp, y_tmp, data_range=255.0)\n', (1889, 1921), True, 'from skimage.metrics import structural_similarity as ssim, peak_signal_noise_ratio\n'), ((2424, 2439), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2437, 2439), False, 'import torch\n')] |
import numpy as np
import logging
class PID(object):
    """Discrete PID controller with a simple rectangular integrator."""
    def __init__(self, kp, ki, kd):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.reset()
    def update(self, t, e):
        """Return the control output for error *e* observed at time *t*.

        The derivative term is zeroed when no time has elapsed.
        """
        # TODO add anti-windup logic -- most environments have a short
        # execution time, so the integrator has little time to wind up.
        step = t - self.last_t
        self.last_t = t
        self.accum += e * step
        error_delta = e - self.last_e
        self.last_e = e
        proportional = self.kp * e
        integral = self.ki * self.accum
        if step > 0:
            derivative = self.kd * error_delta / step
        else:
            derivative = 0
        return proportional + integral + derivative
    def reset(self):
        """Forget all history: time, last error and integral accumulator."""
        self.last_t = 0
        self.last_e = 0
        self.accum = 0
class PidController(object):
    """Rate PID controller plus motor mixer, loosely ported from Betaflight.

    Holds one PID per axis (roll/pitch/yaw) and mixes their outputs into
    four motor commands in the [minthrottle, maxthrottle] range.
    """
    FD_ROLL = 0
    FD_PITCH = 1
    FD_YAW = 2
    # Betaflight gain scaling factors applied to the integer PID settings.
    PTERM_SCALE = 0.032029
    ITERM_SCALE = 0.244381
    DTERM_SCALE = 0.000529
    minthrottle = 1000
    maxthrottle = 2000
    def __init__(self,
                 pid_roll = [40, 40, 30],
                 pid_pitch = [58, 50, 35],
                 pid_yaw = [80, 45, 20],
                 mixer = [],
                 itermLimit = 150):
        # NOTE(review): mutable default arguments -- safe only because they
        # are read, never mutated, here.
        # init gains and scale (self.Kp/Ki/Kd are kept for reference; the
        # per-axis PID objects below use the raw integer gains).
        self.Kp = [pid_roll[0], pid_pitch[0], pid_yaw[0]]
        self.Kp = [self.PTERM_SCALE * p for p in self.Kp]
        self.Ki = [pid_roll[1], pid_pitch[1], pid_yaw[1]]
        self.Ki = [self.ITERM_SCALE * i for i in self.Ki]
        self.Kd = [pid_roll[2], pid_pitch[2], pid_yaw[2]]
        self.Kd = [self.DTERM_SCALE * d for d in self.Kd]
        self.itermLimit = itermLimit
        self.previousRateError = [0]*3
        self.previousTime = 0
        self.previous_motor_values = [self.minthrottle]*4
        # One PID per axis, in roll/pitch/yaw order.
        self.pid_rpy = [PID(*pid_roll), PID(*pid_pitch), PID(*pid_yaw)]
        # mixer rows are per-motor; column 0 is the throttle weight and
        # columns 1..3 are the roll/pitch/yaw weights (see mix()).
        self.mixer = mixer
    def calculate_motor_values(self, current_time, sp_rates, gyro_rates):
        """Run the three axis PIDs on setpoint-vs-gyro rate errors and mix
        the results into four motor commands."""
        rpy_sums = []
        for i in range(3):
            u = self.pid_rpy[i].update(current_time, sp_rates[i] - gyro_rates[i])
            rpy_sums.append(u)
        return self.mix(*rpy_sums)
    def constrainf(self, amt, low, high):
        """Clamp *amt* into [low, high]."""
        # From BF src/main/common/maths.h
        if amt < low:
            return low
        elif amt > high:
            return high
        else:
            return amt
    def mix(self, r, p, y):
        """Convert axis PID sums (roll, pitch, yaw) into four integer motor
        outputs, normalizing when the requested mix exceeds the motor range
        (Betaflight "airmode" behavior)."""
        PID_MIXER_SCALING = 1000.0
        pidSumLimit = 10000.  # raised from Betaflight's default of 500
        pidSumLimitYaw = 100000.  # raised from Betaflight's default of 400
        motorOutputMixSign = 1
        motorOutputRange = self.maxthrottle - self.minthrottle# throttle max - throttle min
        motorOutputMin = self.minthrottle
        mixer_index_throttle = 0
        # NOTE(review): the roll/pitch/yaw index constants below are unused;
        # the loop indexes mixer columns 1..3 directly.
        mixer_index_roll = 1
        mixer_index_pitch = 2
        mixer_index_yaw = 3
        scaledAxisPidRoll = self.constrainf(r, -pidSumLimit, pidSumLimit) / PID_MIXER_SCALING
        scaledAxisPidPitch = self.constrainf(p, -pidSumLimit, pidSumLimit) / PID_MIXER_SCALING
        scaledAxisPidYaw = self.constrainf(y, -pidSumLimitYaw, pidSumLimitYaw) / PID_MIXER_SCALING
        # Yaw sign is flipped relative to the other axes.
        scaledAxisPidYaw = -scaledAxisPidYaw
        # Find roll/pitch/yaw desired output
        motor_count = 4
        motorMix = [0]*motor_count
        motorMixMax = 0
        motorMixMin = 0
        # No additional throttle, in air mode
        throttle = 0
        motorRangeMin = 1000
        motorRangeMax = 2000
        for i in range(motor_count):
            mix = (scaledAxisPidRoll * self.mixer[i][1] +
                   scaledAxisPidPitch * self.mixer[i][2] +
                   scaledAxisPidYaw * self.mixer[i][3])
            if mix > motorMixMax:
                motorMixMax = mix
            elif mix < motorMixMin:
                motorMixMin = mix
            motorMix[i] = mix
        motorMixRange = motorMixMax - motorMixMin
        if motorMixRange > 1.0:
            # Requested mix exceeds the unit range: normalize it and center
            # the throttle so the full correction fits.
            for i in range(motor_count):
                motorMix[i] /= motorMixRange
            # Get the maximum correction by setting offset to center when airmode enabled
            throttle = 0.5
        else:
            # Only automatically adjust throttle when airmode enabled. Airmode logic is always active on high throttle
            throttleLimitOffset = motorMixRange / 2.0
            throttle = self.constrainf(throttle, 0.0 + throttleLimitOffset, 1.0 - throttleLimitOffset)
        motor = []
        for i in range(motor_count):
            motorOutput = motorOutputMin + (motorOutputRange * (motorOutputMixSign * motorMix[i] + throttle * self.mixer[i][mixer_index_throttle]))
            motorOutput = self.constrainf(motorOutput, motorRangeMin, motorRangeMax);
            motor.append(motorOutput)
        # Round to the nearest integer PWM value.
        motor = list(map(int, np.round(motor)))
        return motor
    def reset(self):
        """Reset all three axis PIDs."""
        for pid in self.pid_rpy:
            pid.reset()
| [
"numpy.round"
] | [((4723, 4738), 'numpy.round', 'np.round', (['motor'], {}), '(motor)\n', (4731, 4738), True, 'import numpy as np\n')] |
import os
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
def generate_test_trajectory():
    """Generate a spiral-like camera trajectory around the origin.

    Returns:
        mats: (N, 1, 16) float64 array of flattened 4x4 pose matrices.
        poses: the corresponding list of cv.viz camera poses.
    """
    poses = []
    for i, j in zip(np.arange(0, 271, 3), np.arange(0, 1200, 10)):
        x = 2 * np.cos(i * 3 * np.pi/180.0) * (1.0 + 0.5 * np.cos(1.2 + i * 1.2 * np.pi/180.0))
        y = 0.25 + i/270.0 + np.sin(j * np.pi/180.0) * 0.2 * np.sin(0.6 + j * 1.5 * np.pi/180.0)
        z = 2 * np.sin(i * 3 * np.pi/180.0) * (1.0 + 0.5 * np.cos(1.2 + i * np.pi/180.0))
        poses.append(cv.viz.makeCameraPose((x, y, z), (0.0, 0, 0), (0.0, 1.0, 0.0)))
    mats = np.zeros(shape=(len(poses), 1, 16), dtype=np.float64)
    for idx, pose in enumerate(poses):
        mats[idx, 0, :] = pose.mat().reshape(16)
    return mats, poses
def tutorial3(camera_pov, filename):
    """Port of the OpenCV viz "transformations" tutorial.

    Loads the point cloud at *filename*, places it in global coordinates,
    and either draws the camera frustum (third-person view) or switches the
    viewer to the camera's own point of view when *camera_pov* is true.
    """
    myWindow = cv.viz_Viz3d("Coordinate Frame")
    myWindow.showWidget("axe",cv.viz_WCoordinateSystem())
    # Camera placement: origin, look-at point, and up direction.
    cam_origin = (3.0, 3.0, 3.0)
    cam_focal_point = (3.0,3.0,2.0)
    cam_y_dir = (-1.0,0.0,0.0)
    camera_pose = cv.viz.makeCameraPose(cam_origin, cam_focal_point, cam_y_dir)
    # Transform from camera frame (given its axes) to global frame.
    transform = cv.viz.makeTransformToGlobal((0.0,-1.0,0.0), (-1.0,0.0,0.0), (0.0,0.0,-1.0), cam_origin)
    dragon_cloud,_,_ = cv.viz.readCloud(filename)
    cloud_widget = cv.viz_WCloud(dragon_cloud, cv.viz_Color().green())
    # NOTE(review): this first assignment is dead -- it is immediately
    # overwritten by the rotated/translated pose on the next line.
    cloud_pose = cv.viz_Affine3d()
    cloud_pose = cv.viz_Affine3d().rotate((0, np.pi / 2, 0)).translate((0, 0, 3))
    cloud_pose_global = transform.product(cloud_pose)
    myWindow.showWidget("CPW_FRUSTUM", cv.viz_WCameraPosition((0.889484, 0.523599)), camera_pose)
    if not camera_pov:
        # Third-person view: also draw the camera coordinate axes.
        myWindow.showWidget("CPW", cv.viz_WCameraPosition(0.5), camera_pose)
    myWindow.showWidget("dragon", cloud_widget, cloud_pose_global)
    if camera_pov:
        myWindow.setViewerPose(camera_pose)
class viz_test(NewOpenCVTests):
def setUp(self):
super(viz_test, self).setUp()
if not bool(os.environ.get('OPENCV_PYTEST_RUN_VIZ', False)):
self.skipTest("Use OPENCV_PYTEST_RUN_VIZ=1 to enable VIZ UI tests")
def test_viz_tutorial3_global_view(self):
tutorial3(False, self.find_file("viz/dragon.ply"))
def test_viz_tutorial3_camera_view(self):
tutorial3(True, self.find_file("viz/dragon.ply"))
def test_viz(self):
dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
myWindow = cv.viz_Viz3d("abc")
myWindow.showWidget("coo", cv.viz_WCoordinateSystem(1))
myWindow.showWidget("cloud", cv.viz_WPaintedCloud(dragon_cloud))
myWindow.spinOnce(500, True)
    def test_viz_show_simple_widgets(self):
        """Render one of each simple widget type, then update the text widgets."""
        viz = cv.viz_Viz3d("show_simple_widgets")
        viz.setBackgroundMeshLab()
        viz.showWidget("coos", cv.viz_WCoordinateSystem())
        viz.showWidget("cube", cv.viz_WCube())
        viz.showWidget("cub0", cv.viz_WCube((-1.0, -1, -1), (-0.5, -0.5, -0.5), False, cv.viz_Color().indigo()))
        viz.showWidget("arro", cv.viz_WArrow((-0.5, -0.5, -0.5), (0.5, 0.5, 0.5), 0.009, cv.viz_Color().raspberry()))
        # NOTE(review): bluberry() is invoked on the class here, unlike the
        # instance calls elsewhere -- confirm both bindings are accepted.
        viz.showWidget("cir1", cv.viz_WCircle(0.5, 0.01, cv.viz_Color.bluberry()))
        viz.showWidget("cir2", cv.viz_WCircle(0.5, (0.5, 0.0, 0.0), (1.0, 0.0, 0.0), 0.01, cv.viz_Color().apricot()))
        viz.showWidget("cyl0", cv.viz_WCylinder((-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), 0.125, 30, cv.viz_Color().brown()))
        viz.showWidget("con0", cv.viz_WCone(0.25, 0.125, 6, cv.viz_Color().azure()))
        viz.showWidget("con1", cv.viz_WCone(0.125, (0.5, -0.5, 0.5), (0.5, -1.0, 0.5), 6, cv.viz_Color().turquoise()))
        # Keep references to the text widgets so they can be updated below.
        text2d = cv.viz_WText("Different simple widgets", (20, 20), 20, cv.viz_Color().green())
        viz.showWidget("text2d", text2d)
        text3d = cv.viz_WText3D("Simple 3D text", ( 0.5, 0.5, 0.5), 0.125, False, cv.viz_Color().green())
        viz.showWidget("text3d", text3d)
        viz.showWidget("plane1", cv.viz_WPlane((0.25, 0.75)))
        viz.showWidget("plane2", cv.viz_WPlane((0.5, -0.5, -0.5), (0.0, 1.0, 1.0), (1.0, 1.0, 0.0), (1.0, 0.5), cv.viz_Color().gold()))
        viz.showWidget("grid1", cv.viz_WGrid((7,7), (0.75,0.75), cv.viz_Color().gray()), cv.viz_Affine3d().translate((0.0, 0.0, -1.0)))
        viz.spinOnce(500, True)
        # Update the text widgets in place and re-render.
        text2d.setText("Different simple widgets (updated)")
        text3d.setText("Updated text 3D")
        viz.spinOnce(500, True)
    def test_viz_show_overlay_image(self):
        """Overlay half-size images in the window corners and animate one of
        them while orbiting the camera."""
        lena = cv.imread(self.find_file("viz/lena.png"))
        gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY)
        rows = lena.shape[0]
        cols = lena.shape[1]
        # (width, height) of the half-size overlays.
        half_lsize = (lena.shape[1] // 2, lena.shape[0] // 2)
        viz = cv.viz_Viz3d("show_overlay_image")
        viz.setBackgroundMeshLab();
        vsz = viz.getWindowSize()
        viz.showWidget("coos", cv.viz_WCoordinateSystem())
        viz.showWidget("cube", cv.viz_WCube())
        # Top-left overlay is kept in `x` so its image can be animated below.
        x = cv.viz_WImageOverlay(lena, (10, 10, half_lsize[1], half_lsize[0]))
        viz.showWidget("img1", x)
        # Remaining overlays fill the other three window corners.
        viz.showWidget("img2", cv.viz_WImageOverlay(gray, (vsz[0] - 10 - cols // 2, 10, half_lsize[1], half_lsize[0])))
        viz.showWidget("img3", cv.viz_WImageOverlay(gray, (10, vsz[1] - 10 - rows // 2, half_lsize[1], half_lsize[0])))
        viz.showWidget("img5", cv.viz_WImageOverlay(lena, (vsz[0] - 10 - cols // 2, vsz[1] - 10 - rows // 2, half_lsize[1], half_lsize[0])))
        viz.showWidget("text2d", cv.viz_WText("Overlay images", (20, 20), 20, cv.viz_Color().green()))
        i = 0
        for num in range(50):
            i = i + 1
            a = i % 360
            # Orbit the camera on a radius-3 circle while pulsing the image.
            pose = (3 * np.sin(a * np.pi/180), 2.1, 3 * np.cos(a * np.pi/180));
            viz.setViewerPose(cv.viz.makeCameraPose(pose , (0.0, 0.5, 0.0), (0.0, 0.1, 0.0)))
            img = lena * (np.sin(i * 10 * np.pi/180) * 0.5 + 0.5)
            x.setImage(img.astype(np.uint8))
            viz.spinOnce(100, True)
        viz.showWidget("text2d", cv.viz_WText("Overlay images (stopped)", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True)
    def test_viz_show_image_3d(self):
        """Place 2D images as textured quads in the 3D scene and animate one.

        Three image planes with arrows pointing at them; the first plane's
        texture brightness is modulated over 50 frames.
        """
        lena = cv.imread(self.find_file("viz/lena.png"))
        lena_gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY)
        viz = cv.viz_Viz3d("show_image_3d")
        viz.setBackgroundMeshLab()
        viz.showWidget("coos", cv.viz_WCoordinateSystem())
        viz.showWidget("cube", cv.viz_WCube());
        viz.showWidget("arr0", cv.viz_WArrow((0.5, 0.0, 0.0), (1.5, 0.0, 0.0), 0.009, cv.viz_Color().raspberry()))
        # Keep a handle on this image widget so its texture can be updated below.
        x = cv.viz_WImage3D(lena, (1.0, 1.0))
        viz.showWidget("img0", x, cv.viz_Affine3d((0.0, np.pi/2, 0.0), (.5, 0.0, 0.0)))
        viz.showWidget("arr1", cv.viz_WArrow((-0.5, -0.5, 0.0), (0.2, 0.2, 0.0), 0.009, cv.viz_Color().raspberry()))
        viz.showWidget("img1", cv.viz_WImage3D(lena_gray, (1.0, 1.0), (-0.5, -0.5, 0.0), (1.0, 1.0, 0.0), (0.0, 1.0, 0.0)))
        viz.showWidget("arr3", cv.viz_WArrow((-0.5, -0.5, -0.5), (0.5, 0.5, 0.5), 0.009, cv.viz_Color().raspberry()))
        viz.showWidget("text2d", cv.viz_WText("Images in 3D", (20, 20), 20, cv.viz_Color().green()))
        i = 0
        for num in range(50):
            # Pulse the texture brightness between 0 and full intensity.
            img = lena * (np.sin(i*7.5*np.pi/180) * 0.5 + 0.5)
            x.setImage(img.astype(np.uint8))
            i = i + 1
            viz.spinOnce(100, True);
        viz.showWidget("text2d", cv.viz_WText("Images in 3D (stopped)", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True)
    def test_viz_show_cloud_bluberry(self):
        """Render the dragon point cloud in a single flat ("bluberry") color."""
        dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
        pose = cv.viz_Affine3d()
        # Rotate the cloud slightly around the Y axis for a nicer view.
        pose = pose.rotate((0, 0.8, 0));
        viz = cv.viz_Viz3d("show_cloud_bluberry")
        viz.setBackgroundColor(cv.viz_Color().black())
        viz.showWidget("coosys", cv.viz_WCoordinateSystem())
        viz.showWidget("dragon", cv.viz_WCloud(dragon_cloud, cv.viz_Color().bluberry()), pose)
        viz.showWidget("text2d", cv.viz_WText("Bluberry cloud", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True)
    def test_viz_show_cloud_random_color(self):
        """Render the dragon point cloud with one random RGB color per point."""
        dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
        # One uint8 BGR triplet per cloud point.
        colors = np.random.randint(0, 255, size=(dragon_cloud.shape[0],dragon_cloud.shape[1],3), dtype=np.uint8)
        pose = cv.viz_Affine3d()
        pose = pose.rotate((0, 0.8, 0));
        viz = cv.viz_Viz3d("show_cloud_random_color")
        viz.setBackgroundMeshLab()
        viz.showWidget("coosys", cv.viz_WCoordinateSystem())
        viz.showWidget("dragon", cv.viz_WCloud(dragon_cloud, colors), pose)
        viz.showWidget("text2d", cv.viz_WText("Random color cloud", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True)
def test_viz_show_cloud_masked(self):
dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
qnan = np.NAN
for idx in range(dragon_cloud.shape[0]):
if idx % 15 != 0:
dragon_cloud[idx,:] = qnan
pose = cv.viz_Affine3d()
pose = pose.rotate((0, 0.8, 0))
viz = cv.viz_Viz3d("show_cloud_masked");
viz.showWidget("coosys", cv.viz_WCoordinateSystem())
viz.showWidget("dragon", cv.viz_WCloud(dragon_cloud), pose)
viz.showWidget("text2d", cv.viz_WText("Nan masked cloud", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
def test_viz_show_cloud_collection(self):
cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
ccol = cv.viz_WCloudCollection()
pose = cv.viz_Affine3d()
pose1 = cv.viz_Affine3d().translate((0, 0, 0)).rotate((np.pi/2, 0, 0))
ccol.addCloud(cloud, cv.viz_Color().white(), cv.viz_Affine3d().translate((0, 0, 0)).rotate((np.pi/2, 0, 0)))
ccol.addCloud(cloud, cv.viz_Color().blue(), cv.viz_Affine3d().translate((1, 0, 0)))
ccol.addCloud(cloud, cv.viz_Color().red(), cv.viz_Affine3d().translate((2, 0, 0)))
ccol.finalize();
viz = cv.viz_Viz3d("show_cloud_collection")
viz.setBackgroundColor(cv.viz_Color().mlab())
viz.showWidget("coosys", cv.viz_WCoordinateSystem());
viz.showWidget("ccol", ccol);
viz.showWidget("text2d", cv.viz_WText("Cloud collection", (20, 20), 20, cv.viz_Color(0, 255,0 )))
viz.spinOnce(500, True)
    def test_viz_show_painted_clouds(self):
        """Render the dragon cloud with the different WPaintedCloud variants.

        cloud1 uses the default paint, cloud2 a custom axis, cloud3 a custom
        axis with explicit start/end colors.
        """
        cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply"))
        viz = cv.viz_Viz3d("show_painted_clouds")
        viz.setBackgroundMeshLab()
        viz.showWidget("coosys", cv.viz_WCoordinateSystem())
        # Mirror-image poses left and right of the origin.
        pose1 = cv.viz_Affine3d((0.0, -np.pi/2, 0.0), (-1.5, 0.0, 0.0))
        pose2 = cv.viz_Affine3d((0.0, np.pi/2, 0.0), (1.5, 0.0, 0.0))
        viz.showWidget("cloud1", cv.viz_WPaintedCloud(cloud), pose1)
        viz.showWidget("cloud2", cv.viz_WPaintedCloud(cloud, (0.0, -0.75, -1.0), (0.0, 0.75, 0.0)), pose2);
        viz.showWidget("cloud3", cv.viz_WPaintedCloud(cloud, (0.0, 0.0, -1.0), (0.0, 0.0, 1.0), cv.viz_Color().blue(), cv.viz_Color().red()))
        viz.showWidget("arrow", cv.viz_WArrow((0.0, 1.0, -1.0), (0.0, 1.0, 1.0), 0.009, cv.viz_Color()))
        viz.showWidget("text2d", cv.viz_WText("Painted clouds", (20, 20), 20, cv.viz_Color(0, 255, 0)))
        viz.spinOnce(500, True)
    def test_viz_show_mesh(self):
        """Render the dragon mesh as-is, rotated slightly about the Y axis."""
        mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply"))
        viz = cv.viz_Viz3d("show_mesh")
        viz.showWidget("coosys", cv.viz_WCoordinateSystem());
        viz.showWidget("mesh", cv.viz_WMesh(mesh), cv.viz_Affine3d().rotate((0, 0.8, 0)));
        viz.showWidget("text2d", cv.viz_WText("Just mesh", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True)
    def test_viz_show_mesh_random_colors(self):
        """Render the dragon mesh with random vertex colors and Phong shading."""
        mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply"))
        # Overwrite the mesh's per-vertex colors with random uint8 values.
        mesh.colors = np.random.randint(0, 255, size=mesh.colors.shape, dtype=np.uint8)
        viz = cv.viz_Viz3d("show_mesh")
        viz.showWidget("coosys", cv.viz_WCoordinateSystem());
        viz.showWidget("mesh", cv.viz_WMesh(mesh), cv.viz_Affine3d().rotate((0, 0.8, 0)))
        viz.setRenderingProperty("mesh", cv.viz.SHADING, cv.viz.SHADING_PHONG)
        viz.showWidget("text2d", cv.viz_WText("Random color mesh", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True)
    def test_viz_show_textured_mesh(self):
        """Build a curved ribbon mesh procedurally and texture it with lena.

        Two arcs of 64 points (inner/outer rim) are interleaved into one
        point array; quads are emitted as pairs of triangles connecting
        consecutive rim points, with texture coordinates running along the arc.
        """
        lena = cv.imread(self.find_file("viz/lena.png"))
        angle = np.arange(0,64)
        # points0/points1: the two rims of the ribbon (x=0 and x=1.57).
        points0 = np.vstack((np.zeros(shape=angle.shape, dtype=np.float32), np.cos(angle * np.pi /128), np.sin(angle* np.pi /128)))
        points1 = np.vstack((1.57 * np.ones(shape=angle.shape, dtype=np.float32),np.cos(angle* np.pi /128), np.sin(angle* np.pi /128)))
        tcoords0 = np.vstack((np.zeros(shape=angle.shape, dtype=np.float32), angle / 64))
        tcoords1 = np.vstack((np.ones(shape=angle.shape, dtype=np.float32), angle / 64))
        # Interleave rim A into even columns and rim B into odd columns.
        points = np.zeros(shape=(points0.shape[0], points0.shape[1] * 2 ),dtype=np.float32)
        tcoords = np.zeros(shape=(tcoords0.shape[0], tcoords0.shape[1] * 2),dtype=np.float32)
        tcoords[:,0::2] = tcoords0
        tcoords[:,1::2] = tcoords1
        points[:,0::2] = points0 * 0.75
        points[:,1::2] = points1 * 0.75
        # Polygon buffer: each entry group is [vertex-count, i0, i1, i2] — two
        # triangles per quad, hence 8 ints per rim segment.
        polygons = np.zeros(shape=(4 * (points.shape[1]-2)+1),dtype=np.int32)
        for idx in range(points.shape[1] // 2 - 1):
            polygons[8 * idx: 8 * (idx + 1)] = [3, 2*idx, 2*idx+1, 2*idx+2, 3, 2*idx+1, 2*idx+2, 2*idx+3]
        mesh = cv.viz_Mesh()
        # viz expects 1 x N x 3 layouts for cloud and tcoords.
        mesh.cloud = points.transpose().reshape(1,points.shape[1],points.shape[0])
        mesh.tcoords = tcoords.transpose().reshape(1,tcoords.shape[1],tcoords.shape[0])
        mesh.polygons = polygons.reshape(1, 4 * (points.shape[1]-2)+1)
        mesh.texture = lena
        viz = cv.viz_Viz3d("show_textured_mesh")
        viz.setBackgroundMeshLab();
        viz.showWidget("coosys", cv.viz_WCoordinateSystem());
        viz.showWidget("mesh", cv.viz_WMesh(mesh))
        viz.setRenderingProperty("mesh", cv.viz.SHADING, cv.viz.SHADING_PHONG)
        viz.showWidget("text2d", cv.viz_WText("Textured mesh", (20, 20), 20, cv.viz_Color().green()));
        viz.spinOnce(500, True)
    def test_viz_show_polyline(self):
        """Render a 32-point helix-like polyline, cycling through a color palette."""
        palette = [ cv.viz_Color().red(),
                    cv.viz_Color().green(),
                    cv.viz_Color().blue(),
                    cv.viz_Color().gold(),
                    cv.viz_Color().raspberry(),
                    cv.viz_Color().bluberry(),
                    cv.viz_Color().lime()]
        palette_size = len(palette)
        polyline = np.zeros(shape=(1, 32, 3), dtype=np.float32)
        colors = np.zeros(shape=(1, 32, 3), dtype=np.uint8)
        for i in range(polyline.shape[1]):
            # Advance along X while tracing a circle in the YZ plane.
            polyline[0,i,0] = i / 16.0
            polyline[0,i,1] = np.cos(i * np.pi/6)
            polyline[0,i,2] = np.sin(i * np.pi/6)
            # colors are stored BGR, matching OpenCV channel order.
            colors[0,i,0] = palette[i % palette_size].get_blue()
            colors[0,i,1] = palette[i % palette_size].get_green()
            colors[0,i,2] = palette[i % palette_size].get_red()
        viz = cv.viz_Viz3d("show_polyline")
        viz.showWidget("polyline", cv.viz_WPolyLine(polyline, colors))
        viz.showWidget("coosys", cv.viz_WCoordinateSystem())
        viz.showWidget("text2d", cv.viz_WText("Polyline", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True)
    def test_viz_show_sampled_normals(self):
        """Render the dragon mesh together with a subsampled field of its normals."""
        mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply"))
        mesh.normals = cv.viz.computeNormals(mesh)
        pose = cv.viz_Affine3d().rotate((0, 0.8, 0))
        viz = cv.viz_Viz3d("show_sampled_normals")
        viz.showWidget("mesh", cv.viz_WMesh(mesh), pose)
        # Draw every 30th normal with length 0.1.
        viz.showWidget("normals", cv.viz_WCloudNormals(mesh.cloud, mesh.normals, 30, 0.1, cv.viz_Color().green()), pose)
        viz.setRenderingProperty("normals", cv.viz.LINE_WIDTH, 2.0)
        viz.showWidget("text2d", cv.viz_WText("Cloud or mesh normals", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True);
    def test_viz_show_cloud_shaded_by_normals(self):
        """Render the dragon cloud Gouraud-shaded using its computed normals."""
        mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply"))
        mesh.normals = cv.viz.computeNormals(mesh)
        pose = cv.viz_Affine3d().rotate((0, 0.8, 0))
        # Passing normals to WCloud enables lighting-based shading.
        cloud = cv.viz_WCloud(mesh.cloud, cv.viz_Color().white(), mesh.normals)
        cloud.setRenderingProperty(cv.viz.SHADING, cv.viz.SHADING_GOURAUD)
        viz = cv.viz_Viz3d("show_cloud_shaded_by_normals")
        viz.showWidget("cloud", cloud, pose)
        viz.showWidget("text2d", cv.viz_WText("Cloud shaded by normals", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True)
    def test_viz_show_image_method(self):
        """Exercise Viz3d.showImage with and without an explicit window size."""
        lena = cv.imread(self.find_file("viz/lena.png"))
        # lena_gray is only used by the disabled line below (kept for the BUG note).
        lena_gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY)
        viz = cv.viz_Viz3d("show_image_method")
        viz.showImage(lena)
        viz.spinOnce(1500, True)
        viz.showImage(lena, (lena.shape[1], lena.shape[0]))
        viz.spinOnce(1500, True)
        #cv.viz.imshow("show_image_method", lena_gray).spinOnce(500, True) BUG
    def test_viz_show_follower(self):
        """Show a WText3D follower (text that always faces the camera) and update it."""
        viz = cv.viz_Viz3d("show_follower")
        viz.showWidget("coos", cv.viz_WCoordinateSystem())
        viz.showWidget("cube", cv.viz_WCube())
        # face_camera=True makes the 3D text track the viewer.
        text_3d = cv.viz_WText3D("Simple 3D follower", (-0.5, -0.5, 0.5), 0.125, True, cv.viz_Color().green())
        viz.showWidget("t3d_2", text_3d)
        viz.showWidget("text2d", cv.viz_WText("Follower: text always facing camera", (20, 20), 20, cv.viz_Color().green()))
        viz.setBackgroundMeshLab()
        viz.spinOnce(500, True)
        # Verify the widget can be retargeted after it is shown.
        text_3d.setText("Updated follower 3D")
        viz.spinOnce(500, True)
def test_viz_show_trajectory_reposition(self):
mat, path = generate_test_trajectory()
viz = cv.viz_Viz3d("show_trajectory_reposition_to_origin")
viz.showWidget("coos", cv.viz_WCoordinateSystem())
viz.showWidget("sub3", cv.viz_WTrajectory(mat[0: len(path) // 3,:,:], cv.viz.PyWTrajectory_BOTH, 0.2, cv.viz_Color().brown()), path[0].inv())
viz.showWidget("text2d", cv.viz_WText("Trajectory resposition to origin", (20, 20), 20, cv.viz_Color().green()))
viz.spinOnce(500, True)
    def test_viz_show_trajectories(self):
        """Render overlapping sub-ranges of one trajectory with every widget style.

        Spheres, path-only, frames-only, both, and two frustum variants are
        shown, then the camera orbits the scene for 50 frames.
        """
        mat, path = generate_test_trajectory()
        size =len(path)
        # Overlapping slices of the trajectory, one per widget variant.
        sub0 = np.copy(mat[0: size//10+1,::])
        sub1 = np.copy(mat[size//10: size//5+1,::])
        sub2 = np.copy(mat[size//5: 11*size//12,::])
        sub3 = np.copy(mat[11 * size // 12 : size,::])
        sub4 = np.copy(mat[3 * size//4: 33*size//40,::])
        sub5 = np.copy(mat[11*size//12: size,::])
        # Camera intrinsics for the frustum widget.
        K = np.array([[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 0.0, 1.0]],dtype=np.float64)
        viz = cv.viz_Viz3d("show_trajectories")
        viz.showWidget("coos", cv.viz_WCoordinateSystem())
        viz.showWidget("sub0", cv.viz_WTrajectorySpheres(sub0, 0.25, 0.07))
        viz.showWidget("sub1", cv.viz_WTrajectory(sub1, cv.viz.PyWTrajectory_PATH, 0.2, cv.viz_Color().brown()))
        viz.showWidget("sub2", cv.viz_WTrajectory(sub2, cv.viz.PyWTrajectory_FRAMES, 0.2))
        viz.showWidget("sub3", cv.viz_WTrajectory(sub3, cv.viz.PyWTrajectory_BOTH, 0.2, cv.viz_Color().green()))
        viz.showWidget("sub4", cv.viz_WTrajectoryFrustums(sub4, K, 0.3, cv.viz_Color().yellow()))
        viz.showWidget("sub5", cv.viz_WTrajectoryFrustums(sub5, (0.78, 0.78), 0.15, cv.viz_Color().magenta())) #BUG
        viz.showWidget("text2d", cv.viz_WText("Different kinds of supported trajectories", (20, 20), 20, cv.viz_Color().green()))
        i = 0
        for num in range(50):
            i = i - 1
            a = i % 360
            # Orbit the camera on a radius-7.5 circle at height 0.7.
            pose = (np.sin(a * np.pi/180)* 7.5, 0.7, np.cos(a * np.pi/180)* 7.5)
            viz.setViewerPose(cv.viz.makeCameraPose(pose , (0.0, 0.5, 0.0), (0.0, 0.1, 0.0)));
            viz.spinOnce(100, True)
        viz.resetCamera()
        viz.spinOnce(500, True)
    def test_viz_show_camera_positions(self):
        """Render WCameraPosition widgets (frustum-only and image-in-frustum forms)."""
        # Camera intrinsics for the K-based frustum widget.
        K = np.array([[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 0.0, 1.0]],dtype=np.float64)
        lena = cv.imread(self.find_file("viz/lena.png"))
        lena_gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY)
        poses = []
        # Two camera poses on a radius-5 circle looking at the origin.
        for i in range(2):
            pose = (5 * np.sin(3.14 + 2.7 + i*60 * np.pi/180), 2 - i*1.5, 5 * np.cos(3.14 + 2.7 + i*60 * np.pi/180))
            poses.append(cv.viz.makeCameraPose(pose, (0.0, 0.0, 0.0), (0.0, -0.1, 0.0)))
        viz = cv.viz_Viz3d("show_camera_positions")
        viz.showWidget("sphe", cv.viz_WSphere((0,0,0), 1.0, 10, cv.viz_Color().orange_red()))
        viz.showWidget("coos", cv.viz_WCoordinateSystem(1.5))
        viz.showWidget("pos1", cv.viz_WCameraPosition(0.75), poses[0])
        viz.showWidget("pos2", cv.viz_WCameraPosition((0.78, 0.78), lena, 2.2, cv.viz_Color().green()), poses[0])
        viz.showWidget("pos3", cv.viz_WCameraPosition(0.75), poses[0])
        viz.showWidget("pos4", cv.viz_WCameraPosition(K, lena_gray, 3, cv.viz_Color().indigo()), poses[1])
        viz.showWidget("text2d", cv.viz_WText("Camera positions with images", (20, 20), 20, cv.viz_Color().green()))
        viz.spinOnce(500, True)
"""
TEST(Viz, show_widget_merger)
{
WWidgetMerger merger;
merger.addWidget(WCube(Vec3d::all(0.0), Vec3d::all(1.0), true, Color::gold()));
RNG& rng = theRNG();
for(int i = 0; i < 77; ++i)
{
Vec3b c;
rng.fill(c, RNG::NORMAL, Scalar::all(128), Scalar::all(48), true);
merger.addWidget(WSphere(Vec3d(c)*(1.0/255.0), 7.0/255.0, 10, Color(c[2], c[1], c[0])));
}
merger.finalize();
Viz3d viz("show_mesh_random_color");
viz.showWidget("coo", WCoordinateSystem());
viz.showWidget("merger", merger);
viz.showWidget("text2d", WText("Widget merger", Point(20, 20), 20, Color::green()));
viz.spinOnce(500, true);
}
"""
# Standard OpenCV test-suite entry point.
if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
| [
"cv2.viz.makeTransformToGlobal",
"cv2.viz_WCoordinateSystem",
"cv2.viz_Mesh",
"cv2.viz_Color",
"tests_common.NewOpenCVTests.bootstrap",
"numpy.array",
"numpy.sin",
"cv2.viz_WCameraPosition",
"numpy.arange",
"cv2.viz_WCloudCollection",
"cv2.viz_WTrajectory",
"cv2.viz_WTrajectorySpheres",
"cv2... | [((151, 171), 'numpy.arange', 'np.arange', (['(0)', '(271)', '(3)'], {}), '(0, 271, 3)\n', (160, 171), True, 'import numpy as np\n'), ((186, 208), 'numpy.arange', 'np.arange', (['(0)', '(1200)', '(10)'], {}), '(0, 1200, 10)\n', (195, 208), True, 'import numpy as np\n'), ((837, 869), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""Coordinate Frame"""'], {}), "('Coordinate Frame')\n", (849, 869), True, 'import cv2 as cv\n'), ((1048, 1109), 'cv2.viz.makeCameraPose', 'cv.viz.makeCameraPose', (['cam_origin', 'cam_focal_point', 'cam_y_dir'], {}), '(cam_origin, cam_focal_point, cam_y_dir)\n', (1069, 1109), True, 'import cv2 as cv\n'), ((1126, 1224), 'cv2.viz.makeTransformToGlobal', 'cv.viz.makeTransformToGlobal', (['(0.0, -1.0, 0.0)', '(-1.0, 0.0, 0.0)', '(0.0, 0.0, -1.0)', 'cam_origin'], {}), '((0.0, -1.0, 0.0), (-1.0, 0.0, 0.0), (0.0, 0.0,\n -1.0), cam_origin)\n', (1154, 1224), True, 'import cv2 as cv\n'), ((1238, 1264), 'cv2.viz.readCloud', 'cv.viz.readCloud', (['filename'], {}), '(filename)\n', (1254, 1264), True, 'import cv2 as cv\n'), ((1353, 1370), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (1368, 1370), True, 'import cv2 as cv\n'), ((21946, 21972), 'tests_common.NewOpenCVTests.bootstrap', 'NewOpenCVTests.bootstrap', ([], {}), '()\n', (21970, 21972), False, 'from tests_common import NewOpenCVTests\n'), ((900, 926), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (924, 926), True, 'import cv2 as cv\n'), ((1546, 1590), 'cv2.viz_WCameraPosition', 'cv.viz_WCameraPosition', (['(0.889484, 0.523599)'], {}), '((0.889484, 0.523599))\n', (1568, 1590), True, 'import cv2 as cv\n'), ((2409, 2428), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""abc"""'], {}), "('abc')\n", (2421, 2428), True, 'import cv2 as cv\n'), ((2662, 2697), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_simple_widgets"""'], {}), "('show_simple_widgets')\n", (2674, 2697), True, 'import cv2 as cv\n'), ((4503, 4539), 'cv2.cvtColor', 'cv.cvtColor', (['lena', 'cv.COLOR_BGR2GRAY'], 
{}), '(lena, cv.COLOR_BGR2GRAY)\n', (4514, 4539), True, 'import cv2 as cv\n'), ((4675, 4709), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_overlay_image"""'], {}), "('show_overlay_image')\n", (4687, 4709), True, 'import cv2 as cv\n'), ((4899, 4965), 'cv2.viz_WImageOverlay', 'cv.viz_WImageOverlay', (['lena', '(10, 10, half_lsize[1], half_lsize[0])'], {}), '(lena, (10, 10, half_lsize[1], half_lsize[0]))\n', (4919, 4965), True, 'import cv2 as cv\n'), ((6158, 6194), 'cv2.cvtColor', 'cv.cvtColor', (['lena', 'cv.COLOR_BGR2GRAY'], {}), '(lena, cv.COLOR_BGR2GRAY)\n', (6169, 6194), True, 'import cv2 as cv\n'), ((6210, 6239), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_image_3d"""'], {}), "('show_image_3d')\n", (6222, 6239), True, 'import cv2 as cv\n'), ((6509, 6542), 'cv2.viz_WImage3D', 'cv.viz_WImage3D', (['lena', '(1.0, 1.0)'], {}), '(lena, (1.0, 1.0))\n', (6524, 6542), True, 'import cv2 as cv\n'), ((7589, 7606), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (7604, 7606), True, 'import cv2 as cv\n'), ((7662, 7697), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_cloud_bluberry"""'], {}), "('show_cloud_bluberry')\n", (7674, 7697), True, 'import cv2 as cv\n'), ((8190, 8292), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(dragon_cloud.shape[0], dragon_cloud.shape[1], 3)', 'dtype': 'np.uint8'}), '(0, 255, size=(dragon_cloud.shape[0], dragon_cloud.shape[1\n ], 3), dtype=np.uint8)\n', (8207, 8292), True, 'import numpy as np\n'), ((8302, 8319), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (8317, 8319), True, 'import cv2 as cv\n'), ((8376, 8415), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_cloud_random_color"""'], {}), "('show_cloud_random_color')\n", (8388, 8415), True, 'import cv2 as cv\n'), ((9011, 9028), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (9026, 9028), True, 'import cv2 as cv\n'), ((9085, 9118), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_cloud_masked"""'], {}), "('show_cloud_masked')\n", (9097, 9118), True, 
'import cv2 as cv\n'), ((9520, 9545), 'cv2.viz_WCloudCollection', 'cv.viz_WCloudCollection', ([], {}), '()\n', (9543, 9545), True, 'import cv2 as cv\n'), ((9561, 9578), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (9576, 9578), True, 'import cv2 as cv\n'), ((9999, 10036), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_cloud_collection"""'], {}), "('show_cloud_collection')\n", (10011, 10036), True, 'import cv2 as cv\n'), ((10460, 10495), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_painted_clouds"""'], {}), "('show_painted_clouds')\n", (10472, 10495), True, 'import cv2 as cv\n'), ((10608, 10665), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', (['(0.0, -np.pi / 2, 0.0)', '(-1.5, 0.0, 0.0)'], {}), '((0.0, -np.pi / 2, 0.0), (-1.5, 0.0, 0.0))\n', (10623, 10665), True, 'import cv2 as cv\n'), ((10680, 10735), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', (['(0.0, np.pi / 2, 0.0)', '(1.5, 0.0, 0.0)'], {}), '((0.0, np.pi / 2, 0.0), (1.5, 0.0, 0.0))\n', (10695, 10735), True, 'import cv2 as cv\n'), ((11411, 11436), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_mesh"""'], {}), "('show_mesh')\n", (11423, 11436), True, 'import cv2 as cv\n'), ((11858, 11923), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': 'mesh.colors.shape', 'dtype': 'np.uint8'}), '(0, 255, size=mesh.colors.shape, dtype=np.uint8)\n', (11875, 11923), True, 'import numpy as np\n'), ((11938, 11963), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_mesh"""'], {}), "('show_mesh')\n", (11950, 11963), True, 'import cv2 as cv\n'), ((12452, 12468), 'numpy.arange', 'np.arange', (['(0)', '(64)'], {}), '(0, 64)\n', (12461, 12468), True, 'import numpy as np\n'), ((12933, 13007), 'numpy.zeros', 'np.zeros', ([], {'shape': '(points0.shape[0], points0.shape[1] * 2)', 'dtype': 'np.float32'}), '(shape=(points0.shape[0], points0.shape[1] * 2), dtype=np.float32)\n', (12941, 13007), True, 'import numpy as np\n'), ((13027, 13103), 'numpy.zeros', 'np.zeros', ([], {'shape': '(tcoords0.shape[0], tcoords0.shape[1] * 2)', 
'dtype': 'np.float32'}), '(shape=(tcoords0.shape[0], tcoords0.shape[1] * 2), dtype=np.float32)\n', (13035, 13103), True, 'import numpy as np\n'), ((13273, 13334), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4 * (points.shape[1] - 2) + 1)', 'dtype': 'np.int32'}), '(shape=4 * (points.shape[1] - 2) + 1, dtype=np.int32)\n', (13281, 13334), True, 'import numpy as np\n'), ((13506, 13519), 'cv2.viz_Mesh', 'cv.viz_Mesh', ([], {}), '()\n', (13517, 13519), True, 'import cv2 as cv\n'), ((13804, 13838), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_textured_mesh"""'], {}), "('show_textured_mesh')\n", (13816, 13838), True, 'import cv2 as cv\n'), ((14606, 14650), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 32, 3)', 'dtype': 'np.float32'}), '(shape=(1, 32, 3), dtype=np.float32)\n', (14614, 14650), True, 'import numpy as np\n'), ((14668, 14710), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 32, 3)', 'dtype': 'np.uint8'}), '(shape=(1, 32, 3), dtype=np.uint8)\n', (14676, 14710), True, 'import numpy as np\n'), ((15103, 15132), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_polyline"""'], {}), "('show_polyline')\n", (15115, 15132), True, 'import cv2 as cv\n'), ((15530, 15557), 'cv2.viz.computeNormals', 'cv.viz.computeNormals', (['mesh'], {}), '(mesh)\n', (15551, 15557), True, 'import cv2 as cv\n'), ((15625, 15661), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_sampled_normals"""'], {}), "('show_sampled_normals')\n", (15637, 15661), True, 'import cv2 as cv\n'), ((16195, 16222), 'cv2.viz.computeNormals', 'cv.viz.computeNormals', (['mesh'], {}), '(mesh)\n', (16216, 16222), True, 'import cv2 as cv\n'), ((16446, 16490), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_cloud_shaded_by_normals"""'], {}), "('show_cloud_shaded_by_normals')\n", (16458, 16490), True, 'import cv2 as cv\n'), ((16801, 16837), 'cv2.cvtColor', 'cv.cvtColor', (['lena', 'cv.COLOR_BGR2GRAY'], {}), '(lena, cv.COLOR_BGR2GRAY)\n', (16812, 16837), True, 'import cv2 as cv\n'), ((16852, 16885), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', 
(['"""show_image_method"""'], {}), "('show_image_method')\n", (16864, 16885), True, 'import cv2 as cv\n'), ((17173, 17202), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_follower"""'], {}), "('show_follower')\n", (17185, 17202), True, 'import cv2 as cv\n'), ((17846, 17898), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_trajectory_reposition_to_origin"""'], {}), "('show_trajectory_reposition_to_origin')\n", (17858, 17898), True, 'import cv2 as cv\n'), ((18391, 18424), 'numpy.copy', 'np.copy', (['mat[0:size // 10 + 1, :]'], {}), '(mat[0:size // 10 + 1, :])\n', (18398, 18424), True, 'import numpy as np\n'), ((18437, 18478), 'numpy.copy', 'np.copy', (['mat[size // 10:size // 5 + 1, :]'], {}), '(mat[size // 10:size // 5 + 1, :])\n', (18444, 18478), True, 'import numpy as np\n'), ((18489, 18531), 'numpy.copy', 'np.copy', (['mat[size // 5:11 * size // 12, :]'], {}), '(mat[size // 5:11 * size // 12, :])\n', (18496, 18531), True, 'import numpy as np\n'), ((18542, 18579), 'numpy.copy', 'np.copy', (['mat[11 * size // 12:size, :]'], {}), '(mat[11 * size // 12:size, :])\n', (18549, 18579), True, 'import numpy as np\n'), ((18598, 18644), 'numpy.copy', 'np.copy', (['mat[3 * size // 4:33 * size // 40, :]'], {}), '(mat[3 * size // 4:33 * size // 40, :])\n', (18605, 18644), True, 'import numpy as np\n'), ((18655, 18692), 'numpy.copy', 'np.copy', (['mat[11 * size // 12:size, :]'], {}), '(mat[11 * size // 12:size, :])\n', (18662, 18692), True, 'import numpy as np\n'), ((18702, 18795), 'numpy.array', 'np.array', (['[[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float64'}), '([[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 0.0, 1.0]],\n dtype=np.float64)\n', (18710, 18795), True, 'import numpy as np\n'), ((18806, 18839), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_trajectories"""'], {}), "('show_trajectories')\n", (18818, 18839), True, 'import cv2 as cv\n'), ((20056, 20149), 'numpy.array', 'np.array', (['[[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 
0.0, 1.0]]'], {'dtype': 'np.float64'}), '([[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 0.0, 1.0]],\n dtype=np.float64)\n', (20064, 20149), True, 'import numpy as np\n'), ((20222, 20258), 'cv2.cvtColor', 'cv.cvtColor', (['lena', 'cv.COLOR_BGR2GRAY'], {}), '(lena, cv.COLOR_BGR2GRAY)\n', (20233, 20258), True, 'import cv2 as cv\n'), ((20526, 20563), 'cv2.viz_Viz3d', 'cv.viz_Viz3d', (['"""show_camera_positions"""'], {}), "('show_camera_positions')\n", (20538, 20563), True, 'import cv2 as cv\n'), ((553, 615), 'cv2.viz.makeCameraPose', 'cv.viz.makeCameraPose', (['(x, y, z)', '(0.0, 0, 0)', '(0.0, 1.0, 0.0)'], {}), '((x, y, z), (0.0, 0, 0), (0.0, 1.0, 0.0))\n', (574, 615), True, 'import cv2 as cv\n'), ((1663, 1690), 'cv2.viz_WCameraPosition', 'cv.viz_WCameraPosition', (['(0.5)'], {}), '(0.5)\n', (1685, 1690), True, 'import cv2 as cv\n'), ((2464, 2491), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', (['(1)'], {}), '(1)\n', (2488, 2491), True, 'import cv2 as cv\n'), ((2530, 2564), 'cv2.viz_WPaintedCloud', 'cv.viz_WPaintedCloud', (['dragon_cloud'], {}), '(dragon_cloud)\n', (2550, 2564), True, 'import cv2 as cv\n'), ((2765, 2791), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (2789, 2791), True, 'import cv2 as cv\n'), ((2824, 2838), 'cv2.viz_WCube', 'cv.viz_WCube', ([], {}), '()\n', (2836, 2838), True, 'import cv2 as cv\n'), ((3917, 3944), 'cv2.viz_WPlane', 'cv.viz_WPlane', (['(0.25, 0.75)'], {}), '((0.25, 0.75))\n', (3930, 3944), True, 'import cv2 as cv\n'), ((4812, 4838), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (4836, 4838), True, 'import cv2 as cv\n'), ((4871, 4885), 'cv2.viz_WCube', 'cv.viz_WCube', ([], {}), '()\n', (4883, 4885), True, 'import cv2 as cv\n'), ((5031, 5122), 'cv2.viz_WImageOverlay', 'cv.viz_WImageOverlay', (['gray', '(vsz[0] - 10 - cols // 2, 10, half_lsize[1], half_lsize[0])'], {}), '(gray, (vsz[0] - 10 - cols // 2, 10, half_lsize[1],\n half_lsize[0]))\n', (5051, 5122), True, 
'import cv2 as cv\n'), ((5151, 5242), 'cv2.viz_WImageOverlay', 'cv.viz_WImageOverlay', (['gray', '(10, vsz[1] - 10 - rows // 2, half_lsize[1], half_lsize[0])'], {}), '(gray, (10, vsz[1] - 10 - rows // 2, half_lsize[1],\n half_lsize[0]))\n', (5171, 5242), True, 'import cv2 as cv\n'), ((5271, 5384), 'cv2.viz_WImageOverlay', 'cv.viz_WImageOverlay', (['lena', '(vsz[0] - 10 - cols // 2, vsz[1] - 10 - rows // 2, half_lsize[1], half_lsize[0]\n )'], {}), '(lena, (vsz[0] - 10 - cols // 2, vsz[1] - 10 - rows // \n 2, half_lsize[1], half_lsize[0]))\n', (5291, 5384), True, 'import cv2 as cv\n'), ((6306, 6332), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (6330, 6332), True, 'import cv2 as cv\n'), ((6365, 6379), 'cv2.viz_WCube', 'cv.viz_WCube', ([], {}), '()\n', (6377, 6379), True, 'import cv2 as cv\n'), ((6577, 6632), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', (['(0.0, np.pi / 2, 0.0)', '(0.5, 0.0, 0.0)'], {}), '((0.0, np.pi / 2, 0.0), (0.5, 0.0, 0.0))\n', (6592, 6632), True, 'import cv2 as cv\n'), ((6779, 6874), 'cv2.viz_WImage3D', 'cv.viz_WImage3D', (['lena_gray', '(1.0, 1.0)', '(-0.5, -0.5, 0.0)', '(1.0, 1.0, 0.0)', '(0.0, 1.0, 0.0)'], {}), '(lena_gray, (1.0, 1.0), (-0.5, -0.5, 0.0), (1.0, 1.0, 0.0),\n (0.0, 1.0, 0.0))\n', (6794, 6874), True, 'import cv2 as cv\n'), ((7786, 7812), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (7810, 7812), True, 'import cv2 as cv\n'), ((8484, 8510), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (8508, 8510), True, 'import cv2 as cv\n'), ((8545, 8580), 'cv2.viz_WCloud', 'cv.viz_WCloud', (['dragon_cloud', 'colors'], {}), '(dragon_cloud, colors)\n', (8558, 8580), True, 'import cv2 as cv\n'), ((9153, 9179), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (9177, 9179), True, 'import cv2 as cv\n'), ((9214, 9241), 'cv2.viz_WCloud', 'cv.viz_WCloud', (['dragon_cloud'], {}), '(dragon_cloud)\n', (9227, 9241), True, 'import cv2 as cv\n'), 
((10124, 10150), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (10148, 10150), True, 'import cv2 as cv\n'), ((10564, 10590), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (10588, 10590), True, 'import cv2 as cv\n'), ((10768, 10795), 'cv2.viz_WPaintedCloud', 'cv.viz_WPaintedCloud', (['cloud'], {}), '(cloud)\n', (10788, 10795), True, 'import cv2 as cv\n'), ((10837, 10902), 'cv2.viz_WPaintedCloud', 'cv.viz_WPaintedCloud', (['cloud', '(0.0, -0.75, -1.0)', '(0.0, 0.75, 0.0)'], {}), '(cloud, (0.0, -0.75, -1.0), (0.0, 0.75, 0.0))\n', (10857, 10902), True, 'import cv2 as cv\n'), ((11470, 11496), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (11494, 11496), True, 'import cv2 as cv\n'), ((11530, 11548), 'cv2.viz_WMesh', 'cv.viz_WMesh', (['mesh'], {}), '(mesh)\n', (11542, 11548), True, 'import cv2 as cv\n'), ((11997, 12023), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (12021, 12023), True, 'import cv2 as cv\n'), ((12057, 12075), 'cv2.viz_WMesh', 'cv.viz_WMesh', (['mesh'], {}), '(mesh)\n', (12069, 12075), True, 'import cv2 as cv\n'), ((13908, 13934), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (13932, 13934), True, 'import cv2 as cv\n'), ((13968, 13986), 'cv2.viz_WMesh', 'cv.viz_WMesh', (['mesh'], {}), '(mesh)\n', (13980, 13986), True, 'import cv2 as cv\n'), ((14823, 14844), 'numpy.cos', 'np.cos', (['(i * np.pi / 6)'], {}), '(i * np.pi / 6)\n', (14829, 14844), True, 'import numpy as np\n'), ((14873, 14894), 'numpy.sin', 'np.sin', (['(i * np.pi / 6)'], {}), '(i * np.pi / 6)\n', (14879, 14894), True, 'import numpy as np\n'), ((15168, 15202), 'cv2.viz_WPolyLine', 'cv.viz_WPolyLine', (['polyline', 'colors'], {}), '(polyline, colors)\n', (15184, 15202), True, 'import cv2 as cv\n'), ((15237, 15263), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (15261, 15263), True, 'import cv2 as cv\n'), ((15693, 15711), 
'cv2.viz_WMesh', 'cv.viz_WMesh', (['mesh'], {}), '(mesh)\n', (15705, 15711), True, 'import cv2 as cv\n'), ((17235, 17261), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (17259, 17261), True, 'import cv2 as cv\n'), ((17294, 17308), 'cv2.viz_WCube', 'cv.viz_WCube', ([], {}), '()\n', (17306, 17308), True, 'import cv2 as cv\n'), ((17930, 17956), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (17954, 17956), True, 'import cv2 as cv\n'), ((18871, 18897), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', ([], {}), '()\n', (18895, 18897), True, 'import cv2 as cv\n'), ((18930, 18973), 'cv2.viz_WTrajectorySpheres', 'cv.viz_WTrajectorySpheres', (['sub0', '(0.25)', '(0.07)'], {}), '(sub0, 0.25, 0.07)\n', (18955, 18973), True, 'import cv2 as cv\n'), ((19119, 19177), 'cv2.viz_WTrajectory', 'cv.viz_WTrajectory', (['sub2', 'cv.viz.PyWTrajectory_FRAMES', '(0.2)'], {}), '(sub2, cv.viz.PyWTrajectory_FRAMES, 0.2)\n', (19137, 19177), True, 'import cv2 as cv\n'), ((20689, 20718), 'cv2.viz_WCoordinateSystem', 'cv.viz_WCoordinateSystem', (['(1.5)'], {}), '(1.5)\n', (20713, 20718), True, 'import cv2 as cv\n'), ((20751, 20779), 'cv2.viz_WCameraPosition', 'cv.viz_WCameraPosition', (['(0.75)'], {}), '(0.75)\n', (20773, 20779), True, 'import cv2 as cv\n'), ((20936, 20964), 'cv2.viz_WCameraPosition', 'cv.viz_WCameraPosition', (['(0.75)'], {}), '(0.75)\n', (20958, 20964), True, 'import cv2 as cv\n'), ((264, 293), 'numpy.cos', 'np.cos', (['(i * 3 * np.pi / 180.0)'], {}), '(i * 3 * np.pi / 180.0)\n', (270, 293), True, 'import numpy as np\n'), ((405, 442), 'numpy.sin', 'np.sin', (['(0.6 + j * 1.5 * np.pi / 180.0)'], {}), '(0.6 + j * 1.5 * np.pi / 180.0)\n', (411, 442), True, 'import numpy as np\n'), ((457, 486), 'numpy.sin', 'np.sin', (['(i * 3 * np.pi / 180.0)'], {}), '(i * 3 * np.pi / 180.0)\n', (463, 486), True, 'import numpy as np\n'), ((1312, 1326), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (1324, 1326), True, 'import cv2 
as cv\n'), ((1947, 1993), 'os.environ.get', 'os.environ.get', (['"""OPENCV_PYTEST_RUN_VIZ"""', '(False)'], {}), "('OPENCV_PYTEST_RUN_VIZ', False)\n", (1961, 1993), False, 'import os\n'), ((3128, 3151), 'cv2.viz_Color.bluberry', 'cv.viz_Color.bluberry', ([], {}), '()\n', (3149, 3151), True, 'import cv2 as cv\n'), ((5686, 5747), 'cv2.viz.makeCameraPose', 'cv.viz.makeCameraPose', (['pose', '(0.0, 0.5, 0.0)', '(0.0, 0.1, 0.0)'], {}), '(pose, (0.0, 0.5, 0.0), (0.0, 0.1, 0.0))\n', (5707, 5747), True, 'import cv2 as cv\n'), ((10271, 10294), 'cv2.viz_Color', 'cv.viz_Color', (['(0)', '(255)', '(0)'], {}), '(0, 255, 0)\n', (10283, 10294), True, 'import cv2 as cv\n'), ((11142, 11156), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (11154, 11156), True, 'import cv2 as cv\n'), ((11237, 11260), 'cv2.viz_Color', 'cv.viz_Color', (['(0)', '(255)', '(0)'], {}), '(0, 255, 0)\n', (11249, 11260), True, 'import cv2 as cv\n'), ((12497, 12542), 'numpy.zeros', 'np.zeros', ([], {'shape': 'angle.shape', 'dtype': 'np.float32'}), '(shape=angle.shape, dtype=np.float32)\n', (12505, 12542), True, 'import numpy as np\n'), ((12544, 12571), 'numpy.cos', 'np.cos', (['(angle * np.pi / 128)'], {}), '(angle * np.pi / 128)\n', (12550, 12571), True, 'import numpy as np\n'), ((12572, 12599), 'numpy.sin', 'np.sin', (['(angle * np.pi / 128)'], {}), '(angle * np.pi / 128)\n', (12578, 12599), True, 'import numpy as np\n'), ((12681, 12708), 'numpy.cos', 'np.cos', (['(angle * np.pi / 128)'], {}), '(angle * np.pi / 128)\n', (12687, 12708), True, 'import numpy as np\n'), ((12708, 12735), 'numpy.sin', 'np.sin', (['(angle * np.pi / 128)'], {}), '(angle * np.pi / 128)\n', (12714, 12735), True, 'import numpy as np\n'), ((12766, 12811), 'numpy.zeros', 'np.zeros', ([], {'shape': 'angle.shape', 'dtype': 'np.float32'}), '(shape=angle.shape, dtype=np.float32)\n', (12774, 12811), True, 'import numpy as np\n'), ((12856, 12900), 'numpy.ones', 'np.ones', ([], {'shape': 'angle.shape', 'dtype': 'np.float32'}), 
'(shape=angle.shape, dtype=np.float32)\n', (12863, 12900), True, 'import numpy as np\n'), ((15573, 15590), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (15588, 15590), True, 'import cv2 as cv\n'), ((16238, 16255), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (16253, 16255), True, 'import cv2 as cv\n'), ((19838, 19899), 'cv2.viz.makeCameraPose', 'cv.viz.makeCameraPose', (['pose', '(0.0, 0.5, 0.0)', '(0.0, 0.1, 0.0)'], {}), '(pose, (0.0, 0.5, 0.0), (0.0, 0.1, 0.0))\n', (19859, 19899), True, 'import cv2 as cv\n'), ((20448, 20510), 'cv2.viz.makeCameraPose', 'cv.viz.makeCameraPose', (['pose', '(0.0, 0.0, 0.0)', '(0.0, -0.1, 0.0)'], {}), '(pose, (0.0, 0.0, 0.0), (0.0, -0.1, 0.0))\n', (20469, 20510), True, 'import cv2 as cv\n'), ((307, 344), 'numpy.cos', 'np.cos', (['(1.2 + i * 1.2 * np.pi / 180.0)'], {}), '(1.2 + i * 1.2 * np.pi / 180.0)\n', (313, 344), True, 'import numpy as np\n'), ((373, 398), 'numpy.sin', 'np.sin', (['(j * np.pi / 180.0)'], {}), '(j * np.pi / 180.0)\n', (379, 398), True, 'import numpy as np\n'), ((500, 531), 'numpy.cos', 'np.cos', (['(1.2 + i * np.pi / 180.0)'], {}), '(1.2 + i * np.pi / 180.0)\n', (506, 531), True, 'import numpy as np\n'), ((1388, 1405), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (1403, 1405), True, 'import cv2 as cv\n'), ((3670, 3684), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (3682, 3684), True, 'import cv2 as cv\n'), ((3818, 3832), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (3830, 3832), True, 'import cv2 as cv\n'), ((4172, 4189), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (4187, 4189), True, 'import cv2 as cv\n'), ((5600, 5623), 'numpy.sin', 'np.sin', (['(a * np.pi / 180)'], {}), '(a * np.pi / 180)\n', (5606, 5623), True, 'import numpy as np\n'), ((5632, 5655), 'numpy.cos', 'np.cos', (['(a * np.pi / 180)'], {}), '(a * np.pi / 180)\n', (5638, 5655), True, 'import numpy as np\n'), ((7729, 7743), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (7741, 7743), 
True, 'import cv2 as cv\n'), ((9688, 9702), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (9700, 9702), True, 'import cv2 as cv\n'), ((9805, 9819), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (9817, 9819), True, 'import cv2 as cv\n'), ((9828, 9845), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (9843, 9845), True, 'import cv2 as cv\n'), ((9897, 9911), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (9909, 9911), True, 'import cv2 as cv\n'), ((9919, 9936), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (9934, 9936), True, 'import cv2 as cv\n'), ((10068, 10082), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (10080, 10082), True, 'import cv2 as cv\n'), ((11550, 11567), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (11565, 11567), True, 'import cv2 as cv\n'), ((12077, 12094), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (12092, 12094), True, 'import cv2 as cv\n'), ((12636, 12680), 'numpy.ones', 'np.ones', ([], {'shape': 'angle.shape', 'dtype': 'np.float32'}), '(shape=angle.shape, dtype=np.float32)\n', (12643, 12680), True, 'import numpy as np\n'), ((14261, 14275), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (14273, 14275), True, 'import cv2 as cv\n'), ((14303, 14317), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (14315, 14317), True, 'import cv2 as cv\n'), ((14347, 14361), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (14359, 14361), True, 'import cv2 as cv\n'), ((14390, 14404), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (14402, 14404), True, 'import cv2 as cv\n'), ((14433, 14447), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (14445, 14447), True, 'import cv2 as cv\n'), ((14481, 14495), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (14493, 14495), True, 'import cv2 as cv\n'), ((14528, 14542), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (14540, 14542), True, 'import cv2 as cv\n'), ((16319, 16333), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (16331, 16333), 
True, 'import cv2 as cv\n'), ((17398, 17412), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (17410, 17412), True, 'import cv2 as cv\n'), ((19747, 19770), 'numpy.sin', 'np.sin', (['(a * np.pi / 180)'], {}), '(a * np.pi / 180)\n', (19753, 19770), True, 'import numpy as np\n'), ((19780, 19803), 'numpy.cos', 'np.cos', (['(a * np.pi / 180)'], {}), '(a * np.pi / 180)\n', (19786, 19803), True, 'import numpy as np\n'), ((20330, 20371), 'numpy.sin', 'np.sin', (['(3.14 + 2.7 + i * 60 * np.pi / 180)'], {}), '(3.14 + 2.7 + i * 60 * np.pi / 180)\n', (20336, 20371), True, 'import numpy as np\n'), ((20384, 20425), 'numpy.cos', 'np.cos', (['(3.14 + 2.7 + i * 60 * np.pi / 180)'], {}), '(3.14 + 2.7 + i * 60 * np.pi / 180)\n', (20390, 20425), True, 'import numpy as np\n'), ((2927, 2941), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (2939, 2941), True, 'import cv2 as cv\n'), ((3042, 3056), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (3054, 3056), True, 'import cv2 as cv\n'), ((3245, 3259), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (3257, 3259), True, 'import cv2 as cv\n'), ((3369, 3383), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (3381, 3383), True, 'import cv2 as cv\n'), ((3454, 3468), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (3466, 3468), True, 'import cv2 as cv\n'), ((3569, 3583), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (3581, 3583), True, 'import cv2 as cv\n'), ((4058, 4072), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (4070, 4072), True, 'import cv2 as cv\n'), ((4148, 4162), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (4160, 4162), True, 'import cv2 as cv\n'), ((5460, 5474), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (5472, 5474), True, 'import cv2 as cv\n'), ((5776, 5804), 'numpy.sin', 'np.sin', (['(i * 10 * np.pi / 180)'], {}), '(i * 10 * np.pi / 180)\n', (5782, 5804), True, 'import numpy as np\n'), ((5985, 5999), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (5997, 5999), True, 'import cv2 as 
cv\n'), ((6468, 6482), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (6480, 6482), True, 'import cv2 as cv\n'), ((6719, 6733), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (6731, 6733), True, 'import cv2 as cv\n'), ((6962, 6976), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (6974, 6976), True, 'import cv2 as cv\n'), ((7068, 7082), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (7080, 7082), True, 'import cv2 as cv\n'), ((7164, 7193), 'numpy.sin', 'np.sin', (['(i * 7.5 * np.pi / 180)'], {}), '(i * 7.5 * np.pi / 180)\n', (7170, 7193), True, 'import numpy as np\n'), ((7391, 7405), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (7403, 7405), True, 'import cv2 as cv\n'), ((7875, 7889), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (7887, 7889), True, 'import cv2 as cv\n'), ((7988, 8002), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (8000, 8002), True, 'import cv2 as cv\n'), ((8670, 8684), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (8682, 8684), True, 'import cv2 as cv\n'), ((9329, 9343), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (9341, 9343), True, 'import cv2 as cv\n'), ((9596, 9613), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (9611, 9613), True, 'import cv2 as cv\n'), ((11008, 11022), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (11020, 11022), True, 'import cv2 as cv\n'), ((11031, 11045), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (11043, 11045), True, 'import cv2 as cv\n'), ((11663, 11677), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (11675, 11677), True, 'import cv2 as cv\n'), ((12276, 12290), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (12288, 12290), True, 'import cv2 as cv\n'), ((14144, 14158), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (14156, 14158), True, 'import cv2 as cv\n'), ((15337, 15351), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (15349, 15351), True, 'import cv2 as cv\n'), ((15809, 15823), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', 
(15821, 15823), True, 'import cv2 as cv\n'), ((15993, 16007), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (16005, 16007), True, 'import cv2 as cv\n'), ((16624, 16638), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (16636, 16638), True, 'import cv2 as cv\n'), ((17562, 17576), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (17574, 17576), True, 'import cv2 as cv\n'), ((18068, 18082), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (18080, 18082), True, 'import cv2 as cv\n'), ((18204, 18218), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (18216, 18218), True, 'import cv2 as cv\n'), ((19063, 19077), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (19075, 19077), True, 'import cv2 as cv\n'), ((19267, 19281), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (19279, 19281), True, 'import cv2 as cv\n'), ((19364, 19378), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (19376, 19378), True, 'import cv2 as cv\n'), ((19474, 19488), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (19486, 19488), True, 'import cv2 as cv\n'), ((19611, 19625), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (19623, 19625), True, 'import cv2 as cv\n'), ((20628, 20642), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (20640, 20642), True, 'import cv2 as cv\n'), ((20870, 20884), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (20882, 20884), True, 'import cv2 as cv\n'), ((21047, 21061), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (21059, 21061), True, 'import cv2 as cv\n'), ((21175, 21189), 'cv2.viz_Color', 'cv.viz_Color', ([], {}), '()\n', (21187, 21189), True, 'import cv2 as cv\n'), ((9712, 9729), 'cv2.viz_Affine3d', 'cv.viz_Affine3d', ([], {}), '()\n', (9727, 9729), True, 'import cv2 as cv\n')] |
#!/usr/bin/env python3
"""
Project 8: Maze Solver with Reinforcement Learning
Author: <NAME> <***<EMAIL>>
Learn policies to walk through a maze by reinforcement learning. This program
implements value iteration with synchronous updates.
Data Assumptions:
1. Maze data are rectangular (i.e. all rows have the same number of characters)
and valid (only contains 'S', 'G', '.', '*'; etc.).
Notes:
1. When the agent reaches the goal/terminal state, it'll stay there.
2. Whenever the agent performs an action (except when it stays in the
goal/terminal state), it receives an immediate reward of -1.
3. When the agent takes an action that'll hit the border or an obstacle, it'll
return to the original state, and it'll still need to pay -1 immediate reward.
"""
import sys
import numpy as np
import warnings
from itertools import product
# for debugging: print the full numpy array
np.set_printoptions(threshold=np.inf, linewidth=100)
class ValueIteration:
    """Maze solver: value iteration with synchronous updates.

    On construction it loads the maze, runs value iteration, and writes the
    learned state values V(s), action values Q(s, a), and greedy policy
    pi(s) to the three output files.
    """

    def __init__(self, maze_input, value_file, q_value_file, policy_file,
                 num_epoch, discount_factor):
        """Run the whole pipeline: load -> learn -> write outputs.

        maze_input      -- path to the maze map file
        value_file      -- output path for V(s)
        q_value_file    -- output path for Q(s, a)
        policy_file     -- output path for pi(s)
        num_epoch       -- number of synchronous sweeps; <= 0 means iterate
                           until max |V_new - V_old| < 0.001
        discount_factor -- discount factor (lambda) on future reward
        """
        values, next_states, goal_state_ind, self.maze_dimension = self.load(maze_input)
        learned_values, learned_q_values, learned_policies = \
            self.learn(values, next_states, goal_state_ind, discount_factor,
                       num_epoch)
        # Output results
        self.output_values(learned_values, value_file)
        self.output_q_values(learned_q_values, q_value_file)
        self.output_policies(learned_policies, policy_file)

    def load(self, maze_input):
        """
        Read in and parse maze information.

        Returns the value function V(s), value function indices of next states
        that correspond to transition probabilities P(s'|s,a), goal state index
        in V(s), and maze dimension as a (length, width) tuple.
        """
        # Load maze map into memory
        maze_data = []
        with open(maze_input, mode='r') as f:
            for line in f:
                line = line.strip()
                # Shatter row string into list of chars
                maze_data.append(list(line))
        maze_data = np.asarray(maze_data)
        # Value function V(s).  Index 0 is a NaN sentinel that every invalid
        # transition (off the grid, into an obstacle) points at; real states
        # live at indices 1..size.
        values = np.zeros(maze_data.size + 1)
        values[0] = np.nan
        # next_states[s, a]: index into `values` of the state reached by
        # taking action a (0=west, 1=north, 2=east, 3=south) in state s.
        # Actions are deterministic, so this table replaces an explicit
        # P(s'|s,a) matrix.  Row 0 lines up with the NaN sentinel above.
        # BUGFIX: dtype was np.int, a deprecated alias removed in NumPy
        # 1.24; the builtin int is the documented replacement.
        next_states = np.zeros((maze_data.size + 1, 4), dtype=int)
        state_ind = 0  # current state's index in the value function
        goal_state_ind = -1  # index of goal state
        for x, row in enumerate(maze_data):
            for y, state in enumerate(row):
                state_ind += 1
                if maze_data[x, y] == '*':
                    # Obstacles keep their all-zero row -> NaN value forever
                    continue
                if maze_data[x, y] == 'G':
                    # Agent stays in the goal/terminal state whatever it does
                    next_states[state_ind] = state_ind
                    goal_state_ind = state_ind
                    continue
                if y > 0 and maze_data[x, y-1] != '*':
                    # can move west
                    next_states[state_ind, 0] = state_ind - 1
                else:
                    # bumps the wall/obstacle: agent stays
                    next_states[state_ind, 0] = state_ind
                if x > 0 and maze_data[x-1, y] != '*':
                    # can move north
                    next_states[state_ind, 1] = state_ind - maze_data.shape[1]
                else:
                    # agent stays
                    next_states[state_ind, 1] = state_ind
                if y < maze_data.shape[1] - 1 and maze_data[x, y+1] != '*':
                    # can move east
                    next_states[state_ind, 2] = state_ind + 1
                else:
                    # agent stays
                    next_states[state_ind, 2] = state_ind
                if x < maze_data.shape[0] - 1 and maze_data[x+1, y] != '*':
                    # can move south
                    next_states[state_ind, 3] = state_ind + maze_data.shape[1]
                else:
                    # agent stays
                    next_states[state_ind, 3] = state_ind
        return values, next_states, goal_state_ind, maze_data.shape

    def learn(self, values, next_states, goal_state_ind, lam, num_epoch):
        """
        Learn by value iteration, using synchronous updates.

        lam: the discount factor \\lambda
        num_epoch: fixed number of sweeps; if <= 0, iterate until the maximum
        absolute change in V drops below 0.001.
        Returns the learned values, q-values and optimal policies.
        """
        with warnings.catch_warnings():
            # np.nanmax/np.nanargmax warn on all-NaN rows (the sentinel row 0
            # and obstacle rows); those NaNs are intentional, so silence the
            # "All-NaN slice encountered" RuntimeWarning here.
            # Ref: https://docs.python.org/3/library/warnings.html#temporarily-suppressing-warnings
            warnings.simplefilter('ignore')
            if num_epoch > 0:
                for e in range(num_epoch):
                    # Bellman backup; every action costs an immediate -1 ...
                    values = -1 + lam * np.nanmax(values[next_states], axis=1)
                    # ... except staying in the goal/terminal state, which
                    # incurs no cost (all other "stay" actions still pay -1).
                    values[goal_state_ind] += 1
            else:
                # Keep sweeping until convergence
                iteration_count = 0
                while True:
                    iteration_count += 1
                    new_values = -1 + lam * np.nanmax(values[next_states], axis=1)
                    new_values[goal_state_ind] += 1
                    max_diff = np.nanmax(np.absolute(new_values - values))
                    values = new_values
                    if max_diff < 0.001:
                        # values converged
                        break
                print('total iterations:', iteration_count)
        # Q(s, a): expected discounted reward for taking action a in state s
        q_values = -1 + lam * values[next_states]
        q_values[goal_state_ind] += 1
        # Greedy policy pi(s) = argmax_a Q(s, a).  np.nanargmax raises
        # ValueError on rows of all NaNs, so mask those rows out first.
        mask = np.all(np.isnan(q_values), axis=1)
        # BUGFIX: dtype was np.float, a deprecated alias removed in NumPy
        # 1.24; the builtin float is the documented replacement.
        policies = np.empty(values.shape, dtype=float)
        policies[mask] = np.nan  # rows that are all NaNs don't have a policy
        policies[~mask] = np.nanargmax(q_values[~mask], axis=1)
        # Remove the padded sentinel entry at index 0
        return values[1:], q_values[1:], policies[1:]

    def output_values(self, learned_values, value_file):
        """
        Output learned values V(s) to file: one "row col value" line per
        non-obstacle cell, in row-major order.
        """
        with open(value_file, mode='w') as f:
            for i, (x, y) in enumerate(product(range(self.maze_dimension[0]),
                                               range(self.maze_dimension[1]))):
                if not np.isnan(learned_values[i]):
                    f.write('%d %d %f\n' % (x, y, learned_values[i]))

    def output_q_values(self, learned_q_values, q_value_file):
        """
        Output learned q values Q(s, a) to file: one "row col action q" line
        per non-obstacle cell and action.
        """
        with open(q_value_file, mode='w') as f:
            # Rows of all NaNs are obstacles -- they have no Q values
            mask = np.all(np.isnan(learned_q_values), axis=1)
            for i, (x, y) in enumerate(product(range(self.maze_dimension[0]),
                                               range(self.maze_dimension[1]))):
                if not mask[i]:  # not an obstacle
                    for d in range(learned_q_values.shape[1]):
                        f.write('%d %d %d %f\n' % (x, y, d, learned_q_values[i, d]))

    def output_policies(self, learned_policies, policy_file):
        """
        Output learned policies \\pi(s) to file: one "row col action" line
        per non-obstacle cell.
        """
        with open(policy_file, mode='w') as f:
            for i, (x, y) in enumerate(product(range(self.maze_dimension[0]),
                                               range(self.maze_dimension[1]))):
                if not np.isnan(learned_policies[i]):
                    f.write('%d %d %d\n' % (x, y, learned_policies[i]))
if __name__ == '__main__':
    # CLI: maze_input value_file q_value_file policy_file num_epoch
    #      discount_factor
    maze_input, value_file, q_value_file, policy_file = sys.argv[1:5]
    num_epoch = int(sys.argv[5])
    discount_factor = float(sys.argv[6])
    # Constructing the model runs the full load -> learn -> output pipeline.
    model = ValueIteration(maze_input, value_file, q_value_file, policy_file,
                           num_epoch, discount_factor)
| [
"numpy.nanargmax",
"numpy.absolute",
"warnings.catch_warnings",
"numpy.asarray",
"numpy.zeros",
"numpy.empty",
"numpy.isnan",
"numpy.nanmax",
"warnings.simplefilter",
"numpy.set_printoptions"
] | [((890, 942), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf', 'linewidth': '(100)'}), '(threshold=np.inf, linewidth=100)\n', (909, 942), True, 'import numpy as np\n'), ((2165, 2186), 'numpy.asarray', 'np.asarray', (['maze_data'], {}), '(maze_data)\n', (2175, 2186), True, 'import numpy as np\n'), ((2328, 2356), 'numpy.zeros', 'np.zeros', (['(maze_data.size + 1)'], {}), '(maze_data.size + 1)\n', (2336, 2356), True, 'import numpy as np\n'), ((3071, 3118), 'numpy.zeros', 'np.zeros', (['(maze_data.size + 1, 4)'], {'dtype': 'np.int'}), '((maze_data.size + 1, 4), dtype=np.int)\n', (3079, 3118), True, 'import numpy as np\n'), ((5224, 5249), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5247, 5249), False, 'import warnings\n'), ((5486, 5517), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (5507, 5517), False, 'import warnings\n'), ((7592, 7630), 'numpy.empty', 'np.empty', (['values.shape'], {'dtype': 'np.float'}), '(values.shape, dtype=np.float)\n', (7600, 7630), True, 'import numpy as np\n'), ((7743, 7780), 'numpy.nanargmax', 'np.nanargmax', (['q_values[~mask]'], {'axis': '(1)'}), '(q_values[~mask], axis=1)\n', (7755, 7780), True, 'import numpy as np\n'), ((7541, 7559), 'numpy.isnan', 'np.isnan', (['q_values'], {}), '(q_values)\n', (7549, 7559), True, 'import numpy as np\n'), ((8655, 8681), 'numpy.isnan', 'np.isnan', (['learned_q_values'], {}), '(learned_q_values)\n', (8663, 8681), True, 'import numpy as np\n'), ((8286, 8313), 'numpy.isnan', 'np.isnan', (['learned_values[i]'], {}), '(learned_values[i])\n', (8294, 8313), True, 'import numpy as np\n'), ((9412, 9441), 'numpy.isnan', 'np.isnan', (['learned_policies[i]'], {}), '(learned_policies[i])\n', (9420, 9441), True, 'import numpy as np\n'), ((6582, 6614), 'numpy.absolute', 'np.absolute', (['(new_values - values)'], {}), '(new_values - values)\n', (6593, 6614), True, 'import numpy as np\n'), ((5708, 5746), 
'numpy.nanmax', 'np.nanmax', (['values[next_states]'], {'axis': '(1)'}), '(values[next_states], axis=1)\n', (5717, 5746), True, 'import numpy as np\n'), ((6449, 6487), 'numpy.nanmax', 'np.nanmax', (['values[next_states]'], {'axis': '(1)'}), '(values[next_states], axis=1)\n', (6458, 6487), True, 'import numpy as np\n')] |
import sympy as sp  # symbolic math: beam expressions are built analytically
from sympy import cos, cosh, sin, sinh
import numpy as np
import matplotlib.pyplot as mplot
# Symbolic variables: x = field point along the beam, s = integration variable.
x = sp.Symbol('x')
s = sp.Symbol('s')
# r1..r3: presumably the first three roots of the cantilever-beam
# characteristic equation cos(r)*cosh(r) = -1 -- TODO confirm source.
r1 = 1.87527632324985
r2 = 4.69409122046058
r3 = 7.855
#Superimpose plots
# Galerkin coefficients q_ij: j-th generalized coordinate of the i-term
# approximation psi_i(x) below.
q11 = 4.29105998838015
q21 = 1.76556786
q22 = -0.83363552
q31 = 1.7774846
q32 = -0.84769887
q33 = -0.01233278
def phi(x, r):
    """Cantilever mode-shape basis function for root r, evaluated at x."""
    blend = (cos(r) + cosh(r)) / (sin(r) + sinh(r))
    return sin(r * x) + sinh(r * x) + blend * (cos(r * x) - cosh(r * x))
def d2phi(x, r):
    """Second derivative of phi(x, r) with respect to x."""
    blend = (cos(r) + cosh(r)) / (sin(r) + sinh(r))
    core = -sin(r * x) + sinh(r * x) + blend * (-cos(r * x) - cosh(r * x))
    return r ** 2 * core
def psi_1(x):
    """One-term Galerkin series for the beam rotation psi(x)."""
    terms = ((q11, r1),)
    return sum(q * phi(x, r) for q, r in terms)
def d2psi_1(x):
    """Second derivative of the one-term approximation psi_1."""
    terms = ((q11, r1),)
    return sum(q * d2phi(x, r) for q, r in terms)
def psi_2(x):
    """Two-term Galerkin series for the beam rotation psi(x)."""
    terms = ((q21, r1), (q22, r2))
    return sum(q * phi(x, r) for q, r in terms)
def d2psi_2(x):
    """Second derivative of the two-term approximation psi_2."""
    terms = ((q21, r1), (q22, r2))
    return sum(q * d2phi(x, r) for q, r in terms)
def psi_3(x):
    """Three-term Galerkin series for the beam rotation psi(x)."""
    terms = ((q31, r1), (q32, r2), (q33, r3))
    return sum(q * phi(x, r) for q, r in terms)
def d2psi_3(x):
    """Second derivative of the three-term approximation psi_3."""
    terms = ((q31, r1), (q32, r2), (q33, r3))
    return sum(q * d2phi(x, r) for q, r in terms)
#governing ODE: (1)*d2phi(x) - int_{x,1}(100*cos(psi(x)-psi(s)))ds = 0
#evaluate LHS of the ODE, set RHS to zero
# NOTE(review): the trapezoid_* functions below ADD the integral to
# d2psi_i(x); the minus sign in the comment above may be a sign-convention
# mismatch -- confirm against the derivation.
# h_i: symbolic integrand 100*cos(psi_i(x) - psi_i(s)) for the i-term model.
h_1 = 100*cos(psi_1(x) - psi_1(s))
h_2 = 100*cos(psi_2(x) - psi_2(s))
h_3 = 100*cos(psi_3(x) - psi_3(s))
def trapezoid_1(var, h, n=100, a=x, b=1):
    """Residual (LHS) of the 1-term ODE at x: the trapezoid-rule integral
    of h over [a, b] in the variable `var` (n panels), plus d2psi_1(x)."""
    step = (b - a) / n
    edge = h.subs(var, a) + h.subs(var, b)
    inner = sum(h.subs(var, a + step * k) for k in range(1, n))
    return (step / 2) * (edge + 2 * inner) + d2psi_1(x)
def trapezoid_2(var, h, n=100, a=x, b=1):
    """Residual (LHS) of the 2-term ODE at x: the trapezoid-rule integral
    of h over [a, b] in the variable `var` (n panels), plus d2psi_2(x)."""
    step = (b - a) / n
    edge = h.subs(var, a) + h.subs(var, b)
    inner = sum(h.subs(var, a + step * k) for k in range(1, n))
    return (step / 2) * (edge + 2 * inner) + d2psi_2(x)
def trapezoid_3(var, h, n=100, a=x, b=1):
    """Residual (LHS) of the 3-term ODE at x: the trapezoid-rule integral
    of h over [a, b] in the variable `var` (n panels), plus d2psi_3(x)."""
    step = (b - a) / n
    edge = h.subs(var, a) + h.subs(var, b)
    inner = sum(h.subs(var, a + step * k) for k in range(1, n))
    return (step / 2) * (edge + 2 * inner) + d2psi_3(x)
# Residual (LHS) of each approximation's governing ODE, as a function of x.
# BUGFIX: ode2 and ode3 previously integrated h_1 (the one-term integrand),
# which left h_2 and h_3 defined but unused; each residual now pairs with
# its own integrand.
ode1 = trapezoid_1(s, h_1)
ode2 = trapezoid_2(s, h_2)
ode3 = trapezoid_3(s, h_3)

# Sample |residual| on a uniform grid x in [0, 1] with step 0.01.
xs = np.arange(0, 1.01, 0.01)
o1 = []
o2 = []
o3 = []
for i in xs:
    o1.append(abs(ode1.subs(x, i)))
    o2.append(abs(ode2.subs(x, i)))
    o3.append(abs(ode3.subs(x, i)))
o1 = np.array(o1)
o2 = np.array(o2)
o3 = np.array(o3)

# Superimpose the three residual curves on one figure.
mplot.plot(xs, o1, color='k', label='1D Approx')
mplot.plot(xs, o2, color='b', label='2D Approx')
mplot.plot(xs, o3, color='r', label='3D Approx')
mplot.xlabel("Position along beam, x")
mplot.ylabel("Value of LHS of the ODE")
mplot.grid(color='k', linestyle='--', linewidth=0.5)
mplot.title("LHS of the ODE vs x obtained by using different number of qi terms")
mplot.legend()
mplot.show()

# Rough rate-of-convergence guess: magnitude of the highest-order
# generalized coordinate versus the number of terms retained.
ith_term = [1, 2, 3]
q_ith = [q11, abs(q22), abs(q33)]
mplot.plot(ith_term, q_ith, '--*')
mplot.xlabel("Number of q terms")
mplot.ylabel("Absolute value of q_ith term")
mplot.grid(color='k', linestyle='--', linewidth=0.5)
mplot.title("Approximation of Convergence of psi(x) using i q terms")
mplot.show() | [
"sympy.sin",
"sympy.Symbol",
"matplotlib.pyplot.grid",
"sympy.cos",
"matplotlib.pyplot.ylabel",
"sympy.cosh",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.title",
"sympy.sinh",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((209, 223), 'sympy.Symbol', 'sp.Symbol', (['"""x"""'], {}), "('x')\n", (218, 223), True, 'import sympy as sp\n'), ((228, 242), 'sympy.Symbol', 'sp.Symbol', (['"""s"""'], {}), "('s')\n", (237, 242), True, 'import sympy as sp\n'), ((2044, 2068), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.01)'], {}), '(0, 1.01, 0.01)\n', (2053, 2068), True, 'import numpy as np\n'), ((2219, 2231), 'numpy.array', 'np.array', (['o1'], {}), '(o1)\n', (2227, 2231), True, 'import numpy as np\n'), ((2237, 2249), 'numpy.array', 'np.array', (['o2'], {}), '(o2)\n', (2245, 2249), True, 'import numpy as np\n'), ((2255, 2267), 'numpy.array', 'np.array', (['o3'], {}), '(o3)\n', (2263, 2267), True, 'import numpy as np\n'), ((2314, 2362), 'matplotlib.pyplot.plot', 'mplot.plot', (['xs', 'o1'], {'color': '"""k"""', 'label': '"""1D Approx"""'}), "(xs, o1, color='k', label='1D Approx')\n", (2324, 2362), True, 'import matplotlib.pyplot as mplot\n'), ((2363, 2411), 'matplotlib.pyplot.plot', 'mplot.plot', (['xs', 'o2'], {'color': '"""b"""', 'label': '"""2D Approx"""'}), "(xs, o2, color='b', label='2D Approx')\n", (2373, 2411), True, 'import matplotlib.pyplot as mplot\n'), ((2412, 2460), 'matplotlib.pyplot.plot', 'mplot.plot', (['xs', 'o3'], {'color': '"""r"""', 'label': '"""3D Approx"""'}), "(xs, o3, color='r', label='3D Approx')\n", (2422, 2460), True, 'import matplotlib.pyplot as mplot\n'), ((2461, 2499), 'matplotlib.pyplot.xlabel', 'mplot.xlabel', (['"""Position along beam, x"""'], {}), "('Position along beam, x')\n", (2473, 2499), True, 'import matplotlib.pyplot as mplot\n'), ((2500, 2539), 'matplotlib.pyplot.ylabel', 'mplot.ylabel', (['"""Value of LHS of the ODE"""'], {}), "('Value of LHS of the ODE')\n", (2512, 2539), True, 'import matplotlib.pyplot as mplot\n'), ((2540, 2592), 'matplotlib.pyplot.grid', 'mplot.grid', ([], {'color': '"""k"""', 'linestyle': '"""--"""', 'linewidth': '(0.5)'}), "(color='k', linestyle='--', linewidth=0.5)\n", (2550, 2592), True, 'import matplotlib.pyplot as 
mplot\n'), ((2593, 2679), 'matplotlib.pyplot.title', 'mplot.title', (['"""LHS of the ODE vs x obtained by using different number of qi terms"""'], {}), "(\n 'LHS of the ODE vs x obtained by using different number of qi terms')\n", (2604, 2679), True, 'import matplotlib.pyplot as mplot\n'), ((2675, 2689), 'matplotlib.pyplot.legend', 'mplot.legend', ([], {}), '()\n', (2687, 2689), True, 'import matplotlib.pyplot as mplot\n'), ((2690, 2702), 'matplotlib.pyplot.show', 'mplot.show', ([], {}), '()\n', (2700, 2702), True, 'import matplotlib.pyplot as mplot\n'), ((2769, 2803), 'matplotlib.pyplot.plot', 'mplot.plot', (['ith_term', 'q_ith', '"""--*"""'], {}), "(ith_term, q_ith, '--*')\n", (2779, 2803), True, 'import matplotlib.pyplot as mplot\n'), ((2804, 2837), 'matplotlib.pyplot.xlabel', 'mplot.xlabel', (['"""Number of q terms"""'], {}), "('Number of q terms')\n", (2816, 2837), True, 'import matplotlib.pyplot as mplot\n'), ((2838, 2882), 'matplotlib.pyplot.ylabel', 'mplot.ylabel', (['"""Absolute value of q_ith term"""'], {}), "('Absolute value of q_ith term')\n", (2850, 2882), True, 'import matplotlib.pyplot as mplot\n'), ((2883, 2935), 'matplotlib.pyplot.grid', 'mplot.grid', ([], {'color': '"""k"""', 'linestyle': '"""--"""', 'linewidth': '(0.5)'}), "(color='k', linestyle='--', linewidth=0.5)\n", (2893, 2935), True, 'import matplotlib.pyplot as mplot\n'), ((2936, 3005), 'matplotlib.pyplot.title', 'mplot.title', (['"""Approximation of Convergence of psi(x) using i q terms"""'], {}), "('Approximation of Convergence of psi(x) using i q terms')\n", (2947, 3005), True, 'import matplotlib.pyplot as mplot\n'), ((3006, 3018), 'matplotlib.pyplot.show', 'mplot.show', ([], {}), '()\n', (3016, 3018), True, 'import matplotlib.pyplot as mplot\n'), ((459, 469), 'sympy.sin', 'sin', (['(r * x)'], {}), '(r * x)\n', (462, 469), False, 'from sympy import cos, cosh, sin, sinh\n'), ((470, 481), 'sympy.sinh', 'sinh', (['(r * x)'], {}), '(r * x)\n', (474, 481), False, 'from sympy import cos, 
cosh, sin, sinh\n'), ((519, 529), 'sympy.cos', 'cos', (['(r * x)'], {}), '(r * x)\n', (522, 529), False, 'from sympy import cos, cosh, sin, sinh\n'), ((528, 539), 'sympy.cosh', 'cosh', (['(r * x)'], {}), '(r * x)\n', (532, 539), False, 'from sympy import cos, cosh, sin, sinh\n'), ((589, 600), 'sympy.sinh', 'sinh', (['(r * x)'], {}), '(r * x)\n', (593, 600), False, 'from sympy import cos, cosh, sin, sinh\n'), ((484, 490), 'sympy.cos', 'cos', (['r'], {}), '(r)\n', (487, 490), False, 'from sympy import cos, cosh, sin, sinh\n'), ((491, 498), 'sympy.cosh', 'cosh', (['r'], {}), '(r)\n', (495, 498), False, 'from sympy import cos, cosh, sin, sinh\n'), ((501, 507), 'sympy.sin', 'sin', (['r'], {}), '(r)\n', (504, 507), False, 'from sympy import cos, cosh, sin, sinh\n'), ((508, 515), 'sympy.sinh', 'sinh', (['r'], {}), '(r)\n', (512, 515), False, 'from sympy import cos, cosh, sin, sinh\n'), ((578, 588), 'sympy.sin', 'sin', (['(r * x)'], {}), '(r * x)\n', (581, 588), False, 'from sympy import cos, cosh, sin, sinh\n'), ((648, 659), 'sympy.cosh', 'cosh', (['(r * x)'], {}), '(r * x)\n', (652, 659), False, 'from sympy import cos, cosh, sin, sinh\n'), ((603, 609), 'sympy.cos', 'cos', (['r'], {}), '(r)\n', (606, 609), False, 'from sympy import cos, cosh, sin, sinh\n'), ((610, 617), 'sympy.cosh', 'cosh', (['r'], {}), '(r)\n', (614, 617), False, 'from sympy import cos, cosh, sin, sinh\n'), ((620, 626), 'sympy.sin', 'sin', (['r'], {}), '(r)\n', (623, 626), False, 'from sympy import cos, cosh, sin, sinh\n'), ((627, 634), 'sympy.sinh', 'sinh', (['r'], {}), '(r)\n', (631, 634), False, 'from sympy import cos, cosh, sin, sinh\n'), ((639, 649), 'sympy.cos', 'cos', (['(r * x)'], {}), '(r * x)\n', (642, 649), False, 'from sympy import cos, cosh, sin, sinh\n')] |
import numpy as np
import torch
import numpy.linalg as linalg
import scipy.stats as stats
import pickle
from sys import exit
# Synthetic low-rank matrix-factorization data: Y = M + E, where
# M = U diag(D) V^T with orthonormal-column factors U (m x K), V (n x K).
m = 20
n = 40
K = 5
np.random.seed(0)  # reproducibility: all draws use the global RNG
U = stats.ortho_group.rvs(m)[:, 0:K]  # first K columns of a random orthogonal matrix
V = stats.ortho_group.rvs(n)[:, 0:K]
# Reference scale for singular values: sqrt(m + n + 2*sqrt(m*n)).
mu_mn = np.sqrt(m + n + 2*np.sqrt(m*n))
# NOTE(review): this random D is immediately overwritten by the fixed D
# below; it is kept because the uniform draw advances the global RNG stream,
# so deleting it would change E (and hence Y) under the same seed.
D = np.random.uniform(low = 1/2*mu_mn, high = 3/2*mu_mn, size = K)
D = np.array([16.0, 14.0, 10.0, 2.0, 1.0])  # singular values actually used
M = np.matmul(U*D, V.T)  # U*D scales columns of U, i.e. U @ diag(D) @ V.T
E = np.random.normal(size = M.shape)  # standard-normal observation noise
Y = M + E
# Persist the ground truth alongside the noisy observation.
with open(f"./output/data_m_{m}_n_{n}_K_{K}.pkl", 'wb') as file_handle:
    pickle.dump({"m": m, 'n': n, 'K': K,
                 'U': U, 'D': D, 'V': V,
                 'M': M, 'Y': Y, 'E': E}, file_handle)
## descretize some columns using link function
shuffled_col_index = np.arange(n)
np.random.shuffle(shuffled_col_index)
binary_col_index = shuffled_col_index[0:10]
count_col_index = shuffled_col_index[10:20]
continuous_col_index = shuffled_col_index[20:]
E = np.random.normal(scale = 0.3, size = M.shape)
linked_Y = np.copy(M) + E
p = 1.0 / (1.0 + np.exp(-3*linked_Y[:, binary_col_index]))
linked_Y[:, binary_col_index] = np.random.binomial(n = 1, p = p).astype(np.float64)
lam = np.exp(linked_Y[:, count_col_index])
linked_Y[:, count_col_index] = np.random.poisson(lam).astype(np.float64)
linked_Y[:, continuous_col_index] = linked_Y[:, continuous_col_index]
with open(f"./output/linked_data_m_{m}_n_{n}_K_{K}.pkl", 'wb') as file_handle:
pickle.dump({"m": m, 'n': n, 'K': K,
'U': U, 'D': D, 'V': V,
'M': M, 'linked_Y': linked_Y, 'E': E,
'binary_col_index': binary_col_index,
'count_col_index': count_col_index,
'continuous_col_index': continuous_col_index}, file_handle)
| [
"numpy.random.normal",
"numpy.copy",
"pickle.dump",
"numpy.sqrt",
"numpy.random.poisson",
"scipy.stats.ortho_group.rvs",
"numpy.random.binomial",
"numpy.exp",
"numpy.array",
"numpy.matmul",
"numpy.random.seed",
"numpy.random.uniform",
"numpy.arange",
"numpy.random.shuffle"
] | [((147, 164), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (161, 164), True, 'import numpy as np\n'), ((284, 348), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1 / 2 * mu_mn)', 'high': '(3 / 2 * mu_mn)', 'size': 'K'}), '(low=1 / 2 * mu_mn, high=3 / 2 * mu_mn, size=K)\n', (301, 348), True, 'import numpy as np\n'), ((351, 389), 'numpy.array', 'np.array', (['[16.0, 14.0, 10.0, 2.0, 1.0]'], {}), '([16.0, 14.0, 10.0, 2.0, 1.0])\n', (359, 389), True, 'import numpy as np\n'), ((394, 415), 'numpy.matmul', 'np.matmul', (['(U * D)', 'V.T'], {}), '(U * D, V.T)\n', (403, 415), True, 'import numpy as np\n'), ((418, 448), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'M.shape'}), '(size=M.shape)\n', (434, 448), True, 'import numpy as np\n'), ((744, 756), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (753, 756), True, 'import numpy as np\n'), ((757, 794), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffled_col_index'], {}), '(shuffled_col_index)\n', (774, 794), True, 'import numpy as np\n'), ((935, 976), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.3)', 'size': 'M.shape'}), '(scale=0.3, size=M.shape)\n', (951, 976), True, 'import numpy as np\n'), ((1158, 1194), 'numpy.exp', 'np.exp', (['linked_Y[:, count_col_index]'], {}), '(linked_Y[:, count_col_index])\n', (1164, 1194), True, 'import numpy as np\n'), ((170, 194), 'scipy.stats.ortho_group.rvs', 'stats.ortho_group.rvs', (['m'], {}), '(m)\n', (191, 194), True, 'import scipy.stats as stats\n'), ((207, 231), 'scipy.stats.ortho_group.rvs', 'stats.ortho_group.rvs', (['n'], {}), '(n)\n', (228, 231), True, 'import scipy.stats as stats\n'), ((538, 640), 'pickle.dump', 'pickle.dump', (["{'m': m, 'n': n, 'K': K, 'U': U, 'D': D, 'V': V, 'M': M, 'Y': Y, 'E': E}", 'file_handle'], {}), "({'m': m, 'n': n, 'K': K, 'U': U, 'D': D, 'V': V, 'M': M, 'Y': Y,\n 'E': E}, file_handle)\n", (549, 640), False, 'import pickle\n'), ((992, 1002), 'numpy.copy', 'np.copy', (['M'], {}), 
'(M)\n', (999, 1002), True, 'import numpy as np\n'), ((1423, 1667), 'pickle.dump', 'pickle.dump', (["{'m': m, 'n': n, 'K': K, 'U': U, 'D': D, 'V': V, 'M': M, 'linked_Y':\n linked_Y, 'E': E, 'binary_col_index': binary_col_index,\n 'count_col_index': count_col_index, 'continuous_col_index':\n continuous_col_index}", 'file_handle'], {}), "({'m': m, 'n': n, 'K': K, 'U': U, 'D': D, 'V': V, 'M': M,\n 'linked_Y': linked_Y, 'E': E, 'binary_col_index': binary_col_index,\n 'count_col_index': count_col_index, 'continuous_col_index':\n continuous_col_index}, file_handle)\n", (1434, 1667), False, 'import pickle\n'), ((1025, 1067), 'numpy.exp', 'np.exp', (['(-3 * linked_Y[:, binary_col_index])'], {}), '(-3 * linked_Y[:, binary_col_index])\n', (1031, 1067), True, 'import numpy as np\n'), ((1099, 1127), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': 'p'}), '(n=1, p=p)\n', (1117, 1127), True, 'import numpy as np\n'), ((1226, 1248), 'numpy.random.poisson', 'np.random.poisson', (['lam'], {}), '(lam)\n', (1243, 1248), True, 'import numpy as np\n'), ((266, 280), 'numpy.sqrt', 'np.sqrt', (['(m * n)'], {}), '(m * n)\n', (273, 280), True, 'import numpy as np\n')] |
import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K
class WarmUpLearningRateScheduler(keras.callbacks.Callback):
"""Warmup learning rate scheduler
"""
def __init__(self, warmup_batches, init_lr, verbose=0):
"""Constructor for warmup learning rate scheduler
Arguments:
warmup_batches {int} -- Number of batch for warmup.
init_lr {float} -- Learning rate after warmup.
Keyword Arguments:
verbose {int} -- 0: quiet, 1: update messages. (default: {0})
"""
super(WarmUpLearningRateScheduler, self).__init__()
self.warmup_batches = warmup_batches
self.init_lr = init_lr
self.verbose = verbose
self.batch_count = 0
self.learning_rates = []
def on_batch_end(self, batch, logs=None):
self.batch_count = self.batch_count + 1
lr = K.get_value(self.model.optimizer.lr)
self.learning_rates.append(lr)
def on_batch_begin(self, batch, logs=None):
if self.batch_count <= self.warmup_batches:
lr = self.batch_count*self.init_lr/self.warmup_batches
K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nBatch %05d: WarmUpLearningRateScheduler setting learning '
'rate to %s.' % (self.batch_count + 1, lr))
if __name__ == '__main__':
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Create a model.
model = Sequential()
model.add(Dense(32, activation='relu', input_dim=100))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# Number of training samples.
sample_count = 12
# Total epochs to train.
epochs = 7
# Number of warmup epochs.
warmup_epoch = 5
# Training batch size, set small value here for demonstration purpose.
batch_size = 4
# Generate dummy data.
data = np.random.random((sample_count, 100))
labels = np.random.randint(10, size=(sample_count, 1))
# Convert labels to categorical one-hot encoding.
one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)
# Compute the number of warmup batches.
warmup_batches = warmup_epoch * sample_count / batch_size
# Create the Learning rate scheduler.
warm_up_lr = WarmUpLearningRateScheduler(warmup_batches, init_lr=0.001)
# Train the model, iterating on the data in batches of 32 samples
model.fit(data, one_hot_labels, epochs=epochs, batch_size=batch_size,
verbose=0, callbacks=[warm_up_lr]) | [
"tensorflow.keras.utils.to_categorical",
"numpy.random.random",
"tensorflow.keras.backend.get_value",
"numpy.random.randint",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.set_value",
"tensorflow.keras.models.Sequential"
] | [((1553, 1565), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1563, 1565), False, 'from tensorflow.keras.models import Sequential\n'), ((2087, 2124), 'numpy.random.random', 'np.random.random', (['(sample_count, 100)'], {}), '((sample_count, 100))\n', (2103, 2124), True, 'import numpy as np\n'), ((2138, 2183), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(sample_count, 1)'}), '(10, size=(sample_count, 1))\n', (2155, 2183), True, 'import numpy as np\n'), ((2260, 2310), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['labels'], {'num_classes': '(10)'}), '(labels, num_classes=10)\n', (2286, 2310), False, 'from tensorflow import keras\n'), ((913, 949), 'tensorflow.keras.backend.get_value', 'K.get_value', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (924, 949), True, 'from tensorflow.keras import backend as K\n'), ((1580, 1623), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""', 'input_dim': '(100)'}), "(32, activation='relu', input_dim=100)\n", (1585, 1623), False, 'from tensorflow.keras.layers import Dense\n'), ((1639, 1670), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (1644, 1670), False, 'from tensorflow.keras.layers import Dense\n'), ((1169, 1209), 'tensorflow.keras.backend.set_value', 'K.set_value', (['self.model.optimizer.lr', 'lr'], {}), '(self.model.optimizer.lr, lr)\n', (1180, 1209), True, 'from tensorflow.keras import backend as K\n')] |
from astropy.cosmology import WMAP7 as cosmo
from astropy.constants import c as C
from astropy import units as u
from astropy.table import Table
from astropy.io import ascii
import numpy as np
from linetools import utils as ltu
from linetools.isgm.abscomponent import AbsComponent
from linetools.spectra.io import readspec
from linetools.spectra.xspectrum1d import XSpectrum1D
import linetools.isgm.io as ltiio
from linetools.isgm import utils as ltiu
from pyntejos.io import table_from_marzfile
import json
import glob
import matplotlib.pyplot as plt
"""Module for utils"""
def get_closest_ind(array, value):
    """Return the index i such that array[i] is the element closest to value."""
    offsets = np.fabs(value - array)
    return np.argmin(offsets)


def get_closest_inds(array, values):
    """Return, for each entry of `values`, the index of the closest element
    of `array`. Note that duplicated indices can appear when several values
    map onto the same element."""
    out = []
    for v in values:
        out.append(get_closest_ind(array, v))
    return out
def compare_z(z1, z2, dv):
    """Return True when z1 and z2 differ by less than dv (km/s) at the mean
    redshift of the pair, False otherwise. dv must be much smaller than the
    speed of light: the comparison is non-relativistic."""
    z1, z2, dv = np.array(z1), np.array(z2), np.array(dv)
    zmean = np.mean([z1, z2])
    dz = np.fabs(z1 - z2)
    c_kms = C.to('km/s').value
    if dz / (1. + zmean) < dv / c_kms:
        return True
    return False
def group_z(z, dv=1000):
    """Group redshifts within dv (km/s) at the redshift of the
    group. Returns an array of id_membership (integers), of the same dimension
    than z, where entries sharing an id belong to the same velocity group."""
    # Keep the original ordering so the output ids line up with the input z.
    z_original = np.array(z)
    z = np.array(z)
    z.sort()
    ids = np.zeros(len(z)) # This is the output, here we store the id_membership of each z
    ids_aux = np.zeros(len(z)) # This is the same, but matching a sorted z array
    q = 0 # counter for groups
    ids[0] = q
    # Walk the sorted redshifts: consecutive values within dv share a group id;
    # a gap larger than dv starts a new group.
    for i in np.arange(1, len(z)):
        if compare_z(z[i], z[i - 1], dv):
            ids_aux[i] = q
        else:
            q += 1
            ids_aux[i] = q
    # remap ids_aux (sorted order) back to ids (original input order)
    for i in np.arange(len(z_original)):
        cond = z_original[i] == z
        if np.sum(cond) > 1: # in case there are 2 or more with same z
            ids[i] = ids_aux[cond][0]
        else:
            ids[i] = ids_aux[cond]
    return ids.astype(int)
def poisson_err(n):
    """Poissonian uncertainties from the analytical approximations of
    Gehrels (1986).

    Parameters
    ----------
    n : int, float or array-like
        Number of counts.

    Returns
    -------
    errp, errm : ndarray
        Upper and lower 1-sigma uncertainties. The lower uncertainty is 0
        for n <= 0.25.
    """
    n = np.array(n)
    errp = np.sqrt(n + 0.75) + 1.
    # Bug fix: removed `np.where(errp == errp, errp, errp)`, which was a
    # no-op (it replaced errp with itself in every case, NaNs included).
    # np.maximum avoids taking sqrt of a negative number (RuntimeWarning)
    # for the small-n branch that np.where would discard anyway.
    errm = np.where(n > 0.25, np.sqrt(np.maximum(n - 0.25, 0)), 0)
    return errp, errm
def Nmin(e, dz, s, a, a_err):
    """Minimum number of independent structures needed to detect a
    difference in dN/dz with respect to a field value dNdz|field = a +- a_err,
    at statistical significance s, using a redshift path of dz per structure.
    (Analytical expression derived by N.T.)
    """
    e, dz, s, a, a_err = (np.array(q).astype(float)
                          for q in (e, dz, s, a, a_err))
    numerator = (e / dz / a) * s ** 2
    denominator = ((e - 1.) - s * a_err / a) ** 2
    return numerator / denominator
def find_edges(a):
    """Locate contiguous chunks of non-zero values in a 1-D array.

    `a` is treated as a 1-D array where 0 means "masked out". Returns two
    int arrays (lower, upper) with the start and end indices of each chunk
    of consecutive non-zero values. Useful for spectral analyses.
    """
    a = np.array(a)
    # Pad a zero on both sides so every chunk has a well-defined start
    # (0 -> non-zero transition) and end (non-zero -> 0 transition).
    padded = np.concatenate(([0], a, [0]))
    lower, upper = [], []
    for i in range(1, len(padded) - 1):
        if padded[i] == 0:
            continue
        if padded[i - 1] == 0:   # chunk starts here
            lower.append(i - 1)  # -1 maps padded index back to `a`
        if padded[i + 1] == 0:   # chunk ends here
            upper.append(i - 1)
    lower = np.array(lower)
    upper = np.array(upper)
    assert len(lower) == len(upper), 'Something is wrong with find_edges function. Debug code!'
    return lower.astype(int), upper.astype(int)
def is_local_minima(a):
    """For a given array a, return a boolean array that is True at local minima.

    Thin wrapper around linetools.utils.is_local_minima."""
    return ltu.is_local_minima(a)


def is_local_maxima(a):
    """For a given array a, return a boolean array that is True at local maxima.

    Thin wrapper around linetools.utils.is_local_maxima."""
    return ltu.is_local_maxima(a)
def associate_redshifts(z1, z2, dv):
    """Return an array of the same length as z1 with a 1 wherever the z1
    redshift lies within dv (km/s) of any redshift in z2, and 0 otherwise."""
    z1 = np.array(z1)
    z2 = np.array(z2)
    flags = np.zeros(len(z1))
    for zref in z2:
        close = np.fabs(ltu.dv_from_z(z1, zref)) < dv
        flags = np.where(close, 1, flags)
    return flags
def get_dv_closest_z(z1, z2, give_ids=False):
    """For each redshift in z1, return the velocity difference (km/s) to the
    closest redshift in z2, at the rest frame given by z2 (relativistic
    approximation for flat space-time; see ltu.dv_from_z).

    If give_ids is True, also return the indices of z2 where the absolute
    difference is minimal.
    """
    z1 = np.array(z1)
    z2 = np.array(z2)
    dv = []
    inds = []
    for z in z1:
        dv_all = ltu.dv_from_z(z, z2)
        abs_dv = np.fabs(dv_all)
        # index of the smallest |dv|
        best = np.where(abs_dv == np.min(abs_dv))[0][0]
        dv.append(dv_all[best])
        inds.append(best)
    dv = np.array(dv)
    if give_ids:
        return dv, inds
    return dv
def clean_array(x, value=0):
    """Replace every NaN and +/-inf entry of x with `value`.

    Returns a numpy array; the input is not modified."""
    x = np.array(x)
    bad = np.isnan(x) | np.isinf(x)
    return np.where(bad, value, x)
def get_original_indices(original, new):
    """For two arrays holding the same elements in different orders, return
    the index array such that original[indices] == new.

    NOTE(review): assumes both arrays contain exactly the same multiset of
    values — not checked here; verify at the call site.
    """
    original = np.array(original)
    new = np.array(new)
    # argsort both, then invert the sort of `new` to map sorted positions
    # back onto `new`'s ordering.
    order_orig = np.argsort(original)
    order_new = np.argsort(new)
    return order_orig[np.argsort(order_new)]
def get_today_str():
    """Return the current (UTC) date as a string of format YYYY-MM-DD.

    Returns
    -------
    s : str
        Zero-padded date string, e.g. '2024-03-07'.
    """
    import time
    # strftime zero-pads month and day, replacing the manual padding the
    # original implementation did by hand.
    return time.strftime('%Y-%m-%d', time.gmtime())
def get_current_year():
    """Return the current Gregorian year (UTC) as an int."""
    import time
    return time.gmtime().tm_year
def complist_from_igmgjson(igmguesses_json):
    """Creates a list of AbsComponents from a igmguesses_json file

    Parameters
    ----------
    igmguesses_json : str
        Name of the json file genereted by IGMGUESSES

    Returns
    -------
    comp_list : list of AbsComponents
        A list of AbsComponents
    """
    # Delegate to linetools. The legacy manual JSON parser that used to
    # follow this return statement was unreachable dead code and has been
    # removed.
    return ltiio.read_igmg_to_components(igmguesses_json, linelist='ISM')
def igmgjson_from_complist(complist, specfile, fwhm, outfile='IGM_model.json'):
    """ Write to a JSON file of the IGMGuesses format.

    Parameters
    ----------
    complist : list of AbsComponents
        Components to serialize
    specfile : str
        Name of spectrum associated to these components
    fwhm : int
        FWHM of the spectrum
    outfile : str, optional
        Name of the output json file
    """
    import json
    # Create dict of the components
    out_dict = dict(cmps={},
                    spec_file=specfile,
                    fwhm=fwhm, bad_pixels=[])
    for kk, comp in enumerate(complist):
        key = comp.name
        out_dict['cmps'][key] = comp.to_dict()
        # Extra fit-related fields that the IGMGuesses format expects per
        # component, on top of the standard AbsComponent dict.
        out_dict['cmps'][key]['zcomp'] = comp.zcomp
        out_dict['cmps'][key]['zfit'] = comp.zcomp
        out_dict['cmps'][key]['Nfit'] = comp.logN
        out_dict['cmps'][key]['bfit'] = comp.attrib['b']
        out_dict['cmps'][key]['wrest'] = comp._abslines[0].wrest.value
        out_dict['cmps'][key]['vlim'] = list(comp.vlim.value)
        out_dict['cmps'][key]['Reliability'] = str(comp.reliability)
        out_dict['cmps'][key]['Comment'] = str(comp.comment)
    # JSONify (make nested values JSON-serializable)
    gd_dict = ltu.jsonify(out_dict)
    # Write file.
    # Bug fix: the original wrapped the dump in the Python-2-only built-in
    # `unicode()` (NameError on Python 3) and never closed the file handle;
    # a context manager fixes both.
    with open(outfile, 'w') as f:
        f.write(json.dumps(gd_dict, sort_keys=True, indent=4, separators=(',', ': ')))
    print('Wrote: {:s}'.format(outfile))
def from_joebvp_to_table(joebvp_file, radec):
    """Read a JOEBVP file and return its contents as an astropy Table.

    Parameters
    ----------
    joebvp_file : str
        Name of the JOEBVP file
    radec : SkyCoord
        Coordinate of object

    Returns
    -------
    tab : astropy.table.Table
        A table version of the file (one row per component).
    """
    # Parse the file into AbsComponents, then tabulate them.
    comps = ltiio.read_joebvp_to_components(joebvp_file,radec)
    tab = ltiu.table_from_complist(comps)
    return tab
def igmgjson_from_joebvp(joebvp_file, radec, specfile, fwhm, outfile='IGM_model_from_joebvp.json'):
    """Convert a JOEBVP file into an IGMGuesses-format JSON file on disk.

    Parameters
    ----------
    joebvp_file : str
        Name of the JOEBVP file
    radec : SkyCoord
        Coordinate of object
    specfile : str
        Name of the spectrum associated to the components
    fwhm : int
        FWHM of the spectrum
    outfile : str, optional
        Name of the output json file

    Returns
    -------
    None; writes `outfile` as a side effect.
    """
    # Parse JOEBVP into components, then reuse the JSON writer.
    comps = ltiio.read_joebvp_to_components(joebvp_file, radec)
    igmgjson_from_complist(comps, specfile, fwhm, outfile=outfile)
def from_igmguesses_to_complist(infile):
    """Reads .json file generated by IGMGuesses and return a list of AbsComponent objects

    Parameters
    ----------
    infile : str
        Name of the .json file from IGMGuesses

    Returns
    -------
    comp_list : list of AbsComponent
    """
    import json
    # Read the JSON file
    with open(infile) as data_file:
        igmg_dict = json.load(data_file)
    # Components
    comp_list = []
    for ii, key in enumerate(igmg_dict['cmps'].keys()):
        # QtCore.pyqtRemoveInputHook()
        # pdb.set_trace()
        # QtCore.pyqtRestoreInputHook()
        # import pdb; pdb.set_trace()
        idict = igmg_dict['cmps'][key]
        # Promote fields nested under 'attrib' to the top level, where
        # AbsComponent.from_dict expects them; fall back to 0. when a
        # field is missing from the file.
        idict['logN'] = idict['attrib']['logN']
        try:
            idict['flag_N'] = idict['attrib']['flag_N']
        except:
            idict['flag_N'] = 0.
        try:
            idict['sig_logN'] = idict['attrib']['sig_logN']
        except:
            idict['sig_logN'] = 0.
        comp = AbsComponent.from_dict(idict, skip_abslines=False, chk_sep=False, chk_data=False, chk_vel=False)
        comp_list += [comp]
    return comp_list
def from_igmguesses_to_table(infile):
    """Reads .json file generated by IGMGuesses and return Table object

    Parameters
    ----------
    infile : str
        Name of the .json file from IGMGuesses

    Returns
    -------
    tab : astropy.table.Table
        One row per absorption component.
    """
    # Parse into components first, then tabulate.
    complist = from_igmguesses_to_complist(infile)
    tab = ltiu.table_from_complist(complist)
    return tab
def renorm2_factor_from_wvrange(sp1, sp2, wvrange=None):
    """Normalization factor that scales sp2's flux level to sp1's.

    The factor is the ratio of median fluxes within wvrange (tuple in
    Angstrom); when wvrange is None the whole spectra are used.
    """
    if wvrange is None:
        med1 = np.nanmedian(sp1.flux)
        med2 = np.nanmedian(sp2.flux)
    else:
        wv1 = sp1.wavelength.to('AA').value
        wv2 = sp2.wavelength.to('AA').value
        in1 = (wv1 >= wvrange[0]) & (wv1 <= wvrange[1])
        in2 = (wv2 >= wvrange[0]) & (wv2 <= wvrange[1])
        med1 = np.nanmedian(sp1.flux[in1])
        med2 = np.nanmedian(sp2.flux[in2])
    return med1 / med2
def get_s2n(sp, wvrange=None):
    """Median signal-to-noise ratio of spectrum sp.

    Parameters
    ----------
    sp : XSpectrum1D
        Spectrum with .flux and .sig set.
    wvrange : tuple, optional
        (wvmin, wvmax) in Angstrom; when given, only pixels inside the
        range contribute. Defaults to the full spectrum.

    Returns
    -------
    s2n : float
        Median of flux/sig over the selected pixels (NaNs ignored).
    """
    if wvrange is not None:
        wv = sp.wavelength.to('AA').value
        cond = (wv >= wvrange[0]) & (wv <= wvrange[1])
    else:
        cond = np.ones(sp.npix, dtype=bool)
    # Bug fix: the mask was previously computed but never applied, so the
    # wvrange argument had no effect on the result.
    s2n = np.nanmedian(sp.flux[cond] / sp.sig[cond])
    return s2n
def plot_two_spec(sp1, sp2, text1='spec1', text2='spec2', renorm2=True, renorm_wvrange=None, verbose=False, ax=None):
    """Plot two XSpectrum1D spectra for comparison purposes.

    Parameters
    ----------
    sp1, sp2 : XSpectrum1D
        Spectra to compare; sp2 may be rescaled to sp1's flux level.
    text1, text2 : str
        Legend labels for sp1 and sp2.
    renorm2 : bool or float
        If True, rescale sp2 by the median-flux ratio (see
        renorm2_factor_from_wvrange); if a float, use it directly as the
        scale factor; if False, no rescaling.
    renorm_wvrange : tuple, optional
        Wavelength range used to compute the rescaling factor.
    verbose : bool
        If True, print S/N and flux-ratio statistics.
    ax : matplotlib Axes, optional
        Axes to draw on; defaults to the current axes.
    """
    if renorm2:
        if not isinstance(renorm2, bool): # assume float
            renorm = renorm2
        else:
            renorm = renorm2_factor_from_wvrange(sp1, sp2, wvrange=renorm_wvrange)
    else:
        renorm = 1
    # y-limit: 10% headroom above the larger of the two (rescaled) maxima
    max1 = np.nanmax(sp1.flux)
    max2 = np.nanmax(sp2.flux*renorm)
    ymax = 1.1*np.max([max1,max2])
    if ax is None:
        ax = plt.gca()
    # main plot
    ax.plot(sp1.wavelength, sp1.flux, 'k', drawstyle='steps-mid', label=text1)
    if sp1.sig_is_set:
        ax.plot(sp1.wavelength, sp1.sig, 'g', drawstyle='steps-mid')
    ax.plot(sp2.wavelength, renorm*sp2.flux, 'b', drawstyle='steps-mid', label=text2)
    if sp2.sig_is_set:
        ax.plot(sp2.wavelength, renorm*sp2.sig, 'y', drawstyle='steps-mid')
    ax.set_ylim(0, ymax)
    # print stats
    if verbose:
        print("<SN1> = {}".format(np.nanmedian(sp1.flux/sp1.sig)))
        print("<SN2> = {}".format(np.nanmedian(sp2.flux/sp2.sig)))
        print("<FL_IVAR1> = {}".format(np.nanmedian(sp1.flux/sp1.sig**2)))
        print("<FL_IVAR2> = {}".format(np.nanmedian(sp2.flux/sp2.sig**2)))
        print("<FL1>/<FL2> = {}".format(np.nanmedian(sp1.flux)/np.nanmedian(sp2.flux)))
def plot_two_mpdaf_spec(sp1, sp2, wvrange=None, **kwargs):
    """Plot two MPDAF Spectrum objects for comparison.

    Parameters
    ----------
    sp1, sp2 : mpdaf.obj.Spectrum
        Spectra to compare. NOTE: when wvrange is given, both spectra are
        masked in place outside that range.
    wvrange : tuple, optional
        (lmin, lmax) passed to Spectrum.mask_region.
    **kwargs
        Forwarded to plot_two_spec (labels, renormalization, axes, ...).
    """
    # mask region of interest
    # Bug fix: the original tested the undefined name `wv_range`
    # (NameError); the parameter is called `wvrange`.
    if wvrange is not None:
        sp1.mask_region(lmin=wvrange[0], lmax=wvrange[1], inside=False)
        sp2.mask_region(lmin=wvrange[0], lmax=wvrange[1], inside=False)
    # transform to XSpectrum1D objects
    spec1 = xspectrum1d_from_mpdaf_spec(sp1)
    spec2 = xspectrum1d_from_mpdaf_spec(sp2)
    # plot
    plot_two_spec(spec1, spec2, **kwargs)
def plot_spec_and_models(spec_filename, models_filenames='all'):
    """Plots several models on top of a single spectrum.

    Parameters
    ----------
    spec_filename : str
        Name of the spectrum file to plot.
    models_filenames : 'all' or list of str
        Model spectra to overplot; 'all' globs `*_inspect.fits` in the
        current directory.
    """
    if models_filenames == 'all':
        models = glob.glob("*_inspect.fits")
    else:
        models = models_filenames
    spec = readspec(spec_filename)
    models_specs = [readspec(model) for model in models]
    plt.figure()
    # normalize by the continuum when one is available
    if spec.co_is_set:
        spec.normalize(co=spec.co)
    plt.plot(spec.wavelength, spec.flux, 'k', drawstyle='steps-mid')
    plt.plot(spec.wavelength, spec.sig, 'g', drawstyle='steps-mid')
    for model_s in models_specs:
        # label each model by its filename stem (before '_inspect')
        plt.plot(model_s.wavelength, model_s.flux, label=model_s.filename.split('_inspect')[0])
    plt.legend(ncol=5)
    plt.ylim(-0.2,2.1)
def give_dv(z, zref, **kwargs):
    """Deprecated wrapper for linetools.utils.dv_from_z; kept for convenience."""
    print("This function is now in linetools.utils.dv_from_z(), please avoid using this one.")
    return ltu.dv_from_z(z, zref, **kwargs)


def give_dz(dv, zref, **kwargs):
    """Deprecated wrapper for linetools.utils.dz_from_dv; kept for convenience."""
    print("This function is now in linetools.utils.dz_from_dv(), please avoid using this one.")
    return ltu.dz_from_dv(dv, zref, **kwargs)
# estimate fluxes
def get_flux_wvrange(spec, wvrange, flux_units = u.erg/u.cm/u.cm/u.s/u.AA, substract_cont=False):
    """Direct integration of the flux of a spectrum within a wavelength range.

    Parameters
    ----------
    spec : XSpectrum1D
        Spectrum whose flux is integrated.
    wvrange : tuple with wavelength range, e.g. (4000,5000)*u.AA
    flux_units : astropy units, optional
        Units assigned to the flux values.
    substract_cont : bool, optional
        Not implemented; raises NotImplementedError if True.

    Returns
    -------
    flux : Quantity
        Integrated flux (sum of pixel fluxes times the mean pixel width).
    """
    cond = (spec.wavelength >= wvrange[0]) & (spec.wavelength <= wvrange[1])
    npix = np.sum(cond)
    print("{} pixels in the range {}".format(npix, wvrange))
    dws = np.diff(spec.wavelength[cond])
    dw = np.mean(dws)
    max_diff = np.max(np.fabs(dws - dw))
    if max_diff > dw / 10.:
        print('Warning: there is at least one dw value that differs significantly from the rest. Please check.')
        # Bug fix: the original referenced the undefined name `max_dw`
        # here, raising NameError whenever this warning branch was hit.
        print("The maximum dw_diff is {} for a mean dw of {}".format(max_diff, dw))
    fl_sum = np.sum(spec.flux[cond]) * flux_units
    flux = fl_sum * dw
    if substract_cont:
        raise NotImplementedError("Not yet implemented")
    return flux
def xspectrum1d_from_mpdaf_spec(sp, airvac='air'):
    """Build an XSpectrum1D from an MPDAF Spectrum, dropping masked pixels.

    It does not take into account whether the wavelengths are in air or
    vacuum anymore (the `airvac` value is only stored in the metadata of an
    intermediate object and is not carried into the returned spectrum).

    Parameters
    ----------
    sp : mpdaf.obj.Spectrum
        Input spectrum; must have .data, .var, .mask and .wave set.
    airvac : str, optional
        Recorded in intermediate metadata only; no conversion is performed.

    Returns
    -------
    spec2 : XSpectrum1D
        Spectrum built from the unmasked pixels (sigma = sqrt(var)).
    """
    # keep only the unmasked pixels
    nomask = ~sp.mask
    fl = sp.data[nomask]
    er = np.sqrt(sp.var[nomask])
    wv = sp.wave.coord()[nomask]
    meta = dict(airvac=airvac)
    spec = XSpectrum1D.from_tuple((wv, fl, er), meta=meta)
    #spec.airtovac()
    # print("\t Hola!!!!!!!!!!!!!!1")
    # NOTE(review): the spectrum is rebuilt without `meta`, so the airvac
    # tag set above is discarded — confirm whether that is intended.
    spec2 = XSpectrum1D.from_tuple((spec.wavelength, fl, er))
    return spec2
def chi2_from_two_spec(spec1, spec2, wvrange=None):
    """Reduced chi2 of the flux difference between two spectra.

    Both spectra must share the same wavelength grid. The variance used is
    the sum of the squared error arrays of whichever spectra have one set
    (1. if neither does); low-outlier variances are replaced by the mean
    after a 3-sigma clip.

    Parameters
    ----------
    spec1, spec2 : XSpectrum1D
        Spectra on the same wavelength grid.
    wvrange : tuple, optional
        (wvmin, wvmax) in Angstrom; defaults to the full spectrum.
        (Bug fix: the original crashed with TypeError when wvrange was
        left as its default None.)

    Returns
    -------
    chi2_dof : float
        Sum of chi2 over the selected pixels divided by their number.
    """
    from scipy.stats import sigmaclip
    assert spec1.npix == spec2.npix, "Specs must be of same length"
    # np.all replaces np.alltrue, which was removed in NumPy 2.0
    assert np.all(spec1.wavelength == spec2.wavelength), "Specs must be in the same wavelength grid"
    # obtain combined variance
    if spec1.sig_is_set and spec2.sig_is_set:
        er2 = spec1.sig ** 2 + spec2.sig ** 2
    elif spec1.sig_is_set:
        er2 = spec1.sig ** 2
    elif spec2.sig_is_set:
        er2 = spec2.sig ** 2
    else:
        er2 = 1.
    # clean er2 a bit: replace low outliers with the mean variance
    _, vmin, vmax = sigmaclip(er2, low=3, high=3)
    er2 = np.where(er2 <= vmin, np.mean(er2), er2)
    chi2 = (spec1.flux - spec2.flux) ** 2. / er2
    if wvrange is None:
        cond = np.ones(spec1.npix, dtype=bool)
    else:
        wv = spec1.wavelength.to('AA').value
        cond = (wv >= wvrange[0]) & (wv <= wvrange[1])
    chi2_dof = np.sum(chi2[cond]) / len(chi2[cond])
    return chi2_dof
def plot_mypython_catalog(image, catalog):
    """Display a FITS image with catalog sources overplotted. (From S.C.)

    Parameters
    ----------
    image : str
        Filename of the FITS image.
    catalog : str
        Filename of a source table readable by astropy Table, with columns
        ID, x, y, a, b, theta.
    """
    from astropy.io import fits
    from photutils.aperture import EllipticalAperture
    img, header = fits.getdata(image, header=True)
    sources = Table.read(catalog)
    fig, ax = plot_fits(img, header, figsize=(20, 20), show=False, cmap="Greys", fontsize=20)
    ax.scatter(sources['x'], sources['y'], s=10, c='r')
    # annotate each source with its (1-based) ID and draw its ellipse
    for ID, x, y, a, b, theta in zip(sources['ID'], sources['x'], sources['y'],
                                     sources['a'], sources['b'], sources['theta']):
        plt.annotate(str(ID + 1), (x, y), color="b")
        aper = EllipticalAperture((x, y), a, b, theta)
        aper.plot(color="r", )
    plt.show()
def plot_fits(img, header, figsize=(10, 10),
              fontsize=16, levels=(None, None),
              lognorm=False, title=None, show=True,
              cmap="viridis"):
    """
    Show a fits image. (c) S.C. code
    Parameters
    ----------
    img: np.ndarray
        Image data
    header: fits.header.Header
        Fits image header
    figsize: tuple of ints, optional
        Size of figure to be displayed (x,y)
    levels: tuple of floats, optional
        Minimum and maximum pixel values
        for visualisation.
    lognorm: bool, optional
        If true, the visualisation is log
        stretched.
    title: str, optional
        Title of the image
    show: bool, optional
        If true, displays the image.
        Else, returns the fig, ax
    cmap: str or pyplot cmap, optional
        Defaults to viridis
    Returns
    -------
    None if show is False. fig, ax if True
    """
    import warnings  # bug fix: warnings was used below but never imported
    from astropy.wcs import WCS
    from astropy.stats import sigma_clipped_stats
    from astropy import visualization as vis
    plt.rcParams['font.size'] = fontsize
    wcs = WCS(header)
    _, median, sigma = sigma_clipped_stats(img)
    assert len(levels) == 2, "Invalid levels. Use this format: (vmin,vmax)"
    vmin, vmax = levels
    # fill in missing levels from the clipped image statistics
    if vmin is None:
        vmin = median
    if vmax is not None:
        if vmin > vmax:
            vmin = vmax - 10 * sigma
            warnings.warn("levels changed to ({:f},{:f}) because input vmin waz greater than vmax".format(vmin, vmax))
    else:
        vmax = median + 10 * sigma
    fig = plt.figure(figsize=figsize)
    ax = plt.subplot(projection=wcs)
    if lognorm:
        ax.imshow(img, vmax=vmax, vmin=vmin, norm=vis.ImageNormalize(stretch=vis.LogStretch()), cmap=cmap)
    else:
        ax.imshow(img, vmax=vmax, vmin=vmin, cmap=cmap)
    ax.set_xlabel("RA")
    ax.set_ylabel("Dec")
    ax.set_title(title)
    if show:
        plt.show()
    else:
        return fig, ax
def image2cube(image):
    """
    Parameters
    ----------
    image : mpdaf.obj.Image
        Input image object
    Returns
    -------
    cube : mpdaf.obj.Cube
        A cube version of the input image.
    """
    # NOTE(review): this function appears to be incomplete — it builds a WCS
    # and a dummy WaveCoord but never constructs or returns the Cube promised
    # by the docstring (it currently returns None). Confirm and finish
    # before relying on it.
    from mpdaf.obj import Cube, Image, WaveCoord
    # get the wcs
    wcs = image.wcs
    # create dummy WaveCoord
    wv_coord = WaveCoord(shape=1)
def compare_2_marzfiles(marzfile1,marzfile2):
    """Compare 2 MARZ files of equal length and print the main differences.

    Rows are compared on QOP, FinZ and FinTID; rows where both files have
    QOP == 1 (unknown) are skipped. Comments are printed for every
    non-skipped row, whether or not the row differs.

    Parameters
    ----------
    marzfile1, marzfile2 : str
        Filenames of the two MARZ files (must have the same number of rows).
    """
    mz1 = table_from_marzfile(marzfile1)
    mz2 = table_from_marzfile(marzfile2)
    assert len(mz1) == len(mz2), 'The input files have different lengths!'
    # per-row flags: True where the two files disagree on that column
    cond_QOP = [mz1['QOP'][ii] != mz2['QOP'][ii] for ii in range(len(mz1))]
    cond_z = [mz1['FinZ'][ii] != mz2['FinZ'][ii] for ii in range(len(mz1))]
    cond_tem = [mz1['FinTID'][ii] != mz2['FinTID'][ii] for ii in range(len(mz1))]
    cond_dif = np.array(cond_QOP) | np.array(cond_z) | np.array(cond_tem)
    if np.sum(cond_dif)==0:
        print('No differences were found.')
        return
    print('The following differences were found:')
    for ii in range(len(mz1)):
        z1 = mz1['FinZ'][ii]
        z2 = mz2['FinZ'][ii]
        tem1 = mz1['FinTID'][ii]
        tem2 = mz2['FinTID'][ii]
        QOP1 = mz1['QOP'][ii]
        QOP2 = mz2['QOP'][ii]
        com1 = mz1['Comment'][ii]
        com2 = mz2['Comment'][ii]
        # skip unknowns
        if (QOP1 == 1) and (QOP2 == 1):
            continue
        if cond_dif[ii]:
            print(mz1['#ID'][ii], z1, z2, tem1, tem2, QOP1, QOP2)
        else:
            pass
        # comments are shown for every non-skipped row, differing or not
        if (com1):
            print(' Comment1: ', com1)
        if (com2):
            print(' Comment2: ', com2)
| [
"numpy.alltrue",
"numpy.sqrt",
"linetools.spectra.xspectrum1d.XSpectrum1D.from_tuple",
"pyntejos.io.table_from_marzfile",
"scipy.stats.sigmaclip",
"numpy.argsort",
"numpy.array",
"linetools.utils.dz_from_dv",
"linetools.utils.is_local_minima",
"numpy.mean",
"linetools.isgm.abscomponent.AbsCompon... | [((1342, 1354), 'numpy.array', 'np.array', (['z1'], {}), '(z1)\n', (1350, 1354), True, 'import numpy as np\n'), ((1364, 1376), 'numpy.array', 'np.array', (['z2'], {}), '(z2)\n', (1372, 1376), True, 'import numpy as np\n'), ((1386, 1398), 'numpy.array', 'np.array', (['dv'], {}), '(dv)\n', (1394, 1398), True, 'import numpy as np\n'), ((1409, 1425), 'numpy.fabs', 'np.fabs', (['(z1 - z2)'], {}), '(z1 - z2)\n', (1416, 1425), True, 'import numpy as np\n'), ((1434, 1451), 'numpy.mean', 'np.mean', (['[z1, z2]'], {}), '([z1, z2])\n', (1441, 1451), True, 'import numpy as np\n'), ((1740, 1751), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (1748, 1751), True, 'import numpy as np\n'), ((1760, 1771), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (1768, 1771), True, 'import numpy as np\n'), ((2628, 2639), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (2636, 2639), True, 'import numpy as np\n'), ((2686, 2720), 'numpy.where', 'np.where', (['(errp == errp)', 'errp', 'errp'], {}), '(errp == errp, errp, errp)\n', (2694, 2720), True, 'import numpy as np\n'), ((3618, 3629), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (3626, 3629), True, 'import numpy as np\n'), ((3829, 3844), 'numpy.append', 'np.append', (['(0)', 'a'], {}), '(0, a)\n', (3838, 3844), True, 'import numpy as np\n'), ((3857, 3876), 'numpy.append', 'np.append', (['a_aux', '(0)'], {}), '(a_aux, 0)\n', (3866, 3876), True, 'import numpy as np\n'), ((4181, 4196), 'numpy.array', 'np.array', (['lower'], {}), '(lower)\n', (4189, 4196), True, 'import numpy as np\n'), ((4209, 4224), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (4217, 4224), True, 'import numpy as np\n'), ((4624, 4646), 'linetools.utils.is_local_minima', 'ltu.is_local_minima', (['a'], {}), '(a)\n', (4643, 4646), True, 'from linetools import utils as ltu\n'), ((4745, 4767), 'linetools.utils.is_local_maxima', 'ltu.is_local_maxima', (['a'], {}), '(a)\n', (4764, 4767), True, 'from linetools import 
utils as ltu\n'), ((4991, 5003), 'numpy.array', 'np.array', (['z1'], {}), '(z1)\n', (4999, 5003), True, 'import numpy as np\n'), ((5013, 5025), 'numpy.array', 'np.array', (['z2'], {}), '(z2)\n', (5021, 5025), True, 'import numpy as np\n'), ((5625, 5637), 'numpy.array', 'np.array', (['z1'], {}), '(z1)\n', (5633, 5637), True, 'import numpy as np\n'), ((5647, 5659), 'numpy.array', 'np.array', (['z2'], {}), '(z2)\n', (5655, 5659), True, 'import numpy as np\n'), ((5948, 5960), 'numpy.array', 'np.array', (['dv'], {}), '(dv)\n', (5956, 5960), True, 'import numpy as np\n'), ((6161, 6172), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (6169, 6172), True, 'import numpy as np\n'), ((6222, 6246), 'numpy.where', 'np.where', (['cond', 'value', 'x'], {}), '(cond, value, x)\n', (6230, 6246), True, 'import numpy as np\n'), ((6617, 6635), 'numpy.array', 'np.array', (['original'], {}), '(original)\n', (6625, 6635), True, 'import numpy as np\n'), ((6646, 6659), 'numpy.array', 'np.array', (['new'], {}), '(new)\n', (6654, 6659), True, 'import numpy as np\n'), ((6707, 6727), 'numpy.argsort', 'np.argsort', (['original'], {}), '(original)\n', (6717, 6727), True, 'import numpy as np\n'), ((6750, 6765), 'numpy.argsort', 'np.argsort', (['new'], {}), '(new)\n', (6760, 6765), True, 'import numpy as np\n'), ((6838, 6865), 'numpy.argsort', 'np.argsort', (['inds_new_sorted'], {}), '(inds_new_sorted)\n', (6848, 6865), True, 'import numpy as np\n'), ((7056, 7069), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (7067, 7069), False, 'import time\n'), ((7393, 7406), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (7404, 7406), False, 'import time\n'), ((7785, 7847), 'linetools.isgm.io.read_igmg_to_components', 'ltiio.read_igmg_to_components', (['igmguesses_json'], {'linelist': '"""ISM"""'}), "(igmguesses_json, linelist='ISM')\n", (7814, 7847), True, 'import linetools.isgm.io as ltiio\n'), ((10130, 10151), 'linetools.utils.jsonify', 'ltu.jsonify', (['out_dict'], {}), '(out_dict)\n', (10141, 10151), 
True, 'from linetools import utils as ltu\n'), ((10676, 10727), 'linetools.isgm.io.read_joebvp_to_components', 'ltiio.read_joebvp_to_components', (['joebvp_file', 'radec'], {}), '(joebvp_file, radec)\n', (10707, 10727), True, 'import linetools.isgm.io as ltiio\n'), ((10737, 10768), 'linetools.isgm.utils.table_from_complist', 'ltiu.table_from_complist', (['comps'], {}), '(comps)\n', (10761, 10768), True, 'from linetools.isgm import utils as ltiu\n'), ((11106, 11157), 'linetools.isgm.io.read_joebvp_to_components', 'ltiio.read_joebvp_to_components', (['joebvp_file', 'radec'], {}), '(joebvp_file, radec)\n', (11137, 11157), True, 'import linetools.isgm.io as ltiio\n'), ((12674, 12708), 'linetools.isgm.utils.table_from_complist', 'ltiu.table_from_complist', (['complist'], {}), '(complist)\n', (12698, 12708), True, 'from linetools.isgm import utils as ltiu\n'), ((13590, 13620), 'numpy.nanmedian', 'np.nanmedian', (['(sp.flux / sp.sig)'], {}), '(sp.flux / sp.sig)\n', (13602, 13620), True, 'import numpy as np\n'), ((14057, 14076), 'numpy.nanmax', 'np.nanmax', (['sp1.flux'], {}), '(sp1.flux)\n', (14066, 14076), True, 'import numpy as np\n'), ((14088, 14116), 'numpy.nanmax', 'np.nanmax', (['(sp2.flux * renorm)'], {}), '(sp2.flux * renorm)\n', (14097, 14116), True, 'import numpy as np\n'), ((15789, 15812), 'linetools.spectra.io.readspec', 'readspec', (['spec_filename'], {}), '(spec_filename)\n', (15797, 15812), False, 'from linetools.spectra.io import readspec\n'), ((15874, 15886), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15884, 15886), True, 'import matplotlib.pyplot as plt\n'), ((15949, 16013), 'matplotlib.pyplot.plot', 'plt.plot', (['spec.wavelength', 'spec.flux', '"""k"""'], {'drawstyle': '"""steps-mid"""'}), "(spec.wavelength, spec.flux, 'k', drawstyle='steps-mid')\n", (15957, 16013), True, 'import matplotlib.pyplot as plt\n'), ((16018, 16081), 'matplotlib.pyplot.plot', 'plt.plot', (['spec.wavelength', 'spec.sig', '"""g"""'], {'drawstyle': 
'"""steps-mid"""'}), "(spec.wavelength, spec.sig, 'g', drawstyle='steps-mid')\n", (16026, 16081), True, 'import matplotlib.pyplot as plt\n'), ((16215, 16233), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(5)'}), '(ncol=5)\n', (16225, 16233), True, 'import matplotlib.pyplot as plt\n'), ((16238, 16257), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.2)', '(2.1)'], {}), '(-0.2, 2.1)\n', (16246, 16257), True, 'import matplotlib.pyplot as plt\n'), ((16431, 16463), 'linetools.utils.dv_from_z', 'ltu.dv_from_z', (['z', 'zref'], {}), '(z, zref, **kwargs)\n', (16444, 16463), True, 'from linetools import utils as ltu\n'), ((16640, 16674), 'linetools.utils.dz_from_dv', 'ltu.dz_from_dv', (['dv', 'zref'], {}), '(dv, zref, **kwargs)\n', (16654, 16674), True, 'from linetools import utils as ltu\n'), ((17091, 17103), 'numpy.sum', 'np.sum', (['cond'], {}), '(cond)\n', (17097, 17103), True, 'import numpy as np\n'), ((17175, 17205), 'numpy.diff', 'np.diff', (['spec.wavelength[cond]'], {}), '(spec.wavelength[cond])\n', (17182, 17205), True, 'import numpy as np\n'), ((17215, 17227), 'numpy.mean', 'np.mean', (['dws'], {}), '(dws)\n', (17222, 17227), True, 'import numpy as np\n'), ((17914, 17937), 'numpy.sqrt', 'np.sqrt', (['sp.var[nomask]'], {}), '(sp.var[nomask])\n', (17921, 17937), True, 'import numpy as np\n'), ((18013, 18060), 'linetools.spectra.xspectrum1d.XSpectrum1D.from_tuple', 'XSpectrum1D.from_tuple', (['(wv, fl, er)'], {'meta': 'meta'}), '((wv, fl, er), meta=meta)\n', (18035, 18060), False, 'from linetools.spectra.xspectrum1d import XSpectrum1D\n'), ((18132, 18181), 'linetools.spectra.xspectrum1d.XSpectrum1D.from_tuple', 'XSpectrum1D.from_tuple', (['(spec.wavelength, fl, er)'], {}), '((spec.wavelength, fl, er))\n', (18154, 18181), False, 'from linetools.spectra.xspectrum1d import XSpectrum1D\n'), ((18499, 18547), 'numpy.alltrue', 'np.alltrue', (['(spec1.wavelength == spec2.wavelength)'], {}), '(spec1.wavelength == spec2.wavelength)\n', (18509, 18547), True, 'import 
numpy as np\n'), ((18886, 18915), 'scipy.stats.sigmaclip', 'sigmaclip', (['er2'], {'low': '(3)', 'high': '(3)'}), '(er2, low=3, high=3)\n', (18895, 18915), False, 'from scipy.stats import sigmaclip\n'), ((19400, 19432), 'astropy.io.fits.getdata', 'fits.getdata', (['image'], {'header': '(True)'}), '(image, header=True)\n', (19412, 19432), False, 'from astropy.io import fits\n'), ((19447, 19466), 'astropy.table.Table.read', 'Table.read', (['catalog'], {}), '(catalog)\n', (19457, 19466), False, 'from astropy.table import Table\n'), ((19924, 19934), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19932, 19934), True, 'import matplotlib.pyplot as plt\n'), ((21033, 21044), 'astropy.wcs.WCS', 'WCS', (['header'], {}), '(header)\n', (21036, 21044), False, 'from astropy.wcs import WCS\n'), ((21069, 21093), 'astropy.stats.sigma_clipped_stats', 'sigma_clipped_stats', (['img'], {}), '(img)\n', (21088, 21093), False, 'from astropy.stats import sigma_clipped_stats\n'), ((21500, 21527), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (21510, 21527), True, 'import matplotlib.pyplot as plt\n'), ((21537, 21564), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': 'wcs'}), '(projection=wcs)\n', (21548, 21564), True, 'import matplotlib.pyplot as plt\n'), ((22246, 22264), 'mpdaf.obj.WaveCoord', 'WaveCoord', ([], {'shape': '(1)'}), '(shape=1)\n', (22255, 22264), False, 'from mpdaf.obj import Cube, Image, WaveCoord\n'), ((22386, 22416), 'pyntejos.io.table_from_marzfile', 'table_from_marzfile', (['marzfile1'], {}), '(marzfile1)\n', (22405, 22416), False, 'from pyntejos.io import table_from_marzfile\n'), ((22427, 22457), 'pyntejos.io.table_from_marzfile', 'table_from_marzfile', (['marzfile2'], {}), '(marzfile2)\n', (22446, 22457), False, 'from pyntejos.io import table_from_marzfile\n'), ((730, 752), 'numpy.fabs', 'np.fabs', (['(value - array)'], {}), '(value - array)\n', (737, 752), True, 'import numpy as np\n'), ((2652, 
2669), 'numpy.sqrt', 'np.sqrt', (['(n + 0.75)'], {}), '(n + 0.75)\n', (2659, 2669), True, 'import numpy as np\n'), ((2751, 2768), 'numpy.sqrt', 'np.sqrt', (['(n - 0.25)'], {}), '(n - 0.25)\n', (2758, 2768), True, 'import numpy as np\n'), ((5149, 5186), 'numpy.where', 'np.where', (['(dv_aux < dv)', '(1)', 'association'], {}), '(dv_aux < dv, 1, association)\n', (5157, 5186), True, 'import numpy as np\n'), ((5721, 5741), 'linetools.utils.dv_from_z', 'ltu.dv_from_z', (['z', 'z2'], {}), '(z, z2)\n', (5734, 5741), True, 'from linetools import utils as ltu\n'), ((6185, 6196), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (6193, 6196), True, 'import numpy as np\n'), ((6201, 6212), 'numpy.isinf', 'np.isinf', (['x'], {}), '(x)\n', (6209, 6212), True, 'import numpy as np\n'), ((7939, 7959), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (7948, 7959), False, 'import json\n'), ((11613, 11633), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (11622, 11633), False, 'import json\n'), ((12215, 12316), 'linetools.isgm.abscomponent.AbsComponent.from_dict', 'AbsComponent.from_dict', (['idict'], {'skip_abslines': '(False)', 'chk_sep': '(False)', 'chk_data': '(False)', 'chk_vel': '(False)'}), '(idict, skip_abslines=False, chk_sep=False, chk_data=\n False, chk_vel=False)\n', (12237, 12316), False, 'from linetools.isgm.abscomponent import AbsComponent\n'), ((13153, 13182), 'numpy.nanmedian', 'np.nanmedian', (['sp1.flux[cond1]'], {}), '(sp1.flux[cond1])\n', (13165, 13182), True, 'import numpy as np\n'), ((13201, 13230), 'numpy.nanmedian', 'np.nanmedian', (['sp2.flux[cond2]'], {}), '(sp2.flux[cond2])\n', (13213, 13230), True, 'import numpy as np\n'), ((13259, 13281), 'numpy.nanmedian', 'np.nanmedian', (['sp1.flux'], {}), '(sp1.flux)\n', (13271, 13281), True, 'import numpy as np\n'), ((13300, 13322), 'numpy.nanmedian', 'np.nanmedian', (['sp2.flux'], {}), '(sp2.flux)\n', (13312, 13322), True, 'import numpy as np\n'), ((14130, 14150), 'numpy.max', 'np.max', 
(['[max1, max2]'], {}), '([max1, max2])\n', (14136, 14150), True, 'import numpy as np\n'), ((14183, 14192), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14190, 14192), True, 'import matplotlib.pyplot as plt\n'), ((15706, 15733), 'glob.glob', 'glob.glob', (['"""*_inspect.fits"""'], {}), "('*_inspect.fits')\n", (15715, 15733), False, 'import glob\n'), ((15833, 15848), 'linetools.spectra.io.readspec', 'readspec', (['model'], {}), '(model)\n', (15841, 15848), False, 'from linetools.spectra.io import readspec\n'), ((17250, 17267), 'numpy.fabs', 'np.fabs', (['(dws - dw)'], {}), '(dws - dw)\n', (17257, 17267), True, 'import numpy as np\n'), ((17501, 17524), 'numpy.sum', 'np.sum', (['spec.flux[cond]'], {}), '(spec.flux[cond])\n', (17507, 17524), True, 'import numpy as np\n'), ((18947, 18959), 'numpy.mean', 'np.mean', (['er2'], {}), '(er2)\n', (18954, 18959), True, 'import numpy as np\n'), ((19172, 19190), 'numpy.sum', 'np.sum', (['chi2[cond]'], {}), '(chi2[cond])\n', (19178, 19190), True, 'import numpy as np\n'), ((19849, 19888), 'photutils.aperture.EllipticalAperture', 'EllipticalAperture', (['(x, y)', 'a', 'b', 'theta'], {}), '((x, y), a, b, theta)\n', (19867, 19888), False, 'from photutils.aperture import EllipticalAperture\n'), ((21849, 21859), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21857, 21859), True, 'import matplotlib.pyplot as plt\n'), ((22823, 22841), 'numpy.array', 'np.array', (['cond_tem'], {}), '(cond_tem)\n', (22831, 22841), True, 'import numpy as np\n'), ((22850, 22866), 'numpy.sum', 'np.sum', (['cond_dif'], {}), '(cond_dif)\n', (22856, 22866), True, 'import numpy as np\n'), ((2286, 2298), 'numpy.sum', 'np.sum', (['cond'], {}), '(cond)\n', (2292, 2298), True, 'import numpy as np\n'), ((3075, 3086), 'numpy.array', 'np.array', (['e'], {}), '(e)\n', (3083, 3086), True, 'import numpy as np\n'), ((3110, 3122), 'numpy.array', 'np.array', (['dz'], {}), '(dz)\n', (3118, 3122), True, 'import numpy as np\n'), ((3145, 3156), 'numpy.array', 
'np.array', (['s'], {}), '(s)\n', (3153, 3156), True, 'import numpy as np\n'), ((3179, 3190), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (3187, 3190), True, 'import numpy as np\n'), ((3217, 3232), 'numpy.array', 'np.array', (['a_err'], {}), '(a_err)\n', (3225, 3232), True, 'import numpy as np\n'), ((5105, 5125), 'linetools.utils.dv_from_z', 'ltu.dv_from_z', (['z1', 'z'], {}), '(z1, z)\n', (5118, 5125), True, 'from linetools import utils as ltu\n'), ((5791, 5806), 'numpy.fabs', 'np.fabs', (['dv_aux'], {}), '(dv_aux)\n', (5798, 5806), True, 'import numpy as np\n'), ((8278, 8378), 'linetools.isgm.abscomponent.AbsComponent.from_dict', 'AbsComponent.from_dict', (['comp_dict'], {'chk_sep': '(False)', 'chk_data': '(False)', 'chk_vel': '(False)', 'linelist': '"""ISM"""'}), "(comp_dict, chk_sep=False, chk_data=False, chk_vel=\n False, linelist='ISM')\n", (8300, 8378), False, 'from linetools.isgm.abscomponent import AbsComponent\n'), ((10290, 10359), 'json.dumps', 'json.dumps', (['gd_dict'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(gd_dict, sort_keys=True, indent=4, separators=(',', ': '))\n", (10300, 10359), False, 'import json\n'), ((22783, 22801), 'numpy.array', 'np.array', (['cond_QOP'], {}), '(cond_QOP)\n', (22791, 22801), True, 'import numpy as np\n'), ((22804, 22820), 'numpy.array', 'np.array', (['cond_z'], {}), '(cond_z)\n', (22812, 22820), True, 'import numpy as np\n'), ((1480, 1492), 'astropy.constants.c.to', 'C.to', (['"""km/s"""'], {}), "('km/s')\n", (1484, 1492), True, 'from astropy.constants import c as C\n'), ((5817, 5832), 'numpy.fabs', 'np.fabs', (['dv_aux'], {}), '(dv_aux)\n', (5824, 5832), True, 'import numpy as np\n'), ((5848, 5862), 'numpy.where', 'np.where', (['cond'], {}), '(cond)\n', (5856, 5862), True, 'import numpy as np\n'), ((8409, 8508), 'linetools.isgm.abscomponent.AbsComponent.from_dict', 'AbsComponent.from_dict', (['comp_dict'], {'chk_sep': '(False)', 'chk_data': '(False)', 'chk_vel': '(False)', 
'linelist': '"""H2"""'}), "(comp_dict, chk_sep=False, chk_data=False, chk_vel=\n False, linelist='H2')\n", (8431, 8508), False, 'from linetools.isgm.abscomponent import AbsComponent\n'), ((14659, 14691), 'numpy.nanmedian', 'np.nanmedian', (['(sp1.flux / sp1.sig)'], {}), '(sp1.flux / sp1.sig)\n', (14671, 14691), True, 'import numpy as np\n'), ((14726, 14758), 'numpy.nanmedian', 'np.nanmedian', (['(sp2.flux / sp2.sig)'], {}), '(sp2.flux / sp2.sig)\n', (14738, 14758), True, 'import numpy as np\n'), ((14798, 14835), 'numpy.nanmedian', 'np.nanmedian', (['(sp1.flux / sp1.sig ** 2)'], {}), '(sp1.flux / sp1.sig ** 2)\n', (14810, 14835), True, 'import numpy as np\n'), ((14873, 14910), 'numpy.nanmedian', 'np.nanmedian', (['(sp2.flux / sp2.sig ** 2)'], {}), '(sp2.flux / sp2.sig ** 2)\n', (14885, 14910), True, 'import numpy as np\n'), ((14949, 14971), 'numpy.nanmedian', 'np.nanmedian', (['sp1.flux'], {}), '(sp1.flux)\n', (14961, 14971), True, 'import numpy as np\n'), ((14972, 14994), 'numpy.nanmedian', 'np.nanmedian', (['sp2.flux'], {}), '(sp2.flux)\n', (14984, 14994), True, 'import numpy as np\n'), ((21659, 21675), 'astropy.visualization.LogStretch', 'vis.LogStretch', ([], {}), '()\n', (21673, 21675), True, 'from astropy import visualization as vis\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 18:44:48 2019
Fill each user's trajectory points into a fixed trajectory grid, then
predict travel behaviour with a CNN. (Original docstring was in Chinese.)
@author: dell
"""
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from sklearn.model_selection import train_test_split
# Load the raw trajectory samples.
dataset = pd.read_csv(r"F:\python and machine learning\track data and travel prediction\sample_cnn")
# Quantise coordinates to a 0.01-degree grid cell (floor to 2 decimals).
dataset['longitude'] = (dataset['LONGITUDE'] * 100) // 1 / 100
dataset['latitude'] = (dataset['LATITUDE'] * 100) // 1 / 100
# START_TIME appears to encode a numeric timestamp; this extracts an
# hour-of-day style field — TODO confirm against the source data format.
dataset['HOUR'] = dataset['START_TIME'] // 100 % 100
dataset.sort_values(['USER_ID', 'P_MONTH'], inplace=True)
dataset.reset_index(drop=True, inplace=True)
# Keep one record per user / grid cell / day / hour combination.
dataset = dataset.drop_duplicates(['USER_ID', 'longitude', 'latitude', 'P_MONTH', 'HOUR'])
dataset = dataset.loc[:,('USER_ID', 'longitude', 'latitude', 'P_MONTH', 'FLAG')]
dt = dataset.copy()
# Re-truncation is a no-op here (values were already floored above) but kept.
dt['longitude'] = (dt['longitude'] * 100) // 1 / 100
dt['latitude'] = (dt['latitude'] * 100) // 1 / 100
# Fill each user's trajectory into a 512x401x1 trajectory grid.
j = 0
l = len(dt['USER_ID'].unique())
# Weekend/holiday dates (June 2018) excluded from the trajectory grids.
holidays = ['20180602', '20180603', '20180609', '20180610', '20180616', '20180617', '20180618',
            '20180623', '20180624', '20180630']
# One flattened 512*401 = 205312 grid per user; float16 to save memory.
result = np.zeros((l, 205312), dtype=np.float16)
for userid in (dt['USER_ID'].unique()):
    dataset_userid = dt[dt['USER_ID'] == userid]
    # NOTE(review): np.str is deprecated in modern NumPy; str would be safer.
    dataset_userid['P_MONTH'] = dataset_userid['P_MONTH'].astype(np.str)
    dataset_userid = dataset_userid[~dataset_userid['P_MONTH'].isin(holidays)]
    dataset_userid = dataset_userid.loc[:,('longitude', 'latitude')]
    dataset_userid.reset_index(drop=True, inplace=True)
    for i in range(0, len(dataset_userid)):
        # Map the cell to integer grid indices; grid origin at (118.04, 27.17).
        row = round((float(dataset_userid.loc[i, 'longitude']) - 118.04) * 100)
        col = round((float(dataset_userid.loc[i, 'latitude']) - 27.17) * 100)
        # Count visits to each cell (row-major flattening with width 401).
        result[j, (row*401+col)] += 1.0
    #result[j] /= np.max(result[j])
    j += 1
# Reshape flat grids into (samples, height, width, channels) for the CNN.
X = result.reshape(l, 512, 401, 1).astype('float16')
# Per-user label: mean FLAG across the user's records (binary target).
dt_flag = dt.loc[:,('USER_ID', 'FLAG')]
dt_flag = dt_flag.groupby('USER_ID').mean()
y = np.asarray(dt_flag['FLAG']).astype('float16')
y = y.reshape(l, 1)
def baseline_model():
    """Build and compile the binary-classification CNN.

    Three 16-filter 3x3 convolutions (the first two with stride 3 to
    shrink the 512x401 input quickly), 2x2 max-pooling, dropout, then a
    small dense head ending in a single sigmoid unit. Compiled with
    binary cross-entropy and Adam.
    """
    cnn = Sequential()
    conv_stack = [
        Convolution2D(16, (3, 3), input_shape=(512, 401, 1), strides=3, activation='relu'),
        Convolution2D(16, (3, 3), strides=3, activation='relu'),
        Convolution2D(16, (3, 3), activation='relu'),
    ]
    for conv in conv_stack:
        cnn.add(conv)
    cnn.add(MaxPooling2D(pool_size=(2, 2)))
    cnn.add(Dropout(0.2))
    cnn.add(Flatten())
    cnn.add(Dense(8, activation='relu'))
    cnn.add(Dense(1, activation='sigmoid'))
    cnn.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return cnn
model = baseline_model()
# Hold out 30% of users for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
model.fit(X_train, y_train, epochs=20, batch_size=512)
# evaluate() returns [loss, accuracy] on the held-out users.
scores = model.evaluate(X_test, y_test)
print(scores)
| [
"keras.layers.Flatten",
"pandas.read_csv",
"keras.layers.convolutional.Convolution2D",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"keras.models.Sequential",
"numpy.zeros",
"keras.layers.convolutional.MaxPooling2D",
"keras.layers.Dense",
"keras.layers.Dropout"
] | [((385, 487), 'pandas.read_csv', 'pd.read_csv', (['"""F:\\\\python and machine learning\\\\track data and travel prediction\\\\sample_cnn"""'], {}), "(\n 'F:\\\\python and machine learning\\\\track data and travel prediction\\\\sample_cnn'\n )\n", (396, 487), True, 'import pandas as pd\n'), ((1299, 1338), 'numpy.zeros', 'np.zeros', (['(l, 205312)'], {'dtype': 'np.float16'}), '((l, 205312), dtype=np.float16)\n', (1307, 1338), True, 'import numpy as np\n'), ((2925, 2978), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (2941, 2978), False, 'from sklearn.model_selection import train_test_split\n'), ((2256, 2268), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2266, 2268), False, 'from keras.models import Sequential\n'), ((2153, 2180), 'numpy.asarray', 'np.asarray', (["dt_flag['FLAG']"], {}), "(dt_flag['FLAG'])\n", (2163, 2180), True, 'import numpy as np\n'), ((2284, 2371), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(16)', '(3, 3)'], {'input_shape': '(512, 401, 1)', 'strides': '(3)', 'activation': '"""relu"""'}), "(16, (3, 3), input_shape=(512, 401, 1), strides=3, activation=\n 'relu')\n", (2297, 2371), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2383, 2438), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(16)', '(3, 3)'], {'strides': '(3)', 'activation': '"""relu"""'}), "(16, (3, 3), strides=3, activation='relu')\n", (2396, 2438), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2455, 2499), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""'}), "(16, (3, 3), activation='relu')\n", (2468, 2499), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2578, 2608), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 
2)'}), '(pool_size=(2, 2))\n', (2590, 2608), False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((2625, 2637), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2632, 2637), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2654, 2663), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2661, 2663), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2680, 2707), 'keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""relu"""'}), "(8, activation='relu')\n", (2685, 2707), False, 'from keras.layers import Dense, Dropout, Flatten\n'), ((2724, 2754), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2729, 2754), False, 'from keras.layers import Dense, Dropout, Flatten\n')] |
import csv
import numpy as np
from lightfm import LightFM
import scipy.sparse
from sklearn.metrics import label_ranking_average_precision_score
import math
import random
import pickle
def loadUserFeatures(i_file, n_users):
    """Read a user-user edge CSV (src, dst, count; header skipped) into
    an n_users x n_users sparse LIL matrix of edge counts."""
    print('Loading user graph...')
    graph = scipy.sparse.lil_matrix((n_users, n_users), dtype=int)
    with open(i_file, encoding='utf-8') as fh:
        rows = csv.reader(fh, delimiter=',')
        next(rows, None)  # skip the header line
        for record in rows:
            source, target, weight = (int(v) for v in record[:3])
            graph[source, target] = weight
    return graph
def loadItemFeatures(i_file, n_items, n_tags):
    """Read an item-tag CSV (item_id, tag_id; header skipped) into an
    n_items x n_tags binary sparse LIL matrix."""
    print('Loading item graph...')
    features = scipy.sparse.lil_matrix((n_items, n_tags), dtype=int)
    with open(i_file, encoding='utf-8') as fh:
        rows = csv.reader(fh, delimiter=',')
        next(rows, None)  # skip the header line
        for record in rows:
            features[int(record[0]), int(record[1])] = 1
    return features
def loadInteractions(i_file, n_users, n_items):
    """Read a (user, item) interaction CSV (header skipped) into an
    n_users x n_items binary sparse LIL matrix."""
    print('Loading interaction...')
    matrix = scipy.sparse.lil_matrix((n_users, n_items), dtype=int)
    with open(i_file, encoding='utf-8') as fh:
        rows = csv.reader(fh, delimiter=',')
        next(rows, None)  # skip the header line
        for record in rows:
            matrix[int(record[0]), int(record[1])] = 1
    return matrix
def loadInteractions_keenfunc_thres(i_file, n_users, n_items):
    """Load positive (user, item) pairs and synthesize negatives.

    Every item that never appears as a positive is paired with two
    randomly sampled users and labelled 0. Returns three parallel numpy
    arrays (users, items, labels). Non-deterministic: random.sample is
    used without a fixed seed.
    """
    pos_users, pos_items, pos_labels = [], [], []
    print('Loading interactions threshold model...')
    with open(i_file, encoding='utf-8') as f:
        reader = csv.reader(f, delimiter=',')
        next(reader, None)
        for row in reader:
            pos_users.append(int(row[0]))
            pos_items.append(int(row[1]))
            pos_labels.append(1)
    total_users = [u for u in range(n_users)]
    total_items = [i for i in range(n_items)]
    neg_users, neg_items, neg_labels = [], [], []
    # Items with no positive interaction at all become negative candidates.
    set_neg_items = list(set(total_items) - set(pos_items))
    for item in set_neg_items:
        # Two distinct random users per negative item.
        users = list(random.sample(total_users, 2))
        for u in users:
            neg_users.append(u)
            neg_items.append(item)
            neg_labels.append(0)
    users = pos_users + neg_users
    items = pos_items + neg_items
    labels = pos_labels + neg_labels
    print(len(users), len(items), len(labels))
    return np.array(users), np.array(items), np.array(labels)
def loadTest(i_file):
    """Read the test CSV (user, item, label; header skipped) and return
    three parallel int numpy arrays: users, items, labels."""
    print('Loading test set...')
    triples = []
    with open(i_file, encoding='utf-8') as fh:
        rows = csv.reader(fh, delimiter=',')
        next(rows, None)  # skip the header line
        for record in rows:
            triples.append((int(record[0]), int(record[1]), int(record[2])))
    users = np.array([t[0] for t in triples])
    items = np.array([t[1] for t in triples])
    labels = np.array([t[2] for t in triples])
    return users, items, labels
def evaluateLRAP(test_users, test_items, labels, pred):
    """Label-ranking average precision restricted to users that have at
    least one positive item in the test set.

    Parameters are four parallel sequences: user ids, item ids, binary
    relevance labels, and predicted scores. Returns the LRAP score.

    Bug fix: the original scored the filtered ``sub_pred`` against the
    *unfiltered* ``labels`` array, so y_true and y_score had mismatched
    lengths whenever any user lacked positives; score against the
    filtered ``sub_labels`` instead. The score was also computed and
    then discarded — it is now returned (the original returned None,
    so returning a value is backward-compatible).
    """
    # Users with at least one positive interaction.
    pos_users = set()
    for i in range(len(test_users)):
        if labels[i] == 1:
            pos_users.add(test_users[i])
    # Keep only rows belonging to users with positives.
    sub_labels = []
    sub_pred = []
    for i in range(len(test_users)):
        if test_users[i] in pos_users:
            sub_labels.append(labels[i])
            sub_pred.append(pred[i])
    y_score = np.array([sub_pred])
    y_true = np.array([sub_labels])
    score = label_ranking_average_precision_score(y_true, y_score)
    return score
def recommendSOAnswers(i_train, i_test, i_user_graph, i_item_graph, n_users, n_items, n_tags):
    """Train a LightFM hybrid recommender and print its LRAP on the test set.

    Loads the interaction matrix plus user/item feature matrices from
    CSV files, fits a logistic-loss LightFM model, predicts scores for
    the test pairs, and prints the label-ranking average precision.
    NOTE(review): the score is printed but not returned.
    """
    interactions = loadInteractions(i_train, n_users, n_items)
    u_features = loadUserFeatures(i_user_graph, n_users)
    i_features = loadItemFeatures(i_item_graph, n_items, n_tags)
    test_users, test_items, labels = loadTest(i_test)
    model = LightFM(learning_rate=0.05, loss='logistic')
    model.fit(interactions, user_features=u_features, item_features=i_features, epochs=5, verbose=True, num_threads=10)
    result = model.predict(test_users, test_items, item_features=i_features, user_features=u_features, num_threads=10)
    # Wrap as a single "sample" row as the sklearn metric expects 2-D input.
    y_score = np.array([result])
    y_true = np.array([labels])
    print(result)
    print(len(y_score))
    print(len(y_true))
    score = label_ranking_average_precision_score(y_true, y_score)
    print(score)
def mini_batch(X, Y, mini_batch_size=64, seed=0):
    """Shuffle (X, Y) with the given seed and split into mini-batches.

    Returns a list of (mini_batch_X, mini_batch_Y) tuples; the last
    batch may be smaller when the sample count is not a multiple of
    mini_batch_size.
    """
    m = X.shape[0]  # number of training examples
    # Seeded shuffle so successive calls with the same seed agree.
    np.random.seed(seed)
    order = list(np.random.permutation(m))
    X_shuffled = X[order, :]
    Y_shuffled = Y[order]
    # Consecutive slices of the shuffled data become the batches.
    batches = []
    for start in range(0, m, mini_batch_size):
        stop = start + mini_batch_size
        batches.append((X_shuffled[start:stop, :], Y_shuffled[start:stop]))
    return batches
def mini_batch_no_shuffle_X(X, mini_batch_size=64, seed=0):
    """Split X (no shuffling) into consecutive mini-batches along axis 0.

    The last batch may be smaller when X.shape[0] is not a multiple of
    mini_batch_size.
    """
    m = X.shape[0]  # number of training examples
    # Kept for parity with mini_batch(); X itself is not shuffled here.
    np.random.seed(seed)
    chunks = []
    for start in range(0, m, mini_batch_size):
        chunks.append(X[start:start + mini_batch_size, :])
    return chunks
def save_data(path, data):
    """Pickle ``data`` to ``path`` using the highest protocol available."""
    print('Saving data...')
    with open(path, 'wb') as out:
        pickle.dump(data, out, protocol=pickle.HIGHEST_PROTOCOL)
def load_data(path):
    """Unpickle and return the object stored at ``path``.

    Fix: the original passed ``open(path, 'rb')`` straight to
    ``pickle.load`` and never closed the handle; use a context manager
    so the file is always released.
    """
    print('Loading data...')
    with open(path, 'rb') as fh:
        return pickle.load(fh)
if __name__ == "__main__":
path_interaction = '../data/full_data_v5/so/train.csv'
path_user_features = '../data/full_data_v5/so/so_user_user_graph.csv'
path_item_features = '../data/full_data_v5/so/so_item_graph.csv'
n_users, n_items, n_tags = 23612, 1020809, 30354
path_inter_thres = './data/so_interactions_threshold.pickle'
path_save_interactions = './data/so_interactions.pickle'
path_save_users_features = './data/so_user_features.pickle'
path_save_items_features = './data/so_item_features.pickle'
# # interactions_thres = loadInteractions_keenfunc_thres(i_file=path_interaction, n_users=n_users, n_items=n_items)
# # save_data(path=path_inter_thres, data=interactions_thres)
# interactions_thres = load_data(path=path_inter_thres)
# print(len(interactions_thres))
# interactions = loadInteractions(i_file=path_interaction, n_users=n_users, n_items=n_items)
# save_data(path=path_save_interactions, data=interactions)
# user_features = loadUserFeatures(i_file=path_user_features, n_users=n_users)
# save_data(path=path_save_users_features, data=user_features)
item_features = loadItemFeatures(i_file=path_item_features, n_items=n_items, n_tags=n_tags)
save_data(path=path_save_items_features, data=item_features)
| [
"random.sample",
"sklearn.metrics.label_ranking_average_precision_score",
"pickle.dump",
"lightfm.LightFM",
"numpy.array",
"numpy.random.seed",
"csv.reader",
"numpy.random.permutation"
] | [((3429, 3449), 'numpy.array', 'np.array', (['[sub_pred]'], {}), '([sub_pred])\n', (3437, 3449), True, 'import numpy as np\n'), ((3463, 3481), 'numpy.array', 'np.array', (['[labels]'], {}), '([labels])\n', (3471, 3481), True, 'import numpy as np\n'), ((3542, 3596), 'sklearn.metrics.label_ranking_average_precision_score', 'label_ranking_average_precision_score', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (3579, 3596), False, 'from sklearn.metrics import label_ranking_average_precision_score\n'), ((3949, 3993), 'lightfm.LightFM', 'LightFM', ([], {'learning_rate': '(0.05)', 'loss': '"""logistic"""'}), "(learning_rate=0.05, loss='logistic')\n", (3956, 3993), False, 'from lightfm import LightFM\n'), ((4249, 4267), 'numpy.array', 'np.array', (['[result]'], {}), '([result])\n', (4257, 4267), True, 'import numpy as np\n'), ((4281, 4299), 'numpy.array', 'np.array', (['[labels]'], {}), '([labels])\n', (4289, 4299), True, 'import numpy as np\n'), ((4378, 4432), 'sklearn.metrics.label_ranking_average_precision_score', 'label_ranking_average_precision_score', (['y_true', 'y_score'], {}), '(y_true, y_score)\n', (4415, 4432), False, 'from sklearn.metrics import label_ranking_average_precision_score\n'), ((4578, 4598), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4592, 4598), True, 'import numpy as np\n'), ((5877, 5897), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5891, 5897), True, 'import numpy as np\n'), ((386, 414), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (396, 414), False, 'import csv\n'), ((815, 843), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (825, 843), False, 'import csv\n'), ((1211, 1239), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1221, 1239), False, 'import csv\n'), ((1626, 1654), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1636, 
1654), False, 'import csv\n'), ((2399, 2414), 'numpy.array', 'np.array', (['users'], {}), '(users)\n', (2407, 2414), True, 'import numpy as np\n'), ((2416, 2431), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (2424, 2431), True, 'import numpy as np\n'), ((2433, 2449), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2441, 2449), True, 'import numpy as np\n'), ((2616, 2644), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (2626, 2644), False, 'import csv\n'), ((2826, 2841), 'numpy.array', 'np.array', (['users'], {}), '(users)\n', (2834, 2841), True, 'import numpy as np\n'), ((2843, 2858), 'numpy.array', 'np.array', (['items'], {}), '(items)\n', (2851, 2858), True, 'import numpy as np\n'), ((2860, 2876), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (2868, 2876), True, 'import numpy as np\n'), ((4652, 4676), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (4673, 4676), True, 'import numpy as np\n'), ((6655, 6711), 'pickle.dump', 'pickle.dump', (['data', 'fle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(data, fle, protocol=pickle.HIGHEST_PROTOCOL)\n', (6666, 6711), False, 'import pickle\n'), ((2081, 2110), 'random.sample', 'random.sample', (['total_users', '(2)'], {}), '(total_users, 2)\n', (2094, 2110), False, 'import random\n')] |
#!/usr/bin/env python2
import cv2
import sys, random, socket, struct
import numpy as np
from math import sqrt, sin, cos, pi, atan2, fmod
#################
# Choose camera #
#################
# Camera source from the command line: a numeric argument selects a
# device index, any other string is passed through (e.g. a file/URL),
# and no argument defaults to device 0.
camera = ' '.join(sys.argv[1:])
try:
    v = int(camera)
    camera = v
except ValueError:
    pass
if camera == '':
    camera = 0
######################
# Open video capture #
######################
VIDEO_W = 640
VIDEO_H = 480
cap = cv2.VideoCapture(camera)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, VIDEO_W)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, VIDEO_H)
##################
# Control server #
##################
class ControlServer:
    """Non-blocking UDP control server (port 50000) that mutates the
    global ``lhaumpions`` list in response to one-byte-tagged packets.

    Packet formats (struct, native byte order):
      'P' + '=BBff'   : set lhaumpion[idx] position (x, y normalized 0..1)
      'F' + '=BBBBBB' : force lhaumpion[idx] color (r, g, b, forced flag)
      'I' + '=BB16s'  : set lhaumpion[idx] target IP (16-byte padded string)
    NOTE(review): ``ord(data[0])`` assumes Python 2 str packets, matching
    the python2 shebang.
    """
    def __init__(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.serv.bind(("", 50000))
        # Zero timeout: recvfrom raises socket.error instead of blocking.
        self.serv.settimeout(0.0)
    def process(self):
        """Handle at most one pending datagram; silently skip if none."""
        try:
            data, addr = self.serv.recvfrom(1024)
            if ord(data[0]) == ord('P'):
                cmd, idx, x, y = struct.unpack('=BBff', data)
                if len(lhaumpions) > idx:
                    # Normalized coordinates scaled to pixel space.
                    lhaumpions[idx].x = x * VIDEO_W
                    lhaumpions[idx].y = y * VIDEO_H
            elif ord(data[0]) == ord('F'):
                cmd, idx, r, g, b, f = struct.unpack('=BBBBBB', data)
                if len(lhaumpions) > idx:
                    lhaumpions[idx].force_color(r, g, b, f)
            elif ord(data[0]) == ord('I'):
                cmd, idx, ip = struct.unpack('=BB16s', data)
                if len(lhaumpions) > idx:
                    # Strip the padding from the fixed-width IP field.
                    lhaumpions[idx].ip = ip.rstrip(' \t\r\n\0')
        except socket.error:
            pass
# Singleton UDP control server instance, polled from the main loop.
ctrlserv = ControlServer()
##############
# LHAUMpions #
##############
class LHAUMpion:
    """A controllable light whose color is accumulated each frame from
    nearby detected blobs, then sent over UDP to its device."""
    # Shared UDP socket for all instances (class attribute).
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def __init__(self, x, y):
        # Position in pixel space (inputs are normalized 0..1).
        self.x = x * VIDEO_W
        self.y = y * VIDEO_H
        # Per-frame hue vector (hx, hy) and intensity accumulator v.
        self.hx = 0
        self.hy = 0
        self.v = 0
        self.bgr = (0,0,0)
        self.forced = False
        self.ip = False
        self.reset()
    def reset(self):
        """Fold the per-frame accumulators into hue/value/bgr, then clear them."""
        # Mean hue angle of the accumulated vector, mapped to [0, 1).
        self.hue = fmod(atan2(self.hy, self.hx) / 2 / pi + 1, 1)
        norm = (self.hx*self.hx + self.hy*self.hy) * self.v
        if norm > 1: norm = 1
        if norm < 0: norm = 0
        self.value = norm
        if not self.forced:
            # OpenCV HSV uses hue in [0, 180); convert to a BGR triple.
            bgr = cv2.cvtColor(np.uint8([[[int(self.hue * 180), 255, self.value*255]]]), cv2.COLOR_HSV2BGR)[0][0]
            self.bgr = (int(bgr[0]), int(bgr[1]), int(bgr[2]))
        self.hx = 0
        self.hy = 0
        self.v = 0
    def merge(self, obj):
        """Accumulate a detected blob's hue/intensity, weighted by
        blob size and inverse squared (normalized) distance."""
        dist = sqrt((obj.pt[0] - self.x)**2/VIDEO_W**2 + (obj.pt[1] - self.y)**2/VIDEO_H**2)/sqrt(2)
        weight = obj.size/dist**2 / 500
        self.v += weight
        if weight > 1: weight = 1
        if weight < 0: weight = 0
        # Hue accumulated as a unit-circle vector so angles average correctly.
        self.hx += weight*cos(pi*2*obj.hue)
        self.hy += weight*sin(pi*2*obj.hue)
    def force_color(self, r, g, b, forced):
        """Override the computed color; ``forced`` keeps the override active."""
        self.forced = forced
        self.bgr = (b, g, r)
    def send(self):
        """Transmit the current RGB color to the device (UDP port 6969)."""
        if self.ip:
            msg = struct.pack('=BBBB', 0xff, self.bgr[2], self.bgr[1], self.bgr[0])
            self.sock.sendto(msg, (self.ip, 6969))
lhaumpions = []
for i in range(10):
lhaumpions.append(LHAUMpion(random.random(), random.random()))
#########################
# Circle detection init #
#########################
# Parameters instance
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 100
params.maxThreshold = 240
# Filter by Area
params.filterByArea = True
params.minArea = 100
params.maxArea = 1000000
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.80
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.90
params.maxConvexity = 1
# Create a detector with the parameters
detector = None
ver = (cv2.__version__).split('.')
if int(ver[0]) < 3 :
detector = cv2.SimpleBlobDetector(params)
else :
detector = cv2.SimpleBlobDetector_create(params)
#######################
# Interactive objects #
#######################
class InteractiveObject:
def __init__(self, pt, size, hue):
self.update(pt, size, hue)
def update(self, pt, size, hue):
self.pt = pt
self.size = size
self.hue = hue
self.alive = 1
def areYou(self, pt, hue):
return ((self.pt[0]-pt[0])**2 + (self.pt[1]-pt[1])**2 < (3*self.size)**2) and (abs(self.hue - hue) < 0.1)
def age(self):
self.alive -= 0.1
if self.alive <= 0: self.alive = 0
alives = []
###########
# Process #
###########
display = True
while cv2.waitKey(10) == -1:
# Read next image
ret, im = cap.read()
# Find circles
keypoints = detector.detect(im)
# Update alive objects
for kp in keypoints:
found = False
hue = float(cv2.cvtColor(np.uint8([[im[kp.pt[1]][kp.pt[0]]]]), cv2.COLOR_BGR2HSV)[0][0][0])/180
for a in alives:
if a.areYou(kp.pt, hue):
a.update(kp.pt, int(kp.size), hue)
found = True
if not found:
alives.append(InteractiveObject(kp.pt, int(kp.size), hue))
# Draw, age and kill-olds alives
for a in alives:
bgr = cv2.cvtColor(np.uint8([[[int(a.hue * 180), 255, 255]]]), cv2.COLOR_HSV2BGR)[0][0]
bgr = (int(bgr[0]), int(bgr[1]), int(bgr[2]))
if display:
cv2.ellipse(
im,
(int(a.pt[0]), int(a.pt[1])),
(a.size, a.size),
-90,
0, 360*a.alive,
bgr,
4
)
for l in lhaumpions:
l.merge(a)
a.age()
if a.alive == 0:
alives.remove(a)
# Draw lhaumpions
for l in lhaumpions:
if display:
cv2.circle(
im,
(int(l.x), int(l.y)),
10,
l.bgr,
20
)
l.send()
l.reset()
# Get pupitre control
ctrlserv.process()
# Show results
if display:
cv2.imshow('Vision', im)
| [
"numpy.uint8",
"cv2.__version__.split",
"cv2.SimpleBlobDetector_create",
"socket.socket",
"math.sqrt",
"cv2.SimpleBlobDetector",
"cv2.imshow",
"math.cos",
"struct.pack",
"cv2.SimpleBlobDetector_Params",
"struct.unpack",
"cv2.VideoCapture",
"math.atan2",
"random.random",
"math.sin",
"cv... | [((420, 444), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera'], {}), '(camera)\n', (436, 444), False, 'import cv2\n'), ((2897, 2928), 'cv2.SimpleBlobDetector_Params', 'cv2.SimpleBlobDetector_Params', ([], {}), '()\n', (2926, 2928), False, 'import cv2\n'), ((3352, 3378), 'cv2.__version__.split', 'cv2.__version__.split', (['"""."""'], {}), "('.')\n", (3373, 3378), False, 'import cv2\n'), ((1481, 1529), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1494, 1529), False, 'import sys, random, socket, struct\n'), ((3417, 3447), 'cv2.SimpleBlobDetector', 'cv2.SimpleBlobDetector', (['params'], {}), '(params)\n', (3439, 3447), False, 'import cv2\n'), ((3471, 3508), 'cv2.SimpleBlobDetector_create', 'cv2.SimpleBlobDetector_create', (['params'], {}), '(params)\n', (3500, 3508), False, 'import cv2\n'), ((4058, 4073), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (4069, 4073), False, 'import cv2\n'), ((659, 707), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (672, 707), False, 'import sys, random, socket, struct\n'), ((5169, 5193), 'cv2.imshow', 'cv2.imshow', (['"""Vision"""', 'im'], {}), "('Vision', im)\n", (5179, 5193), False, 'import cv2\n'), ((2177, 2270), 'math.sqrt', 'sqrt', (['((obj.pt[0] - self.x) ** 2 / VIDEO_W ** 2 + (obj.pt[1] - self.y) ** 2 / \n VIDEO_H ** 2)'], {}), '((obj.pt[0] - self.x) ** 2 / VIDEO_W ** 2 + (obj.pt[1] - self.y) ** 2 /\n VIDEO_H ** 2)\n', (2181, 2270), False, 'from math import sqrt, sin, cos, pi, atan2, fmod\n'), ((2255, 2262), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (2259, 2262), False, 'from math import sqrt, sin, cos, pi, atan2, fmod\n'), ((2392, 2413), 'math.cos', 'cos', (['(pi * 2 * obj.hue)'], {}), '(pi * 2 * obj.hue)\n', (2395, 2413), False, 'from math import sqrt, sin, cos, pi, atan2, fmod\n'), ((2430, 2451), 'math.sin', 'sin', (['(pi * 2 * obj.hue)'], {}), '(pi * 
2 * obj.hue)\n', (2433, 2451), False, 'from math import sqrt, sin, cos, pi, atan2, fmod\n'), ((2577, 2641), 'struct.pack', 'struct.pack', (['"""=BBBB"""', '(255)', 'self.bgr[2]', 'self.bgr[1]', 'self.bgr[0]'], {}), "('=BBBB', 255, self.bgr[2], self.bgr[1], self.bgr[0])\n", (2588, 2641), False, 'import sys, random, socket, struct\n'), ((2751, 2766), 'random.random', 'random.random', ([], {}), '()\n', (2764, 2766), False, 'import sys, random, socket, struct\n'), ((2768, 2783), 'random.random', 'random.random', ([], {}), '()\n', (2781, 2783), False, 'import sys, random, socket, struct\n'), ((888, 916), 'struct.unpack', 'struct.unpack', (['"""=BBff"""', 'data'], {}), "('=BBff', data)\n", (901, 916), False, 'import sys, random, socket, struct\n'), ((1082, 1112), 'struct.unpack', 'struct.unpack', (['"""=BBBBBB"""', 'data'], {}), "('=BBBBBB', data)\n", (1095, 1112), False, 'import sys, random, socket, struct\n'), ((1241, 1270), 'struct.unpack', 'struct.unpack', (['"""=BB16s"""', 'data'], {}), "('=BB16s', data)\n", (1254, 1270), False, 'import sys, random, socket, struct\n'), ((1758, 1781), 'math.atan2', 'atan2', (['self.hy', 'self.hx'], {}), '(self.hy, self.hx)\n', (1763, 1781), False, 'from math import sqrt, sin, cos, pi, atan2, fmod\n'), ((4262, 4298), 'numpy.uint8', 'np.uint8', (['[[im[kp.pt[1]][kp.pt[0]]]]'], {}), '([[im[kp.pt[1]][kp.pt[0]]]])\n', (4270, 4298), True, 'import numpy as np\n')] |
# vim: expandtab:ts=4:sw=4
import numpy as np
import scipy.linalg
import pdb
"""
Table for the 0.95 quantile of the chi-square distribution with N degrees of
freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv
function and used as Mahalanobis gating threshold.
"""
chi2inv95 = {
1: 3.8415,
2: 5.9915,
3: 7.8147,
4: 9.4877,
5: 11.070,
6: 12.592,
7: 14.067,
8: 15.507,
9: 16.919}
chi2inv90 = {
1: 2.706,
2: 4.605,
3: 6.251,
4: 7.779,
5: 9.236,
6: 10.645,
7: 12.017,
8: 13.363,
9: 14.684}
chi2inv975 = {
1: 5.025,
2: 7.378,
3: 9.348,
4: 11.143,
5: 12.833,
6: 14.449,
7: 16.013,
8: 17.535,
9: 19.023}
chi2inv10 = {
1: .016,
2: .221,
3: .584,
4: 1.064,
5: 1.610,
6: 2.204,
7: 2.833,
8: 3.490,
9: 4.168}
chi2inv995 = {
1: 0.0000393,
2: 0.0100,
3: .0717,
4: .207,
5: .412,
6: .676,
7: .989,
8: 1.344,
9: 1.735}
chi2inv75 = {
1: 1.323,
2: 2.773,
3: 4.108,
4: 5.385,
5: 6.626,
6: 7.841,
7: 9.037,
8: 10.22,
9: 11.39}
def squared_mahalanobis_distance(mean, covariance, measurements):
# cholesky factorization used to solve for
# z = d * inv(covariance)
# so z is also the solution to
# covariance * z = d
d = measurements - mean
# cholesky_factor = np.linalg.cholesky(covariance)
# z = scipy.linalg.solve_triangular(
# cholesky_factor, d.T, lower=True, check_finite=False,
# overwrite_b=True)
squared_maha = np.linalg.multi_dot([d, np.linalg.inv(covariance),
d.T]).diagonal()
return squared_maha
class EKF(object):
"""
Generic extended kalman filter class
"""
def __init__(self):
pass
def initiate(self, measurement):
"""Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the new track.
Unobserved velocities are initialized to 0 mean.
"""
pass
def predict_mean(self, mean):
# Updates predicted state from previous state (function g)
# Calculates motion update Jacobian (Gt)
# Returns (g(mean), Gt)
pass
def get_process_noise(self, mean, covariance):
# Returns Rt the motion noise covariance
pass
def predict_covariance(self, mean, covariance):
pass
def project_mean(self, mean):
# Measurement prediction from state (function h)
# Calculations sensor update Jacobian (Ht)
# Returns (h(mean), Ht)
pass
def project_cov(self, mean, covariance):
pass
def predict(self, mean, covariance):
"""Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The mean vector of the object state at the previous
time step.
covariance : ndarray
The covariance matrix of the object state at the
previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted
state. Unobserved velocities are initialized to 0 mean.
"""
# Perform prediction
covariance = self.predict_covariance(mean, covariance)
mean = self.predict_mean(mean)
return mean, covariance
def get_innovation_cov(self, covariance):
pass
def project(self, mean, covariance):
"""Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector
covariance : ndarray
The state's covariance matrix
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state
estimate.
"""
# Measurement uncertainty scaled by estimated height
return self.project_mean(mean), self.project_cov(mean, covariance)
def update(self, mean, covariance, measurement_t, marginalization=None, JPDA=False):
"""Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (8 dimensional).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
measurement : ndarray
The 4 dimensional measurement vector (x, y, a, h), where (x, y)
is the center position, a the aspect ratio, and h the height of the
bounding box.
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
predicted_measurement, innovation_cov = self.project(mean, covariance)
# cholesky factorization used to solve for kalman gain since
# K = covariance * update_mat.T * inv(innovation_cov)
# so K is also the solution to
# innovation_cov * K = covariance * update_mat.T
try:
chol_factor, lower = scipy.linalg.cho_factor(
innovation_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve(
(chol_factor, lower), np.dot(covariance, self._observation_mat.T).T,
check_finite=False).T
except:
# in case cholesky factorization fails, revert to standard solver
kalman_gain = np.linalg.solve(innovation_cov, np.dot(covariance, self._observation_mat.T).T).T
if JPDA:
# marginalization
innovation = np.zeros((self.ndim))
cov_soft = np.zeros((self.ndim, self.ndim))
for measurement_idx, measurement in enumerate(measurement_t):
p_ij = marginalization[measurement_idx + 1] # + 1 for dummy
y_ij = measurement - predicted_measurement
innovation += y_ij * p_ij
cov_soft += p_ij * np.outer(y_ij, y_ij)
cov_soft = cov_soft - np.outer(innovation, innovation)
P_star = covariance - np.linalg.multi_dot((
kalman_gain, innovation_cov, kalman_gain.T))
p_0 = marginalization[0]
P_0 = p_0 * covariance + (1 - p_0) * P_star
new_covariance = P_0 + np.linalg.multi_dot((kalman_gain, cov_soft, kalman_gain.T))
else:
innovation = measurement_t - predicted_measurement
new_covariance = covariance - np.linalg.multi_dot((
kalman_gain, innovation_cov, kalman_gain.T))
new_mean = mean + np.dot(innovation, kalman_gain.T)
return new_mean, new_covariance
| [
"numpy.linalg.multi_dot",
"numpy.dot",
"numpy.zeros",
"numpy.outer",
"numpy.linalg.inv"
] | [((5839, 5858), 'numpy.zeros', 'np.zeros', (['self.ndim'], {}), '(self.ndim)\n', (5847, 5858), True, 'import numpy as np\n'), ((5885, 5917), 'numpy.zeros', 'np.zeros', (['(self.ndim, self.ndim)'], {}), '((self.ndim, self.ndim))\n', (5893, 5917), True, 'import numpy as np\n'), ((6846, 6879), 'numpy.dot', 'np.dot', (['innovation', 'kalman_gain.T'], {}), '(innovation, kalman_gain.T)\n', (6852, 6879), True, 'import numpy as np\n'), ((6262, 6294), 'numpy.outer', 'np.outer', (['innovation', 'innovation'], {}), '(innovation, innovation)\n', (6270, 6294), True, 'import numpy as np\n'), ((6330, 6395), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['(kalman_gain, innovation_cov, kalman_gain.T)'], {}), '((kalman_gain, innovation_cov, kalman_gain.T))\n', (6349, 6395), True, 'import numpy as np\n'), ((6543, 6602), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['(kalman_gain, cov_soft, kalman_gain.T)'], {}), '((kalman_gain, cov_soft, kalman_gain.T))\n', (6562, 6602), True, 'import numpy as np\n'), ((6736, 6801), 'numpy.linalg.multi_dot', 'np.linalg.multi_dot', (['(kalman_gain, innovation_cov, kalman_gain.T)'], {}), '((kalman_gain, innovation_cov, kalman_gain.T))\n', (6755, 6801), True, 'import numpy as np\n'), ((1631, 1656), 'numpy.linalg.inv', 'np.linalg.inv', (['covariance'], {}), '(covariance)\n', (1644, 1656), True, 'import numpy as np\n'), ((6206, 6226), 'numpy.outer', 'np.outer', (['y_ij', 'y_ij'], {}), '(y_ij, y_ij)\n', (6214, 6226), True, 'import numpy as np\n'), ((5480, 5523), 'numpy.dot', 'np.dot', (['covariance', 'self._observation_mat.T'], {}), '(covariance, self._observation_mat.T)\n', (5486, 5523), True, 'import numpy as np\n'), ((5717, 5760), 'numpy.dot', 'np.dot', (['covariance', 'self._observation_mat.T'], {}), '(covariance, self._observation_mat.T)\n', (5723, 5760), True, 'import numpy as np\n')] |
from pathlib import Path
import torch
import pandas as pd
import numpy as np
def load_train_data(root: Path):
train_data_path = root / "train.csv"
train_data = pd.read_csv(train_data_path)
labels = train_data["label"].values
data = train_data.drop("label", axis=1).values.reshape(-1, 1, 28, 28)
train_x = torch.FloatTensor(data).expand(-1, 3, 28, 28)
train_y = torch.LongTensor(labels.tolist())
return train_x, train_y
def load_test_data(root: Path):
test_data_path = root / "test.csv"
test_data = pd.read_csv(test_data_path).values.reshape(-1, 1, 28, 28)
return torch.FloatTensor(test_data).expand(-1, 3, 28, 28)
def save_result(result):
np.savetxt(
"submission.csv",
np.dstack((np.arange(1, result.size + 1), result))[0],
"%d,%d",
header="ImageId,Label",
comments="",
)
| [
"torch.FloatTensor",
"pandas.read_csv",
"numpy.arange"
] | [((171, 199), 'pandas.read_csv', 'pd.read_csv', (['train_data_path'], {}), '(train_data_path)\n', (182, 199), True, 'import pandas as pd\n'), ((330, 353), 'torch.FloatTensor', 'torch.FloatTensor', (['data'], {}), '(data)\n', (347, 353), False, 'import torch\n'), ((611, 639), 'torch.FloatTensor', 'torch.FloatTensor', (['test_data'], {}), '(test_data)\n', (628, 639), False, 'import torch\n'), ((541, 568), 'pandas.read_csv', 'pd.read_csv', (['test_data_path'], {}), '(test_data_path)\n', (552, 568), True, 'import pandas as pd\n'), ((750, 779), 'numpy.arange', 'np.arange', (['(1)', '(result.size + 1)'], {}), '(1, result.size + 1)\n', (759, 779), True, 'import numpy as np\n')] |
import os
import re
import warnings
import numpy as np
from typing import Tuple, Dict, Generator, Optional, Union
# Custom types
Map_File_Type = Tuple[
np.ndarray, Union[Dict[str, np.ndarray], dict], np.ndarray, Optional[np.ndarray]
]
def load_3d_map_from_file(file_name: str) -> Map_File_Type:
"""map loader from file
given path to file, load 3D world map
Args:
file_name (str): Path to 3D world map data
Returns:
boundary (numpy.ndarray, shape=(6,)): physical limits of the world
obstacles Dict[str, (numpy.ndarray, shape=(6,))]: physical bounds of obstacles
start (numpy.ndarray, shape=3,)): start location
goal (numpy.ndarray, shape=(3,)): goal location
Raises:
FileNotFoundError: if file_name is not a valid file
NotImplementedError: if file format is not supported
"""
if not os.path.isfile(file_name):
raise FileNotFoundError("No such file found")
# Check format
file_ext = os.path.splitext(file_name)[-1]
if file_ext not in [".txt"]:
raise NotImplementedError("File format is not supported give .txt file")
return load_3d_map_from_txt(file_name)
def init_parse(f_name: str) -> Generator[Tuple[str, np.ndarray], None, None]:
"""initial text parser
given path to txt file, parse to tag, value pair
Args:
file_name (str): Path to .txt file
Yields:
tag (str): keyword tag
val (np.ndarray): the value associated with the tag
Raises:
"""
assert isinstance(f_name, str)
with open(f_name) as f:
for line in f.readlines():
line = line.strip("\n")
if not line or line[0] == "#":
continue
try:
tag, val = line.split(":")
val = np.array(re.split(" ,|,", val)).astype(float)
except ValueError:
raise SyntaxError("Invalid Syntax")
assert isinstance(tag, str)
assert isinstance(val, np.ndarray)
if tag not in {"boundary", "obstacle", "start", "goal"}:
raise SyntaxError("Invalid keyword")
if tag in {"boundary", "obstacle"}:
if len(val) != 6:
raise ValueError("Invalid Size")
else:
if len(val) != 3:
raise ValueError("Invalid Size")
yield tag, val
def load_3d_map_from_txt(f_name: str) -> Map_File_Type:
"""map loader from text file
given path to txt file, load 3D world map
Args:
file_name (str): Path to .txt file
Returns:
boundary (numpy.ndarray, shape=(6,)): physical limits of the world
obstacles (numpy.ndarray, shape=(6,)): physical bounds of obstacles
start (numpy.ndarray, shape=3,)): start location
goal (numpy.ndarray, shape=(3,)): goal location
Raises:
"""
obstacles = {}
unique_obstacles = set()
res = {}
for tag, val in init_parse(f_name):
if tag in {"start", "goal", "boundary"}:
if tag not in res:
res[tag] = val
else:
raise ValueError("repeating keyword")
else: # if tag == "obstacle":
obstacles[f"obstacle_{len(obstacles)}"] = val
obstacle = tuple(val)
if obstacle in unique_obstacles:
warnings.warn(f"Repeating obstacle {val}")
unique_obstacles.add(obstacle)
if "boundary" not in res:
raise KeyError("boundary not specified in the file")
if "start" not in res:
warnings.warn("start not given,assuming (0, 0, 0)")
start = np.zeros((3))
else:
start = res["start"]
if "goal" not in res:
warnings.warn("goal not given, assuming 'None'")
goal = None
else:
goal = res["goal"]
return res["boundary"], obstacles, start, goal
| [
"re.split",
"os.path.splitext",
"os.path.isfile",
"numpy.zeros",
"warnings.warn"
] | [((878, 903), 'os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (892, 903), False, 'import os\n'), ((994, 1021), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1010, 1021), False, 'import os\n'), ((3599, 3650), 'warnings.warn', 'warnings.warn', (['"""start not given,assuming (0, 0, 0)"""'], {}), "('start not given,assuming (0, 0, 0)')\n", (3612, 3650), False, 'import warnings\n'), ((3667, 3678), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3675, 3678), True, 'import numpy as np\n'), ((3755, 3803), 'warnings.warn', 'warnings.warn', (['"""goal not given, assuming \'None\'"""'], {}), '("goal not given, assuming \'None\'")\n', (3768, 3803), False, 'import warnings\n'), ((3385, 3427), 'warnings.warn', 'warnings.warn', (['f"""Repeating obstacle {val}"""'], {}), "(f'Repeating obstacle {val}')\n", (3398, 3427), False, 'import warnings\n'), ((1819, 1840), 're.split', 're.split', (['""" ,|,"""', 'val'], {}), "(' ,|,', val)\n", (1827, 1840), False, 'import re\n')] |
#!/usr/bin/env python
from time import time
import numpy as np
from metrics import Metrics
from model import Model
from utils import flatten
class TwoWayMetrics(Metrics):
# pylint: disable=too-many-instance-attributes
def __init__(self, epoch, n_low_level_fids):
self.first_gen_loss = []
self.first_gen_losses = {loss: [] for loss in Model.all_individual_losses}
self.first_disc_loss = []
self.first_disc_on_real = []
self.first_disc_on_generated = []
self.first_disc_on_training_mean = np.nan
self.first_disc_on_training_std = np.nan
self.first_disc_on_test_mean = np.nan
self.first_disc_on_test_std = np.nan
self.first_fid = np.nan
self.first_mmd = np.nan
self.first_clustering_high = np.nan
self.first_clustering_low = np.nan
self.first_low_level_fids = [np.nan] * n_low_level_fids
self.first_combined_fid = np.nan
self.second_gen_loss = []
self.second_gen_losses = {loss: [] for loss in Model.all_individual_losses}
self.second_disc_loss = []
self.second_disc_on_real = []
self.second_disc_on_generated = []
self.second_disc_on_training_mean = np.nan
self.second_disc_on_training_std = np.nan
self.second_disc_on_test_mean = np.nan
self.second_disc_on_test_std = np.nan
self.second_fid = np.nan
self.second_mmd = np.nan
self.second_clustering_high = np.nan
self.second_clustering_low = np.nan
self.second_low_level_fids = [np.nan] * n_low_level_fids
self.second_combined_fid = np.nan
super(TwoWayMetrics, self).__init__(epoch, n_low_level_fids)
@staticmethod
def get_column_names():
epoch_fields = ["epoch", "epoch_time"]
loss_fields = ["first_gen_loss_mean", "first_gen_loss_std", "first_disc_loss_mean", "first_disc_loss_std",
"second_gen_loss_mean", "second_gen_loss_std", "second_disc_loss_mean", "second_disc_loss_std"]
individual_loss_fields = flatten([["first_gen_{}_loss_mean".format(loss), "first_gen_{}_loss_std".format(loss)]
for loss in Model.all_individual_losses]) + \
flatten([["second_gen_{}_loss_mean".format(loss), "second_gen_{}_loss_std".format(loss)]
for loss in Model.all_individual_losses]) # order matters!
discrimination_fields = [
"first_disc_on_real_mean", "first_disc_on_real_std", "first_disc_on_generated_mean", "first_disc_on_generated_std",
"second_disc_on_real_mean", "second_disc_on_real_std", "second_disc_on_generated_mean", "second_disc_on_generated_std"
]
disc_overfitting_fields = [
"first_disc_on_training_mean", "first_disc_on_training_std", "first_disc_on_test_mean", "first_disc_on_test_std",
"second_disc_on_training_mean", "second_disc_on_training_std", "second_disc_on_test_mean", "second_disc_on_test_std"
]
perceptual_fields = ["first_fid", "first_mmd", "first_clustering_high", "first_clustering_low"] + \
["first_low_level_fid_{}".format(i+1) for i in range(4)] + ["first_combined_fid"] + \
["second_fid", "second_mmd", "second_clustering_high", "second_clustering_low"] + \
["second_low_level_fid_{}".format(i+1) for i in range(4)] + ["second_combined_fid"]
return epoch_fields + loss_fields + individual_loss_fields + discrimination_fields + disc_overfitting_fields + perceptual_fields
def add_losses(self, gen_losses, disc_loss):
first_gen_losses, second_gen_losses = gen_losses
first_disc_loss, second_disc_loss = disc_loss
self.first_gen_loss.append(sum(first_gen_losses.values()).numpy())
for loss in first_gen_losses:
self.first_gen_losses[loss].append(first_gen_losses[loss].numpy())
self.first_disc_loss.append(first_disc_loss.numpy())
self.second_gen_loss.append(sum(second_gen_losses.values()).numpy())
for loss in second_gen_losses:
self.second_gen_losses[loss].append(second_gen_losses[loss].numpy())
self.second_disc_loss.append(second_disc_loss.numpy())
def add_discriminations(self, disc_on_real, disc_on_generated):
first_disc_on_real, second_disc_on_real = disc_on_real
first_disc_on_generated, second_disc_on_generated = disc_on_generated
self.first_disc_on_real.extend(first_disc_on_real.numpy().reshape(-1))
self.first_disc_on_generated.extend(first_disc_on_generated.numpy().reshape(-1))
self.second_disc_on_real.extend(second_disc_on_real.numpy().reshape(-1))
self.second_disc_on_generated.extend(second_disc_on_generated.numpy().reshape(-1))
def add_perceptual_scores(self, fid, mmd, clustering_high, clustering_low, low_level_fids, combined_fid):
first_fid, second_fid = fid
first_mmd, second_mmd = mmd
first_clustering_high, second_clustering_high = clustering_high
first_clustering_low, second_clustering_low = clustering_low
first_low_level_fids, second_low_level_fids = low_level_fids
first_combined_fid, second_combined_fid = combined_fid
self.first_fid = first_fid.numpy()
self.first_mmd = first_mmd.numpy()
self.first_clustering_high = first_clustering_high
self.first_clustering_low = first_clustering_low
for i in range(self.n_low_level_fids):
self.first_low_level_fids[i] = first_low_level_fids[i].numpy()
self.first_combined_fid = first_combined_fid.numpy()
self.second_fid = second_fid.numpy()
self.second_mmd = second_mmd.numpy()
self.second_clustering_high = second_clustering_high
self.second_clustering_low = second_clustering_low
for i in range(self.n_low_level_fids):
self.second_low_level_fids[i] = second_low_level_fids[i].numpy()
self.second_combined_fid = second_combined_fid.numpy()
def add_disc_on_training_test(self, disc_on_training_mean, disc_on_training_std, disc_on_test_mean, disc_on_test_std):
first_disc_on_training_mean, second_disc_on_training_mean = disc_on_training_mean
first_disc_on_training_std, second_disc_on_training_std = disc_on_training_std
first_disc_on_test_mean, second_disc_on_test_mean = disc_on_test_mean
first_disc_on_test_std, second_disc_on_test_std = disc_on_test_std
self.first_disc_on_training_mean = first_disc_on_training_mean
self.first_disc_on_training_std = first_disc_on_training_std
self.first_disc_on_test_mean = first_disc_on_test_mean
self.first_disc_on_test_std = first_disc_on_test_std
self.second_disc_on_training_mean = second_disc_on_training_mean
self.second_disc_on_training_std = second_disc_on_training_std
self.second_disc_on_test_mean = second_disc_on_test_mean
self.second_disc_on_test_std = second_disc_on_test_std
def get_row_data(self):
raw_data = [self.first_gen_loss, self.first_disc_loss, self.second_gen_loss, self.second_disc_loss] + \
[self.first_gen_losses[loss] for loss in Model.all_individual_losses] + \
[self.second_gen_losses[loss] for loss in Model.all_individual_losses] + \
[self.first_disc_on_real, self.first_disc_on_generated] + \
[self.second_disc_on_real, self.second_disc_on_generated] # order matters (individual losses)
processed_data = [item for sublist in \
[[np.mean(values) if values else np.nan, np.std(values) if values else np.nan] for values in raw_data] \
for item in sublist]
disc_on_training_test = [
self.first_disc_on_training_mean, self.first_disc_on_training_std, self.first_disc_on_test_mean, self.first_disc_on_test_std,
self.second_disc_on_training_mean, self.second_disc_on_training_std, self.second_disc_on_test_mean, self.second_disc_on_test_std
]
perceptual_scores = [
self.first_fid, self.first_mmd, self.first_clustering_high, self.first_clustering_low] + \
self.first_low_level_fids + [self.first_combined_fid,
self.second_fid, self.second_mmd, self.second_clustering_high, self.second_clustering_low] + \
self.second_low_level_fids + [self.second_combined_fid
]
return [self.epoch, time()-self.start_time] + processed_data + disc_on_training_test + perceptual_scores
| [
"numpy.mean",
"time.time",
"numpy.std"
] | [((7106, 7121), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (7113, 7121), True, 'import numpy as np\n'), ((7145, 7159), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (7151, 7159), True, 'import numpy as np\n'), ((7948, 7954), 'time.time', 'time', ([], {}), '()\n', (7952, 7954), False, 'from time import time\n')] |
import pandas as pd
import numpy as np
import argparse
from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, \
annotate, \
element_blank, element_text, scale_x_discrete,geom_errorbar,position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, \
facet_wrap, geom_boxplot, geom_hline
import sys
sys.path.insert(0, './')
# ---------------------------------------------------------------------------
# Command-line arguments and experiment constants.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Effect_of_sigma_smooth_figure')
parser.add_argument('--comparison', action='store_true', help='Results on all datasets or on one')
# validate the dataset at parse time via `choices` instead of a post-hoc
# `assert` (asserts are stripped when Python runs with -O)
parser.add_argument('--dataset', default='CIFAR10', type=str,
                    choices=['CIFAR10', 'CIFAR100', 'ImageNet'],
                    help='Dataset to be used if results are only on one')
args = parser.parse_args()

alpha = 0.1       # nominal miscoverage level
epsilon = 0.125   # L2 bound on the adversarial noise
n_smooth = 256    # number of noise draws used for randomized smoothing
dataset = args.dataset  # one of 'CIFAR10', 'CIFAR100', 'ImageNet'
Regularization = False
comparison = args.comparison

# figure appearance constants
base_size = 18    # base font size for the plots
line_size = 1.5   # line width
error_bar = 0.25  # error-bar cap width

# plot either all datasets side by side or just the requested one
if comparison:
    datasets = ["CIFAR10", "CIFAR100", "ImageNet"]
else:
    datasets = [dataset]
# ---------------------------------------------------------------------------
# Collect mean/SEM of marginal coverage and prediction-set size for every
# smoothing ratio.  One results.csv is read per (dataset, sigma_smooth)
# combination; per-dataset summaries are stacked into one tidy frame `final`
# with columns: Dataset, ratio, Coverage, Coverage_STD, Size, Size_STD,
# Base Score.
# ---------------------------------------------------------------------------
frames = []
for dataset in datasets:
    if dataset == "CIFAR100":
        My_model = True
        normalized = True
        ratios = np.array([0.5, 1, 2, 3, 4, 6, 8])
    else:
        My_model = False
        normalized = False
        if dataset == "CIFAR10":
            ratios = np.array([0.5, 1, 2, 4, 8])
        else:
            # ImageNet runs use a larger perturbation and fewer noise draws
            epsilon = 0.25
            n_smooth = 64
            ratios = np.array([1, 2, 4])
    sigma_smooths = ratios * epsilon

    n_ratios = np.size(sigma_smooths)
    # row 0 holds the APS-based score, row 1 the HPS-based score
    Coverages_mean = np.zeros((2, n_ratios))
    Coverages_std = np.zeros((2, n_ratios))
    Sizes_mean = np.zeros((2, n_ratios))
    Sizes_std = np.zeros((2, n_ratios))

    for j, sigma_smooth in enumerate(sigma_smooths):
        # the model is evaluated with the same sigma it is smoothed with
        sigma_model = sigma_smooth
        directory = "./Results/" + str(dataset) + "/epsilon_" + str(epsilon) + "/sigma_model_" + str(
            sigma_model) + "/sigma_smooth_" + str(sigma_smooth) + "/n_smooth_" + str(n_smooth)
        if normalized:
            directory = directory + "/Robust"
        if dataset == "CIFAR10":
            if My_model:
                directory = directory + "/My_Model"
            else:
                directory = directory + "/Their_Model"
        if Regularization:
            directory = directory + "/Regularization"
        path = directory + "/results.csv"

        results = pd.read_csv(path)
        results = results.loc[:, ~results.columns.str.contains('^Unnamed')]
        results = results.drop(columns=['Black box', 'Conditional coverage', 'Size cover'])

        # extract both smoothed-score variants at the evaluated noise level
        for row, method in enumerate(["SC_smoothed_score_correction",
                                      "HCC_smoothed_score_correction"]):
            method_results = results[results["Method"] == method]
            data = method_results[method_results["noise_L2_norm"] == epsilon].copy()
            Coverages_mean[row, j] = data['Coverage'].mean()
            Coverages_std[row, j] = data['Coverage'].sem()
            Sizes_mean[row, j] = data['Size'].mean()
            Sizes_std[row, j] = data['Size'].sem()

    df1 = pd.DataFrame(
        {'Dataset': dataset, 'ratio': 1/ratios, 'Coverage': Coverages_mean[0, :], 'Coverage_STD': Coverages_std[0, :],
         'Size': Sizes_mean[0, :], 'Size_STD': Sizes_std[0, :], 'Base Score': 'APS'})
    df2 = pd.DataFrame(
        {'Dataset': dataset, 'ratio': 1/ratios, 'Coverage': Coverages_mean[1, :], 'Coverage_STD': Coverages_std[1, :],
         'Size': Sizes_mean[1, :], 'Size_STD': Sizes_std[1, :], 'Base Score': 'HPS'})
    # DataFrame.append was removed in pandas 2.0 -- use pd.concat instead,
    # and concatenate once at the end rather than growing `final` in a loop
    frames.append(pd.concat([df1, df2]))

final = pd.concat(frames)
if comparison:
    # Marginal coverage as a function of the ratio M = delta/sigma,
    # one free-scaled panel per dataset.
    coverage_plot = (
        ggplot(final, aes(x="ratio", y="Coverage", color='Base Score'))
        + geom_line(size=line_size)
        + facet_wrap('~ Dataset', scales="free", nrow=1)
        + labs(x=r'$M_{\delta}=\delta/\sigma$', y="Marginal Coverage", title="")
        + theme_bw(base_size=base_size)
        + theme(panel_grid_minor=element_blank(),
                panel_grid_major=element_line(size=0.2, colour="#d3d3d3"),
                plot_title=element_text(face="bold"),
                legend_background=element_rect(fill="None", size=4, colour="white"),
                text=element_text(size=base_size, face="plain"),
                legend_title_align='center',
                legend_direction='horizontal',
                legend_entry_spacing=10,
                subplots_adjust={'wspace': 0.25},
                legend_position="none")
        + scale_x_continuous(breaks=(1/8, 1/6, 1/4, 1/3, 1/2, 1, 2), trans='log2',
                             labels=(r'$\frac{1}{8}$', r'$\frac{1}{6}$', r'$\frac{1}{4}$',
                                     r'$\frac{1}{3}$', r'$\frac{1}{2}$', r'$1$', r'$2$'))
        + geom_errorbar(aes(ymin="Coverage-Coverage_STD", ymax="Coverage+Coverage_STD"), width=error_bar)
        + geom_point(size=2 * line_size)
    )
    coverage_plot.save('./Create_Figures/Figures/Effect_of_sigma_smooth_coverage_comparison.pdf',
                       width=15, height=4.8)

    # Average prediction-set size over the same ratios; legend shown below
    # the panels.
    size_plot = (
        ggplot(final, aes(x="ratio", y="Size", color='Base Score'))
        + geom_line(size=line_size)
        + facet_wrap('~ Dataset', scales="free", nrow=1)
        + labs(x=r'$M_{\delta}=\delta/\sigma$', y="Average Set Size", title="")
        + theme_bw(base_size=base_size)
        + theme(panel_grid_minor=element_blank(),
                panel_grid_major=element_line(size=0.2, colour="#d3d3d3"),
                plot_title=element_text(face="bold"),
                legend_background=element_rect(fill="None", size=4, colour="white"),
                text=element_text(size=base_size, face="plain"),
                legend_title_align='center',
                legend_direction='horizontal',
                legend_entry_spacing=10,
                subplots_adjust={'wspace': 0.25},
                legend_position=(0.5, -0.15))
        + scale_x_continuous(breaks=(1/8, 1/6, 1/4, 1/3, 1/2, 1, 2), trans='log2',
                             labels=(r'$\frac{1}{8}$', r'$\frac{1}{6}$', r'$\frac{1}{4}$',
                                     r'$\frac{1}{3}$', r'$\frac{1}{2}$', r'$1$', r'$2$'))
        + geom_errorbar(aes(ymin="Size-Size_STD", ymax="Size+Size_STD"), width=error_bar)
        + geom_point(size=2 * line_size)
    )
    size_plot.save('./Create_Figures/Figures/Effect_of_sigma_smooth_size_comparison.pdf',
                   width=15, height=4.8)
else:
p = ggplot(final,
aes(x="ratio", y="Coverage", color='Base Score')) \
+ geom_line(size=line_size) \
+ labs(x=r'$M_{\delta}=\delta/\sigma$', y="Marginal Coverage", title="") \
+ theme_bw(base_size=base_size) \
+ theme(panel_grid_minor=element_blank(),
panel_grid_major=element_line(size=0.2, colour="#d3d3d3"),
plot_title=element_text(face="bold"),
legend_background=element_rect(fill="None", size=4, colour="None"),
text=element_text(size=base_size,face="plain"),
legend_title_align='center',
legend_direction='horizontal',
legend_entry_spacing=10,
axis_title_x=element_text(margin={'t': 21}),
legend_position=(0.6, 0.6)) \
+ scale_x_continuous(breaks=(1/8, 1/6, 1/4, 1/3, 1/2, 1, 2), trans='log2', labels=('1/8', '1/6', '1/4', '1/3', '1/2', '1', '2')) \
+ geom_errorbar(aes(ymin="Coverage-Coverage_STD", ymax="Coverage+Coverage_STD"), width=error_bar) \
+ geom_point(size=2*line_size)
p.save('./Create_Figures/Figures/Effect_of_sigma_smooth_coverage_' + str(dataset) + '.pdf')
p = ggplot(final,
aes(x="ratio", y="Size", color='Base Score')) \
+ geom_line(size=line_size) \
+ labs(x=r'$M_{\delta}=\delta/\sigma$', y="Average Set Size", title="") \
+ theme_bw(base_size=base_size) \
+ theme(panel_grid_minor=element_blank(),
panel_grid_major=element_line(size=0.2, colour="#d3d3d3"),
plot_title=element_text(face="bold"),
legend_background=element_rect(fill="None", size=4, colour="None"),
text=element_text(size=base_size,face="plain"),
legend_title_align='center',
legend_direction='horizontal',
legend_entry_spacing=10,
axis_title_x=element_text(margin={'t': 21}),
legend_position=(0.45, 0.75)) \
+ scale_x_continuous(breaks=(1/8, 1/6, 1/4, 1/3, 1/2, 1, 2), trans='log2', labels=('1/8', '1/6', '1/4', '1/3', '1/2', '1', '2')) \
+ geom_errorbar(aes(ymin="Size-Size_STD", ymax="Size+Size_STD"), width=error_bar) \
+ geom_point(size=2*line_size)
p.save('./Create_Figures/Figures/Effect_of_sigma_smooth_size_' + str(dataset) + '.pdf') | [
"plotnine.element_blank",
"plotnine.element_line",
"sys.path.insert",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.size",
"plotnine.theme_bw",
"plotnine.geom_line",
"plotnine.aes",
"plotnine.element_text",
"plotnine.facet_wrap",
"numpy.array",
"plotnine.scale_x_continuous",
"plotni... | [((387, 411), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./"""'], {}), "(0, './')\n", (402, 411), False, 'import sys\n'), ((435, 503), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Effect_of_sigma_smooth_figure"""'}), "(description='Effect_of_sigma_smooth_figure')\n", (458, 503), False, 'import argparse\n'), ((3564, 3774), 'pandas.DataFrame', 'pd.DataFrame', (["{'Dataset': dataset, 'ratio': 1 / ratios, 'Coverage': Coverages_mean[0, :],\n 'Coverage_STD': Coverages_std[0, :], 'Size': Sizes_mean[0, :],\n 'Size_STD': Sizes_std[0, :], 'Base Score': 'APS'}"], {}), "({'Dataset': dataset, 'ratio': 1 / ratios, 'Coverage':\n Coverages_mean[0, :], 'Coverage_STD': Coverages_std[0, :], 'Size':\n Sizes_mean[0, :], 'Size_STD': Sizes_std[0, :], 'Base Score': 'APS'})\n", (3576, 3774), True, 'import pandas as pd\n'), ((3793, 4003), 'pandas.DataFrame', 'pd.DataFrame', (["{'Dataset': dataset, 'ratio': 1 / ratios, 'Coverage': Coverages_mean[1, :],\n 'Coverage_STD': Coverages_std[1, :], 'Size': Sizes_mean[1, :],\n 'Size_STD': Sizes_std[1, :], 'Base Score': 'HPS'}"], {}), "({'Dataset': dataset, 'ratio': 1 / ratios, 'Coverage':\n Coverages_mean[1, :], 'Coverage_STD': Coverages_std[1, :], 'Size':\n Sizes_mean[1, :], 'Size_STD': Sizes_std[1, :], 'Base Score': 'HPS'})\n", (3805, 4003), True, 'import pandas as pd\n'), ((1417, 1450), 'numpy.array', 'np.array', (['[0.5, 1, 2, 3, 4, 6, 8]'], {}), '([0.5, 1, 2, 3, 4, 6, 8])\n', (1425, 1450), True, 'import numpy as np\n'), ((2652, 2669), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (2663, 2669), True, 'import pandas as pd\n'), ((5314, 5344), 'plotnine.geom_point', 'geom_point', ([], {'size': '(2 * line_size)'}), '(size=2 * line_size)\n', (5324, 5344), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, 
scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((6620, 6650), 'plotnine.geom_point', 'geom_point', ([], {'size': '(2 * line_size)'}), '(size=2 * line_size)\n', (6630, 6650), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((7837, 7867), 'plotnine.geom_point', 'geom_point', ([], {'size': '(2 * line_size)'}), '(size=2 * line_size)\n', (7847, 7867), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((9021, 9051), 'plotnine.geom_point', 'geom_point', ([], {'size': '(2 * line_size)'}), '(size=2 * line_size)\n', (9031, 9051), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((1567, 1594), 'numpy.array', 'np.array', (['[0.5, 1, 2, 4, 8]'], {}), '([0.5, 1, 2, 4, 8])\n', (1575, 1594), True, 'import numpy as np\n'), ((1683, 1702), 'numpy.array', 'np.array', (['[1, 2, 4]'], {}), '([1, 2, 4])\n', (1691, 1702), True, 'import numpy as np\n'), ((1775, 1797), 'numpy.size', 'np.size', (['sigma_smooths'], {}), '(sigma_smooths)\n', (1782, 1797), True, 'import numpy as np\n'), ((1833, 1855), 'numpy.size', 'np.size', (['sigma_smooths'], {}), '(sigma_smooths)\n', (1840, 1855), True, 'import numpy as np\n'), ((1888, 1910), 'numpy.size', 
'np.size', (['sigma_smooths'], {}), '(sigma_smooths)\n', (1895, 1910), True, 'import numpy as np\n'), ((1942, 1964), 'numpy.size', 'np.size', (['sigma_smooths'], {}), '(sigma_smooths)\n', (1949, 1964), True, 'import numpy as np\n'), ((5006, 5210), 'plotnine.scale_x_continuous', 'scale_x_continuous', ([], {'breaks': '(1 / 8, 1 / 6, 1 / 4, 1 / 3, 1 / 2, 1, 2)', 'trans': '"""log2"""', 'labels': "('$\\\\frac{1}{8}$', '$\\\\frac{1}{6}$', '$\\\\frac{1}{4}$', '$\\\\frac{1}{3}$',\n '$\\\\frac{1}{2}$', '$1$', '$2$')"}), "(breaks=(1 / 8, 1 / 6, 1 / 4, 1 / 3, 1 / 2, 1, 2), trans=\n 'log2', labels=('$\\\\frac{1}{8}$', '$\\\\frac{1}{6}$', '$\\\\frac{1}{4}$',\n '$\\\\frac{1}{3}$', '$\\\\frac{1}{2}$', '$1$', '$2$'))\n", (5024, 5210), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5220, 5283), 'plotnine.aes', 'aes', ([], {'ymin': '"""Coverage-Coverage_STD"""', 'ymax': '"""Coverage+Coverage_STD"""'}), "(ymin='Coverage-Coverage_STD', ymax='Coverage+Coverage_STD')\n", (5223, 5283), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((6328, 6532), 'plotnine.scale_x_continuous', 'scale_x_continuous', ([], {'breaks': '(1 / 8, 1 / 6, 1 / 4, 1 / 3, 1 / 2, 1, 2)', 'trans': '"""log2"""', 'labels': "('$\\\\frac{1}{8}$', '$\\\\frac{1}{6}$', '$\\\\frac{1}{4}$', '$\\\\frac{1}{3}$',\n '$\\\\frac{1}{2}$', '$1$', '$2$')"}), "(breaks=(1 / 8, 1 / 6, 1 / 4, 1 / 3, 1 / 2, 1, 2), trans=\n 'log2', labels=('$\\\\frac{1}{8}$', '$\\\\frac{1}{6}$', '$\\\\frac{1}{4}$',\n 
'$\\\\frac{1}{3}$', '$\\\\frac{1}{2}$', '$1$', '$2$'))\n", (6346, 6532), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((6542, 6589), 'plotnine.aes', 'aes', ([], {'ymin': '"""Size-Size_STD"""', 'ymax': '"""Size+Size_STD"""'}), "(ymin='Size-Size_STD', ymax='Size+Size_STD')\n", (6545, 6589), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((7590, 7731), 'plotnine.scale_x_continuous', 'scale_x_continuous', ([], {'breaks': '(1 / 8, 1 / 6, 1 / 4, 1 / 3, 1 / 2, 1, 2)', 'trans': '"""log2"""', 'labels': "('1/8', '1/6', '1/4', '1/3', '1/2', '1', '2')"}), "(breaks=(1 / 8, 1 / 6, 1 / 4, 1 / 3, 1 / 2, 1, 2), trans=\n 'log2', labels=('1/8', '1/6', '1/4', '1/3', '1/2', '1', '2'))\n", (7608, 7731), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((7743, 7806), 'plotnine.aes', 'aes', ([], {'ymin': '"""Coverage-Coverage_STD"""', 'ymax': '"""Coverage+Coverage_STD"""'}), "(ymin='Coverage-Coverage_STD', ymax='Coverage+Coverage_STD')\n", (7746, 7806), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, 
theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8790, 8931), 'plotnine.scale_x_continuous', 'scale_x_continuous', ([], {'breaks': '(1 / 8, 1 / 6, 1 / 4, 1 / 3, 1 / 2, 1, 2)', 'trans': '"""log2"""', 'labels': "('1/8', '1/6', '1/4', '1/3', '1/2', '1', '2')"}), "(breaks=(1 / 8, 1 / 6, 1 / 4, 1 / 3, 1 / 2, 1, 2), trans=\n 'log2', labels=('1/8', '1/6', '1/4', '1/3', '1/2', '1', '2'))\n", (8808, 8931), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8943, 8990), 'plotnine.aes', 'aes', ([], {'ymin': '"""Size-Size_STD"""', 'ymax': '"""Size+Size_STD"""'}), "(ymin='Size-Size_STD', ymax='Size+Size_STD')\n", (8946, 8990), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4412, 4441), 'plotnine.theme_bw', 'theme_bw', ([], {'base_size': 'base_size'}), '(base_size=base_size)\n', (4420, 4441), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5727, 5756), 'plotnine.theme_bw', 'theme_bw', ([], {'base_size': 'base_size'}), '(base_size=base_size)\n', (5735, 5756), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, 
geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((6981, 7010), 'plotnine.theme_bw', 'theme_bw', ([], {'base_size': 'base_size'}), '(base_size=base_size)\n', (6989, 7010), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8179, 8208), 'plotnine.theme_bw', 'theme_bw', ([], {'base_size': 'base_size'}), '(base_size=base_size)\n', (8187, 8208), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4329, 4401), 'plotnine.labs', 'labs', ([], {'x': '"""$M_{\\\\delta}=\\\\delta/\\\\sigma$"""', 'y': '"""Marginal Coverage"""', 'title': '""""""'}), "(x='$M_{\\\\delta}=\\\\delta/\\\\sigma$', y='Marginal Coverage', title='')\n", (4333, 4401), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4477, 4492), 'plotnine.element_blank', 'element_blank', ([], {}), '()\n', (4490, 4492), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4527, 
4567), 'plotnine.element_line', 'element_line', ([], {'size': '(0.2)', 'colour': '"""#d3d3d3"""'}), "(size=0.2, colour='#d3d3d3')\n", (4539, 4567), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4596, 4621), 'plotnine.element_text', 'element_text', ([], {'face': '"""bold"""'}), "(face='bold')\n", (4608, 4621), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4657, 4706), 'plotnine.element_rect', 'element_rect', ([], {'fill': '"""None"""', 'size': '(4)', 'colour': '"""white"""'}), "(fill='None', size=4, colour='white')\n", (4669, 4706), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4729, 4771), 'plotnine.element_text', 'element_text', ([], {'size': 'base_size', 'face': '"""plain"""'}), "(size=base_size, face='plain')\n", (4741, 4771), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5645, 5716), 'plotnine.labs', 'labs', ([], {'x': '"""$M_{\\\\delta}=\\\\delta/\\\\sigma$"""', 'y': '"""Average Set 
Size"""', 'title': '""""""'}), "(x='$M_{\\\\delta}=\\\\delta/\\\\sigma$', y='Average Set Size', title='')\n", (5649, 5716), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5792, 5807), 'plotnine.element_blank', 'element_blank', ([], {}), '()\n', (5805, 5807), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5842, 5882), 'plotnine.element_line', 'element_line', ([], {'size': '(0.2)', 'colour': '"""#d3d3d3"""'}), "(size=0.2, colour='#d3d3d3')\n", (5854, 5882), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5911, 5936), 'plotnine.element_text', 'element_text', ([], {'face': '"""bold"""'}), "(face='bold')\n", (5923, 5936), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5972, 6021), 'plotnine.element_rect', 'element_rect', ([], {'fill': '"""None"""', 'size': '(4)', 'colour': '"""white"""'}), "(fill='None', size=4, colour='white')\n", (5984, 6021), False, 'from plotnine import ggplot, 
scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((6044, 6086), 'plotnine.element_text', 'element_text', ([], {'size': 'base_size', 'face': '"""plain"""'}), "(size=base_size, face='plain')\n", (6056, 6086), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((6898, 6970), 'plotnine.labs', 'labs', ([], {'x': '"""$M_{\\\\delta}=\\\\delta/\\\\sigma$"""', 'y': '"""Marginal Coverage"""', 'title': '""""""'}), "(x='$M_{\\\\delta}=\\\\delta/\\\\sigma$', y='Marginal Coverage', title='')\n", (6902, 6970), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((7046, 7061), 'plotnine.element_blank', 'element_blank', ([], {}), '()\n', (7059, 7061), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((7096, 7136), 'plotnine.element_line', 'element_line', ([], {'size': '(0.2)', 'colour': '"""#d3d3d3"""'}), "(size=0.2, colour='#d3d3d3')\n", (7108, 7136), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, 
scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((7165, 7190), 'plotnine.element_text', 'element_text', ([], {'face': '"""bold"""'}), "(face='bold')\n", (7177, 7190), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((7226, 7274), 'plotnine.element_rect', 'element_rect', ([], {'fill': '"""None"""', 'size': '(4)', 'colour': '"""None"""'}), "(fill='None', size=4, colour='None')\n", (7238, 7274), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((7297, 7339), 'plotnine.element_text', 'element_text', ([], {'size': 'base_size', 'face': '"""plain"""'}), "(size=base_size, face='plain')\n", (7309, 7339), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((7502, 7532), 'plotnine.element_text', 'element_text', ([], {'margin': "{'t': 21}"}), "(margin={'t': 21})\n", (7514, 7532), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, 
facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8097, 8168), 'plotnine.labs', 'labs', ([], {'x': '"""$M_{\\\\delta}=\\\\delta/\\\\sigma$"""', 'y': '"""Average Set Size"""', 'title': '""""""'}), "(x='$M_{\\\\delta}=\\\\delta/\\\\sigma$', y='Average Set Size', title='')\n", (8101, 8168), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8244, 8259), 'plotnine.element_blank', 'element_blank', ([], {}), '()\n', (8257, 8259), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8294, 8334), 'plotnine.element_line', 'element_line', ([], {'size': '(0.2)', 'colour': '"""#d3d3d3"""'}), "(size=0.2, colour='#d3d3d3')\n", (8306, 8334), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8363, 8388), 'plotnine.element_text', 'element_text', ([], {'face': '"""bold"""'}), "(face='bold')\n", (8375, 8388), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8424, 8472), 'plotnine.element_rect', 
'element_rect', ([], {'fill': '"""None"""', 'size': '(4)', 'colour': '"""None"""'}), "(fill='None', size=4, colour='None')\n", (8436, 8472), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8495, 8537), 'plotnine.element_text', 'element_text', ([], {'size': 'base_size', 'face': '"""plain"""'}), "(size=base_size, face='plain')\n", (8507, 8537), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8700, 8730), 'plotnine.element_text', 'element_text', ([], {'margin': "{'t': 21}"}), "(margin={'t': 21})\n", (8712, 8730), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4270, 4316), 'plotnine.facet_wrap', 'facet_wrap', (['"""~ Dataset"""'], {'scales': '"""free"""', 'nrow': '(1)'}), "('~ Dataset', scales='free', nrow=1)\n", (4280, 4316), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5586, 5632), 'plotnine.facet_wrap', 'facet_wrap', (['"""~ Dataset"""'], {'scales': '"""free"""', 'nrow': '(1)'}), "('~ Dataset', 
scales='free', nrow=1)\n", (5596, 5632), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((6860, 6885), 'plotnine.geom_line', 'geom_line', ([], {'size': 'line_size'}), '(size=line_size)\n', (6869, 6885), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8059, 8084), 'plotnine.geom_line', 'geom_line', ([], {'size': 'line_size'}), '(size=line_size)\n', (8068, 8084), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4232, 4257), 'plotnine.geom_line', 'geom_line', ([], {'size': 'line_size'}), '(size=line_size)\n', (4241, 4257), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5548, 5573), 'plotnine.geom_line', 'geom_line', ([], {'size': 'line_size'}), '(size=line_size)\n', (5557, 5573), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, 
scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((6798, 6846), 'plotnine.aes', 'aes', ([], {'x': '"""ratio"""', 'y': '"""Coverage"""', 'color': '"""Base Score"""'}), "(x='ratio', y='Coverage', color='Base Score')\n", (6801, 6846), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((8001, 8045), 'plotnine.aes', 'aes', ([], {'x': '"""ratio"""', 'y': '"""Size"""', 'color': '"""Base Score"""'}), "(x='ratio', y='Size', color='Base Score')\n", (8004, 8045), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((4170, 4218), 'plotnine.aes', 'aes', ([], {'x': '"""ratio"""', 'y': '"""Coverage"""', 'color': '"""Base Score"""'}), "(x='ratio', y='Coverage', color='Base Score')\n", (4173, 4218), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n'), ((5490, 5534), 'plotnine.aes', 'aes', ([], {'x': '"""ratio"""', 'y': '"""Size"""', 'color': '"""Base Score"""'}), "(x='ratio', y='Size', color='Base Score')\n", (5493, 5534), False, 'from plotnine import ggplot, scale_x_continuous, theme_bw, element_rect, element_line, geom_line, scale_color_brewer, annotate, element_blank, element_text, scale_x_discrete, geom_errorbar, position_dodge, 
scale_y_continuous, aes, theme, facet_grid, labs, geom_point, facet_wrap, geom_boxplot, geom_hline\n')] |
import numpy as np
import logging
# Exclusive upper bound for seeds drawn with np.random.randint(max_seed)
# below: 2**31 - 1, the maximum value of a signed 32-bit integer.
max_seed = 2147483647
#logging.basicConfig(level="DEBUG")
# Module-level logger used by the simulation wrappers below.
logger = logging.getLogger("TUMOR2D")
#logger.setLevel("ERROR")
class Tumor2dExperiment:
    """Container for experimental tumor-growth summary statistics.

    Holds per-quantity means and standard deviations for the growth
    curve (gc), the extra-cellular matrix profile (ecm) and the
    proliferation profile (prolif), plus, optionally, the full
    underlying data arrays they were computed from.
    """

    def __init__(self, mean_gc, mean_ecm, mean_prolif, std_gc, std_ecm, std_prolif, full_data_gc=None,
                 full_data_ecm=None, full_data_prolif=None):
        # Optional raw replicate data backing the summary statistics.
        self.full_data_gc = full_data_gc
        self.full_data_ecm = full_data_ecm
        self.full_data_prolif = full_data_prolif
        # Summary statistics: mean and standard deviation per quantity.
        self.mean_gc = mean_gc
        self.mean_ecm = mean_ecm
        self.mean_prolif = mean_prolif
        self.std_gc = std_gc
        self.std_ecm = std_ecm
        self.std_prolif = std_prolif

    def compare_with_simulation(self, sim):
        """Compare this experiment with a simulation result.

        NOTE(review): placeholder — always returns 0 and ignores ``sim``.
        The original body initialized per-quantity score variables that
        were never used; those dead locals have been removed. A real
        distance metric still needs to be implemented here.

        Parameters
        ----------
        sim :
            Simulation result to compare against (currently ignored).

        Returns
        -------
        int
            Always 0 (no comparison implemented yet).
        """
        return 0
class Tumor2dSimulation:
    """Container for the outputs of one tumor2d run.

    Bundles the simulator's three result arrays under descriptive
    attribute names: the growth curve, the extra-cellular-matrix profile
    and the proliferation profile.
    """

    def __init__(self, val_gc, val_ecm, val_prolif):
        # Store the three outputs as-is; no copies or conversions are made.
        self.proliferation = val_prolif
        self.extra_cellular_matrix = val_ecm
        self.growth_curve = val_gc
def tumor2d_simulate(initial_radius=12.0, initial_quiescent_fraction=0.75, max_celldivision_rate=0.0417,
                     division_depth=100, ecm_threshold_quiescence=0.010, emc_productionrate=0.005,
                     ecm_degradationrate=0.0008, endtime=500, outputrate=24, profiletime=408, profiledepth=1000,
                     randseed=None):
    """
    Run a single Tumor2d simulation and wrap its outputs.

    *Not* according to the published paper.

    Parameters
    ----------
    randseed : int
        Mandatory seed for the simulator's RNG.  The remaining parameters
        are forwarded unchanged to ``nixTumor2d.tumor2d_interface``.

    Returns
    -------
    Tumor2dSimulation
        Growth curve, ECM profile and proliferation profile of the run.

    Raises
    ------
    ValueError
        If ``randseed`` is None.
    """
    # Fail fast *before* importing the native module below.  The original
    # raised a bare ``Exception`` after the import; ValueError is more
    # precise and is still caught by any ``except Exception`` handler.
    if randseed is None:
        raise ValueError("Randseed necessary.")
    # don't load at module level due to memory leaks in the original code
    import tumor2d.src.nixTumor2d as nixTumor2d
    # The native interface takes the profile time divided by 24.
    # NOTE(review): presumably an hours -> days conversion; confirm units
    # with callers.
    profiletime /= 24
    pars = str(locals())
    logger.debug(f"START:{pars}")
    growth_curve, ecm_prof, prolif_prof = nixTumor2d.tumor2d_interface(initial_radius, initial_quiescent_fraction,
                                                                 max_celldivision_rate, division_depth,
                                                                 ecm_threshold_quiescence,
                                                                 emc_productionrate, ecm_degradationrate,
                                                                 endtime, outputrate, profiletime, profiledepth,
                                                                 randseed)
    logger.debug(f"DONE:{pars}")
    return Tumor2dSimulation(growth_curve, ecm_prof, prolif_prof)
def tumor2d_statistic(num_reps=10, initial_radius=12.0, initial_quiescent_fraction=0.75, max_celldivision_rate=0.0417,
                      division_depth=100, ecm_threshold_quiescence=0.010, emc_productionrate=0.005,
                      ecm_degradationrate=0.0008, endtime=500, outputrate=24, profiletime=408, profiledepth=1000,
                      randseed=np.nan):
    """
    Run ``num_reps`` Tumor2d simulations and summarize them.

    Each replicate is run via :func:`tumor2d_simulate` with a fresh
    sub-seed drawn from NumPy's global RNG (seeded from ``randseed``).

    Parameters
    ----------
    num_reps : int
        Number of replicate simulations.
    randseed : int or nan
        Master seed; when nan (the default) a random seed is drawn.
        The remaining parameters are forwarded to ``tumor2d_simulate``.

    Returns
    -------
    Tumor2dExperiment
        Per-position means and scalar (max over positions) standard
        deviations of growth curve, ECM and proliferation profiles,
        together with the full replicate data.
    """
    if np.isnan(randseed):
        randseed = np.random.randint(max_seed)
    np.random.seed(randseed)
    # Builtin int(): the np.int alias was deprecated in NumPy 1.20 and
    # removed in 1.24.
    full_data_gc = np.empty([num_reps, int(np.floor(endtime / outputrate))])
    full_data_ecm = np.empty([num_reps, profiledepth])
    full_data_prolif = np.empty([num_reps, profiledepth])
    for i in range(num_reps):
        # Independent sub-seed per replicate, reproducible via randseed.
        seed_simu = np.random.randint(max_seed)
        simu = tumor2d_simulate(initial_radius=initial_radius, initial_quiescent_fraction=initial_quiescent_fraction,
                                max_celldivision_rate=max_celldivision_rate,
                                division_depth=division_depth,
                                ecm_threshold_quiescence=ecm_threshold_quiescence,
                                emc_productionrate=emc_productionrate, ecm_degradationrate=ecm_degradationrate,
                                endtime=endtime, outputrate=outputrate, profiletime=profiletime,
                                profiledepth=profiledepth, randseed=seed_simu)
        full_data_gc[i] = simu.growth_curve
        full_data_ecm[i] = simu.extra_cellular_matrix
        full_data_prolif[i] = simu.proliferation
    mean_gc = np.mean(full_data_gc, axis=0)
    mean_ecm = np.mean(full_data_ecm, axis=0)
    mean_prolif = np.mean(full_data_prolif, axis=0)
    # Scalar summaries: the worst-case (max over positions) std deviation.
    std_gc = np.max(np.std(full_data_gc, axis=0))
    std_ecm = np.max(np.std(full_data_ecm, axis=0))
    std_prolif = np.max(np.std(full_data_prolif, axis=0))
    result = Tumor2dExperiment(mean_gc, mean_ecm, mean_prolif, std_gc, std_ecm, std_prolif, full_data_gc, full_data_ecm,
                               full_data_prolif)
    return result
| [
"logging.getLogger",
"numpy.mean",
"numpy.floor",
"numpy.random.randint",
"numpy.empty",
"numpy.random.seed",
"numpy.isnan",
"numpy.std",
"tumor2d.src.nixTumor2d.tumor2d_interface"
] | [((103, 131), 'logging.getLogger', 'logging.getLogger', (['"""TUMOR2D"""'], {}), "('TUMOR2D')\n", (120, 131), False, 'import logging\n'), ((1802, 2050), 'tumor2d.src.nixTumor2d.tumor2d_interface', 'nixTumor2d.tumor2d_interface', (['initial_radius', 'initial_quiescent_fraction', 'max_celldivision_rate', 'division_depth', 'ecm_threshold_quiescence', 'emc_productionrate', 'ecm_degradationrate', 'endtime', 'outputrate', 'profiletime', 'profiledepth', 'randseed'], {}), '(initial_radius, initial_quiescent_fraction,\n max_celldivision_rate, division_depth, ecm_threshold_quiescence,\n emc_productionrate, ecm_degradationrate, endtime, outputrate,\n profiletime, profiledepth, randseed)\n', (1830, 2050), True, 'import tumor2d.src.nixTumor2d as nixTumor2d\n'), ((2896, 2914), 'numpy.isnan', 'np.isnan', (['randseed'], {}), '(randseed)\n', (2904, 2914), True, 'import numpy as np\n'), ((2967, 2991), 'numpy.random.seed', 'np.random.seed', (['randseed'], {}), '(randseed)\n', (2981, 2991), True, 'import numpy as np\n'), ((3092, 3126), 'numpy.empty', 'np.empty', (['[num_reps, profiledepth]'], {}), '([num_reps, profiledepth])\n', (3100, 3126), True, 'import numpy as np\n'), ((3150, 3184), 'numpy.empty', 'np.empty', (['[num_reps, profiledepth]'], {}), '([num_reps, profiledepth])\n', (3158, 3184), True, 'import numpy as np\n'), ((4053, 4082), 'numpy.mean', 'np.mean', (['full_data_gc'], {'axis': '(0)'}), '(full_data_gc, axis=0)\n', (4060, 4082), True, 'import numpy as np\n'), ((4098, 4128), 'numpy.mean', 'np.mean', (['full_data_ecm'], {'axis': '(0)'}), '(full_data_ecm, axis=0)\n', (4105, 4128), True, 'import numpy as np\n'), ((4147, 4180), 'numpy.mean', 'np.mean', (['full_data_prolif'], {'axis': '(0)'}), '(full_data_prolif, axis=0)\n', (4154, 4180), True, 'import numpy as np\n'), ((2935, 2962), 'numpy.random.randint', 'np.random.randint', (['max_seed'], {}), '(max_seed)\n', (2952, 2962), True, 'import numpy as np\n'), ((3235, 3262), 'numpy.random.randint', 'np.random.randint', 
(['max_seed'], {}), '(max_seed)\n', (3252, 3262), True, 'import numpy as np\n'), ((4201, 4229), 'numpy.std', 'np.std', (['full_data_gc'], {'axis': '(0)'}), '(full_data_gc, axis=0)\n', (4207, 4229), True, 'import numpy as np\n'), ((4252, 4281), 'numpy.std', 'np.std', (['full_data_ecm'], {'axis': '(0)'}), '(full_data_ecm, axis=0)\n', (4258, 4281), True, 'import numpy as np\n'), ((4307, 4339), 'numpy.std', 'np.std', (['full_data_prolif'], {'axis': '(0)'}), '(full_data_prolif, axis=0)\n', (4313, 4339), True, 'import numpy as np\n'), ((3038, 3068), 'numpy.floor', 'np.floor', (['(endtime / outputrate)'], {}), '(endtime / outputrate)\n', (3046, 3068), True, 'import numpy as np\n')] |
import io
import os
import numpy as np
import requests as rq
import onnxruntime as ort
from PIL import Image
from app.utils import load_labels, softmax
class Predictor:
    """ONNX image classifier: fetch an image by URL, run inference,
    return the most likely label."""

    def __init__(self, mode_path) -> None:
        """Load the ONNX model at *mode_path*, resolved against the
        current working directory.

        NOTE(review): "mode_path" looks like a typo for "model_path";
        the name is kept for API compatibility with keyword callers.
        """
        self.model = ort.InferenceSession(f'{os.getcwd()}/{mode_path}', None)

    def pre_process(self, input_data):
        """Convert a PIL image into a normalized NCHW float32 batch of one.

        Resizes to 224x224, reorders HWC -> CHW, scales to [0, 1] and
        normalizes each channel with the standard ImageNet mean/stddev.
        """
        width = 224
        height = 224
        image = input_data.resize((width, height), Image.BILINEAR)
        # HWC -> CHW
        img_data = np.array(image).transpose(2, 0, 1).astype('float32')
        # ImageNet per-channel statistics.
        mean_vec = np.array([0.485, 0.456, 0.406])
        stddev_vec = np.array([0.229, 0.224, 0.225])
        # Vectorized per-channel normalization; replaces the explicit
        # channel loop with a single broadcast expression.
        norm_img_data = (img_data / 255 - mean_vec[:, None, None]) / stddev_vec[:, None, None]
        # add batch channel
        return norm_img_data.reshape(1, 3, 224, 224).astype('float32')

    def predict(self, image_url):
        """Download *image_url*, run the model and return the predicted label."""
        response = rq.get(image_url)
        # Fail loudly on HTTP errors instead of feeding an error page to PIL.
        response.raise_for_status()
        image = Image.open(io.BytesIO(response.content))
        input_data = self.pre_process(image)
        input_name = self.model.get_inputs()[0].name
        raw_result = self.model.run([], {input_name: input_data})
        return self.post_process(raw_result)

    def post_process(self, raw_result):
        """Map raw model outputs to the most likely label from labels.json."""
        labels = load_labels(f'{os.getcwd()}/app/labels.json')
        res = softmax(np.array(raw_result)).tolist()
        idx = np.argmax(res)
        return labels[idx]
| [
"PIL.Image.open",
"io.BytesIO",
"numpy.argmax",
"requests.get",
"os.getcwd",
"numpy.array",
"numpy.zeros"
] | [((568, 599), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (576, 599), True, 'import numpy as np\n'), ((617, 648), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (625, 648), True, 'import numpy as np\n'), ((1025, 1042), 'requests.get', 'rq.get', (['image_url'], {}), '(image_url)\n', (1031, 1042), True, 'import requests as rq\n'), ((1061, 1089), 'io.BytesIO', 'io.BytesIO', (['response.content'], {}), '(response.content)\n', (1071, 1089), False, 'import io\n'), ((1102, 1125), 'PIL.Image.open', 'Image.open', (['image_bytes'], {}), '(image_bytes)\n', (1112, 1125), False, 'from PIL import Image\n'), ((1492, 1506), 'numpy.argmax', 'np.argmax', (['res'], {}), '(res)\n', (1501, 1506), True, 'import numpy as np\n'), ((462, 477), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (470, 477), True, 'import numpy as np\n'), ((669, 693), 'numpy.zeros', 'np.zeros', (['img_data.shape'], {}), '(img_data.shape)\n', (677, 693), True, 'import numpy as np\n'), ((252, 263), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (261, 263), False, 'import os\n'), ((1402, 1413), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1411, 1413), False, 'import os\n'), ((1451, 1471), 'numpy.array', 'np.array', (['raw_result'], {}), '(raw_result)\n', (1459, 1471), True, 'import numpy as np\n')] |
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import numpy as np
import opveclib as ovl
import tensorflow as tf
import itertools
import time
@ovl.operator()
def conv_1d(x, v, kernel_orientation='as-is', stride=1, mode='same', data_format='NCE'):
    """
    Define the operator function.

    :param x: An input tensor of shape [num_batches, num_channels, num_elements].
    :param v: A filter/kernel of shape [num_filters, num_channels, kernel_size].
    :param kernel_orientation: The orientation of the kernel to use: 'as-is' or 'flipped'. This language is used
        rather than 'convolution' or 'cross-correlation' since the terms have become overloaded and ambiguous across
        some fields. As defined in https://en.wikipedia.org/wiki/Cross-correlation#Properties, 'as-is' yields the
        cross-correlation and 'flipped' yields the convolution.
    :param stride: kernel stride to use.  NOTE(review): stride is validated
        below but never used in the index arithmetic -- only stride=1
        behavior appears to be implemented.
    :param mode: border mode: 'same', 'valid', or 'full'
    :param data_format: order of the dimensions in the input: 'NCE', 'NEC' etc.
    :return: an output tensor of shape [num_batches, num_filters, num_elements]
    """
    if kernel_orientation != 'as-is' and kernel_orientation != 'flipped':
        raise ValueError("kernel_orientation must be 'as-is' or 'flipped'")
    # resolve data layout based on data_format input
    assert x.rank == 3
    assert len(data_format) == 3
    assert data_format.count('N') == 1
    assert data_format.count('C') == 1
    assert data_format.count('E') == 1
    n_axis = data_format.find('N')
    c_axis = data_format.find('C')
    e_axis = data_format.find('E')
    num_elements = x.shape[e_axis]
    num_channels = x.shape[c_axis]
    num_batches = x.shape[n_axis]
    assert v.rank == 3
    if num_channels != v.shape[c_axis]:
        raise ValueError('Channel axis size of input must match that of the filter.')
    num_filters = v.shape[n_axis]
    filter_size = v.shape[e_axis]
    # Elements hanging over the left/right edge when the kernel is centered.
    left_apron = filter_size // 2
    right_apron = filter_size - left_apron - 1
    if not isinstance(stride, int) or stride < 1 or stride > num_elements:
        raise ValueError('Stride must be a positive integer')
    # starting_element/ending_element give the input index range the kernel
    # origin sweeps over for the chosen border mode.
    if mode == 'same':
        if filter_size > num_elements:
            raise ValueError('filter size, ' + str(filter_size) +
                             ', cannot be larger than number of elements, ' + str(num_elements))
        starting_element = -left_apron
        ending_element = num_elements - left_apron
    elif mode == 'valid':
        if filter_size > num_elements:
            raise ValueError('filter size, ' + str(filter_size) +
                             ', cannot be larger than number of elements, ' + str(num_elements))
        starting_element = 0
        ending_element = num_elements - (left_apron + right_apron)
    elif mode == 'full':
        starting_element = -(filter_size - 1)
        ending_element = num_elements
    else:
        raise ValueError("mode must be 'same', 'valid', or 'full'.")
    output_elements = (ending_element - starting_element)
    output_shape = [0, 0, 0]
    output_shape[n_axis] = num_batches
    output_shape[c_axis] = num_filters
    output_shape[e_axis] = output_elements
    output = ovl.output(output_shape, x.dtype)
    # Tile the (batch, filter, element) space across OVL workers.  The
    # *_remainder workers handle the ragged tail of each dimension.
    filters_per_worker = 1
    filter_workers, filter_remainder = divmod(num_filters, filters_per_worker)
    if filter_remainder > 0:
        filter_workers += 1
    batches_per_worker = 1
    batch_workers, batch_remainder = divmod(num_batches, batches_per_worker)
    if batch_remainder > 0:
        batch_workers += 1
    elements_per_worker = 10
    element_workers, element_remainder = divmod(output_elements, elements_per_worker)
    if element_remainder > 0:
        element_workers += 1
    workgroup_shape = [batch_workers, filter_workers, element_workers]
    ovl.logger.debug(u' workgroup_shape: ' + str(workgroup_shape))
    pos = ovl.position_in(workgroup_shape)
    cur_batch_block = pos[0]
    cur_filter_block = pos[1]
    cur_element_block = pos[2]
    # OVL DSL note: `ovl.variable` declares a runtime variable and `<<=` is
    # its assignment operator (this is generated code, not Python shifts).
    num_block_batches = ovl.variable(batches_per_worker, ovl.uint32)
    if batch_remainder > 0:
        with ovl.if_(cur_batch_block == batch_workers-1):
            num_block_batches <<= batch_remainder
    num_block_filters = ovl.variable(filters_per_worker, ovl.uint32)
    if filter_remainder > 0:
        with ovl.if_(cur_filter_block == filter_workers-1):
            num_block_filters <<= filter_remainder
    num_block_elements = ovl.variable(elements_per_worker, ovl.uint32)
    if element_remainder > 0:
        with ovl.if_(cur_element_block == element_workers-1):
            num_block_elements <<= element_remainder
    # Per-worker scratch: accumulator plus one filter tile and one input
    # tile (the input tile is used as a ring buffer below).
    accum = ovl.zeros((batches_per_worker, filters_per_worker, elements_per_worker), ovl.float64) #4*4
    filter_block = ovl.zeros((filters_per_worker, filter_size), v.dtype) #4*10
    input_block = ovl.zeros((batches_per_worker, filter_size), x.dtype) #4*10
    for cur_channel in ovl.arange(num_channels):
        # load all filters for this channel
        for intra_block_filter in ovl.arange(filters_per_worker):
            for f_pos in ovl.arange(filter_size):
                filter_index = [None, None, None]
                filter_index[c_axis] = cur_channel
                filter_index[n_axis] = ovl.cast(intra_block_filter, ovl.uint32) + cur_filter_block * filters_per_worker
                # 'flipped' reverses the kernel, turning the
                # cross-correlation below into a convolution.
                if kernel_orientation == 'as-is':
                    filter_index[e_axis] = f_pos
                elif kernel_orientation == 'flipped':
                    filter_index[e_axis] = filter_size - f_pos - 1
                else:
                    raise ValueError("kernel_orientation must be 'as-is' or 'flipped'")
                filter_block[intra_block_filter, f_pos] = v[filter_index]
        # load initial inputs for this channel
        buffer_head = ovl.variable(0, ovl.uint32)
        for intra_block_batch in ovl.arange(num_block_batches):
            cur_batch = intra_block_batch + cur_batch_block*batches_per_worker
            for f_pos in ovl.arange(filter_size):
                x_index = [None, None, None]
                x_index[c_axis] = cur_channel
                x_index[n_axis] = cur_batch
                x_elem_index = starting_element + ovl.cast(cur_element_block * elements_per_worker, ovl.uint64) + ovl.cast(f_pos, ovl.uint64)
                x_index[e_axis] = x_elem_index
                # Out-of-range reads are zero-padded.
                index_in_bounds = ovl.logical_and(x_elem_index >= 0, x_elem_index < num_elements)
                with ovl.if_(index_in_bounds):
                    input_block[intra_block_batch, f_pos] = x[x_index]
                with ovl.else_():
                    input_block[intra_block_batch, f_pos] = 0
        for intra_block_element in ovl.arange(num_block_elements):
            cur_elem = intra_block_element + cur_element_block*elements_per_worker
            for intra_block_batch in ovl.arange(num_block_batches):
                cur_batch = intra_block_batch + cur_batch_block*batches_per_worker
                for intra_block_filter in ovl.arange(num_block_filters):
                    for f_pos in ovl.arange(filter_size):
                        # Read the input tile as a ring buffer starting at
                        # buffer_head; accumulate in float64.
                        x_pos = (buffer_head + ovl.cast(f_pos, ovl.uint32)) % filter_size
                        cur_x = ovl.cast(input_block[intra_block_batch, x_pos], ovl.float64)
                        cur_v = ovl.cast(filter_block[intra_block_filter, f_pos], ovl.float64)
                        accum[intra_block_batch, intra_block_filter, intra_block_element] = \
                            accum[intra_block_batch, intra_block_filter, intra_block_element] + cur_x * cur_v
                # load new element
                x_index = [None, None, None]
                x_index[c_axis] = cur_channel
                x_index[n_axis] = cur_batch
                x_elem_index = starting_element + cur_elem + filter_size
                x_index[e_axis] = x_elem_index
                index_in_bounds = ovl.logical_and(x_elem_index >= 0, x_elem_index < num_elements)
                with ovl.if_(index_in_bounds):
                    input_block[intra_block_batch, buffer_head] = x[x_index]
                with ovl.else_():
                    input_block[intra_block_batch, buffer_head] = 0
            # Advance the ring buffer by one element.
            buffer_head <<= (buffer_head + 1) % filter_size
    # Write the accumulated results back to the output tensor.
    for intra_block_batch in ovl.arange(num_block_batches):
        cur_batch = intra_block_batch + cur_batch_block*batches_per_worker
        for intra_block_filter in ovl.arange(num_block_filters):
            cur_filter = intra_block_filter + cur_filter_block*filters_per_worker
            for intra_block_element in ovl.arange(num_block_elements):
                cur_elem = intra_block_element + cur_element_block*elements_per_worker
                output_index = [None, None, None]
                output_index[n_axis] = cur_batch
                output_index[e_axis] = cur_elem
                output_index[c_axis] = cur_filter
                output[output_index] = ovl.cast(accum[intra_block_batch, intra_block_filter, intra_block_element],
                                                output.dtype)
    return output
def reference(x, v, mode, orientation, data_format):
    """Pure-NumPy reference implementation of the 1D convolution operator.

    For every (batch, filter) pair the result is the sum over channels of
    the 1-D cross-correlation (orientation='as-is', numpy.correlate) or
    convolution (orientation='flipped', numpy.convolve) of the input and
    the kernel, with border handling selected by *mode*.

    :param x: rank-3 input array, laid out according to *data_format*.
    :param v: rank-3 filter array, same layout convention as *x*.
    :param mode: 'same', 'valid' or 'full' (numpy.correlate/convolve semantics).
    :param orientation: 'as-is' (cross-correlation) or 'flipped' (convolution).
    :param data_format: permutation of 'N', 'C', 'E' giving the axis order.
    :return: rank-3 float array laid out according to *data_format*.
    """
    # resolve data layout based on data_format input
    assert len(x.shape) == 3
    assert len(data_format) == 3
    assert data_format.count('N') == 1
    assert data_format.count('C') == 1
    assert data_format.count('E') == 1
    n_axis = data_format.find('N')
    c_axis = data_format.find('C')
    e_axis = data_format.find('E')
    num_channels = x.shape[c_axis]
    num_batches = x.shape[n_axis]
    num_elements = x.shape[e_axis]
    assert len(v.shape) == 3
    if num_channels != v.shape[c_axis]:
        raise ValueError('Channel axis size ' + str(num_channels) +
                         ' of input must match that of the filter - ' +
                         str(v.shape[c_axis]))
    num_filters = v.shape[n_axis]
    filter_size = v.shape[e_axis]
    left_apron = filter_size // 2
    right_apron = filter_size - left_apron - 1
    output_shape = [None, None, None]
    output_shape[n_axis] = num_batches
    output_shape[c_axis] = num_filters
    if mode == 'same':
        output_elements = num_elements
    elif mode == 'valid':
        output_elements = num_elements - left_apron - right_apron
    elif mode == 'full':
        output_elements = num_elements + left_apron + right_apron
    else:
        raise ValueError("mode must be 'same', 'valid', or 'full'.")
    output_shape[e_axis] = output_elements
    output = np.empty(output_shape, dtype=float)
    for cur_batch in range(num_batches):
        for cur_filter in range(num_filters):
            accum = np.zeros(output_elements)
            for cur_channel in range(num_channels):
                # Index expressions must be *tuples*: indexing an ndarray
                # with a list containing slices was deprecated in NumPy
                # 1.15 and removed in 1.23.
                x_index = [None, None, None]
                x_index[n_axis] = cur_batch
                x_index[c_axis] = cur_channel
                x_index[e_axis] = slice(num_elements)
                v_index = [None, None, None]
                v_index[n_axis] = cur_filter
                v_index[c_axis] = cur_channel
                v_index[e_axis] = slice(filter_size)
                if orientation == 'as-is':
                    accum += np.correlate(x[tuple(x_index)], v[tuple(v_index)], mode=mode)
                elif orientation == 'flipped':
                    accum += np.convolve(x[tuple(x_index)], v[tuple(v_index)], mode=mode)
                else:
                    raise RuntimeError()
            output_index = [None, None, None]
            output_index[n_axis] = cur_batch
            output_index[c_axis] = cur_filter
            output_index[e_axis] = slice(output_elements)
            output[tuple(output_index)] = accum
    return output
def run_tf(tensor_in_sizes, filter_in_sizes):
    """Compare the OVL conv_1d operator against TensorFlow's 2D convolution
    restricted to 1-D (single input/filter row), checking results against
    the NumPy reference and logging the timings.

    :param tensor_in_sizes: TF input shape [batch, input_rows(=1), input_cols, channels].
    :param filter_in_sizes: TF filter shape [filter_rows(=1), filter_cols, channels, num_filters].
    """
    # test TF 2D convolution operator in 1D vs. OVL
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    for s in filter_in_sizes:
        total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    tin1 = tf.constant(x1, shape=tensor_in_sizes, dtype=tf.float32)
    tin2 = tf.constant(x2, shape=filter_in_sizes, dtype=tf.float32)
    conv = tf.nn.conv2d(tin1, tin2,
                        strides=[1, 1, 1, 1],
                        padding="SAME",
                        data_format='NHWC')
    # compare to OVL - need to convert input to 1-D - ie. input_rows = filter_rows = 1
    # also transpose initial data since filter index is last in TF and first in OVL
    # TF input = batch, input_row, input_col, channels
    # TF filter = filter_row, filter_col, channels, num_filters
    # OVL NEC input = batches, num_elements, channels
    # OVL NEC filter = num_filters, kernel_size, channels
    assert(tensor_in_sizes[1] == 1)
    assert(filter_in_sizes[0] == 1)
    ovl_tensor_in_sizes = [tensor_in_sizes[0], tensor_in_sizes[2], tensor_in_sizes[3]]
    num_filter = filter_in_sizes[3]
    num_elem = filter_in_sizes[1]
    num_chan = filter_in_sizes[2]
    ovl_filter_in_sizes = [num_filter, num_elem, num_chan]
    ovl.logger.debug(u'input and filter sizes: ' + str(ovl_tensor_in_sizes) + ', ' + str(ovl_filter_in_sizes))
    # Builtin float: the np.float alias was deprecated in NumPy 1.20 and
    # removed in 1.24.
    ar1 = np.array(x1, dtype=float).reshape(ovl_tensor_in_sizes)
    # does not produce the correct results
    # ar2 = np.array(x2, dtype=np.float).reshape(ovl_filter_in_sizes, order='F')
    ar2 = np.zeros(ovl_filter_in_sizes, dtype=float)
    # Manual transpose from TF (col, chan, filter) order to OVL
    # (filter, col, chan) order.
    for col in range(0, num_elem):
        for chan in range(0, num_chan):
            for num in range(0, num_filter):
                index = col * num_chan * num_filter + chan * num_filter + num
                # print('ar2 ' + str(num) + ',' + str(col) + ',' + str(chan) + ' is index ' + str(index) + ' val: ' + str(x2[index]))
                ar2[num,col,chan] = x2[index]
    t0 = time.time()
    ref = reference(ar1, ar2, mode='same', orientation='as-is', data_format= 'NEC')
    t1 = time.time()
    np_time = (t1-t0)*1000
    iters = 100
    ovlOp = conv_1d(ar1, ar2, mode='same', kernel_orientation='as-is', data_format='NEC')
    ovl_cuda_time = 0
    if ovl.cuda_enabled:
        ovlResult, prof = ovl.profile(ovlOp, target_language='cuda', profiling_iterations=iters, opt_level=3)
        ovl_cuda_time = np.min(list(prof.values())[0])
        assert np.allclose(ovlResult, ref)
    #TODO - cpp is really slow...
    ovlcppResult, profcpp = ovl.profile(ovlOp, target_language='cpp', profiling_iterations=iters, opt_level=3)
    ovl_cpp_time = np.min(list(profcpp.values())[0])
    assert np.allclose(ovlcppResult, ref)
    # ensure TF runs on GPU
    test_config=tf.ConfigProto(allow_soft_placement=False)
    test_config.graph_options.optimizer_options.opt_level = -1
    # OVL-TF integration
    ovl_tf_time = 0
    dev_string = '/cpu:0'
    if ovl.cuda_enabled:
        dev_string = '/gpu:0'
    with tf.Session(config=test_config) as sess:
        with tf.device(dev_string):
            ovlOp_tf = ovl.as_tensorflow(ovlOp)
            # NOTE(review): tf.initialize_all_variables() is the TF<1.0
            # spelling of tf.global_variables_initializer(); kept as-is
            # for the TF version this file targets.
            init = tf.initialize_all_variables()
            sess.run(init)
            ovlOp_tf_result = sess.run(ovlOp_tf)
            t0 = time.time()
            for dummy in itertools.repeat(None, iters):
                sess.run(ovlOp_tf.op)
            t1 = time.time()
            ovl_tf_time = (t1-t0)/float(iters) * 1000.00
            assert np.allclose(ovlOp_tf_result, ref)
    sess.close()
    # run TF 2D conv alone
    tf_time = 0
    with tf.Session(config=test_config) as sess:
        with tf.device(dev_string):
            result = sess.run([conv])
            t0 = time.time()
            for dummy in itertools.repeat(None, iters):
                sess.run([conv.op])
            t1 = time.time()
            tf_time = (t1-t0)/float(iters) * 1000.00
            # TF result is 4D - have to convert to 3D to match OVL
            tf_shape = result[0].shape
            assert(tf_shape[1] == 1)
            ovl_shape = [tf_shape[0], tf_shape[2], tf_shape[3]]
            tf_result = np.array(result[0], dtype=float).reshape(ovl_shape)
            #TODO - if number of filter elements is even, TF result does not match reference - first element "wraps" to end
            assert np.allclose(tf_result, ref)
    sess.close()
    times = [np_time, ovl_cuda_time, ovl_cpp_time, ovl_tf_time, tf_time]
    ovl.logger.debug(u' time [np, OVL_cuda, OVL_cpp, OVL_TF, TF]: ' + str(times))
def run_tests():
    """Exercise conv_1d against the NumPy reference over all border modes
    and kernel orientations (CUDA checks only when CUDA is enabled), and
    log reference vs. OVL timings."""
    bb = 1
    cc = 1
    ee = 1000
    k_num = 10
    a1 = np.random.random((bb, ee, cc))
    a2 = np.random.random((bb, ee, cc))
    for k_ee in range(13, 14):
        b = np.random.random((k_num, k_ee, cc))
        for md in ['valid', 'same', 'full']:
            for orientation in ['as-is', 'flipped']:
                # (The redundant per-iteration `import time` was removed;
                # `time` is already imported at module level.)
                t1 = time.time()
                y1 = reference(a1, b, md, orientation, 'NEC')
                t2 = time.time()
                y2 = reference(a2, b, md, orientation, 'NEC')
                op = conv_1d(a1, b, mode=md, kernel_orientation=orientation, data_format='NEC')
                # result1 =
                if ovl.cuda_enabled:
                    assert np.allclose(ovl.evaluate(op, target_language='cuda'), y1)
                # Re-evaluating after mutating a1 in place checks that the
                # operator reads current input data, not a stale copy.
                a1[:] = a2[:]
                if ovl.cuda_enabled:
                    assert np.allclose(ovl.evaluate(op, target_language='cuda'), y2)
                res, prof = ovl.profile(op, target_language='cuda', profiling_iterations=100, opt_level=3)
                # Single formatted message: passing bare positional values to
                # logger.debug() misuses the logging API (the first argument
                # must be the message / format string).
                ovl.logger.debug(u' k_ee, mode, orientation, ref ms, ovl ms: ' +
                                 str((k_ee, md, orientation, (t2 - t1) * 1000, np.min(list(prof.values())[0]))))
                # assert np.allclose(result1, y1)
                # assert np.allclose(result2, y2)
#TODO - OVL evaluate fails if it is run after a TF session
# Module-level driver: runs the TF-vs-OVL comparison when the file is
# executed.  NOTE(review): this runs on *import* too; consider guarding
# with `if __name__ == "__main__":`.
run_tf([5, 1, 1000, 3], [1, 13, 3, 10])
# run_tests()
# op = Convolution1D(np.reshape(a, (batches, chans, elems)), np.reshape(v, (chans, kern_elems)))
# @staticmethod
# def _conv_core(input, filter, n_axis, c_axis, e_axis, kernel_orientation, border_policy,
# upsample_factor=None, downsample_factor=None):
# filters_per_worker = 3
# batches_per_worker = 5
# strides_per_worker = 100
#
# def mod_ceil(x, y):
# m, remainder = divmod(x, y)
# if remainder == 0:
# return m
# else:
# return m + 1
#
# filter_workers = mod_ceil(num_filters, filters_per_worker)
# batch_workers = mod_ceil(num_batches, batches_per_worker)
#
#
# element_workers, final_worker_elements = divmod(elements, elements_per_worker)
# if final_worker_elements != 0:
# element_workers += 1
# batches_per_worker = 1
# batch_workers, final_worker_batches = divmod(batches, batches_per_worker)
# if final_worker_batches != 0:
# batch_workers += 1
#
# workgroup_shape = [batches_per_worker, channels, element_workers]
# workgroup_position = ops.position_in(workgroup_shape)
#
# cur_batch_worker = workgroup_position[0]
# cur_channel = workgroup_position[1]
# cur_element_worker = workgroup_position[2]
#
# for cur_batch in ops.arange(cur_batch_worker*batches_per_worker, (cur_batch_worker+1)*batches_per_worker):
# for cur_element in ops.arange(cur_element_worker*elements_per_worker,
# (cur_element_worker+1)*elements_per_worker):
# with ops.if_(ops.logical_and(cur_batch < batches, cur_element < elements)):
# pass
| [
"numpy.convolve",
"opveclib.arange",
"opveclib.output",
"opveclib.position_in",
"numpy.array",
"itertools.repeat",
"numpy.random.random",
"tensorflow.Session",
"opveclib.if_",
"numpy.empty",
"opveclib.profile",
"tensorflow.ConfigProto",
"opveclib.operator",
"tensorflow.nn.conv2d",
"tenso... | [((692, 706), 'opveclib.operator', 'ovl.operator', ([], {}), '()\n', (704, 706), True, 'import opveclib as ovl\n'), ((3758, 3791), 'opveclib.output', 'ovl.output', (['output_shape', 'x.dtype'], {}), '(output_shape, x.dtype)\n', (3768, 3791), True, 'import opveclib as ovl\n'), ((4443, 4475), 'opveclib.position_in', 'ovl.position_in', (['workgroup_shape'], {}), '(workgroup_shape)\n', (4458, 4475), True, 'import opveclib as ovl\n'), ((4591, 4635), 'opveclib.variable', 'ovl.variable', (['batches_per_worker', 'ovl.uint32'], {}), '(batches_per_worker, ovl.uint32)\n', (4603, 4635), True, 'import opveclib as ovl\n'), ((4797, 4841), 'opveclib.variable', 'ovl.variable', (['filters_per_worker', 'ovl.uint32'], {}), '(filters_per_worker, ovl.uint32)\n', (4809, 4841), True, 'import opveclib as ovl\n'), ((5008, 5053), 'opveclib.variable', 'ovl.variable', (['elements_per_worker', 'ovl.uint32'], {}), '(elements_per_worker, ovl.uint32)\n', (5020, 5053), True, 'import opveclib as ovl\n'), ((5212, 5301), 'opveclib.zeros', 'ovl.zeros', (['(batches_per_worker, filters_per_worker, elements_per_worker)', 'ovl.float64'], {}), '((batches_per_worker, filters_per_worker, elements_per_worker),\n ovl.float64)\n', (5221, 5301), True, 'import opveclib as ovl\n'), ((5323, 5376), 'opveclib.zeros', 'ovl.zeros', (['(filters_per_worker, filter_size)', 'v.dtype'], {}), '((filters_per_worker, filter_size), v.dtype)\n', (5332, 5376), True, 'import opveclib as ovl\n'), ((5402, 5455), 'opveclib.zeros', 'ovl.zeros', (['(batches_per_worker, filter_size)', 'x.dtype'], {}), '((batches_per_worker, filter_size), x.dtype)\n', (5411, 5455), True, 'import opveclib as ovl\n'), ((5486, 5510), 'opveclib.arange', 'ovl.arange', (['num_channels'], {}), '(num_channels)\n', (5496, 5510), True, 'import opveclib as ovl\n'), ((8847, 8876), 'opveclib.arange', 'ovl.arange', (['num_block_batches'], {}), '(num_block_batches)\n', (8857, 8876), True, 'import opveclib as ovl\n'), ((11147, 11182), 'numpy.empty', 
'np.empty', (['output_shape'], {'dtype': 'float'}), '(output_shape, dtype=float)\n', (11155, 11182), True, 'import numpy as np\n'), ((12868, 12924), 'tensorflow.constant', 'tf.constant', (['x1'], {'shape': 'tensor_in_sizes', 'dtype': 'tf.float32'}), '(x1, shape=tensor_in_sizes, dtype=tf.float32)\n', (12879, 12924), True, 'import tensorflow as tf\n'), ((12936, 12992), 'tensorflow.constant', 'tf.constant', (['x2'], {'shape': 'filter_in_sizes', 'dtype': 'tf.float32'}), '(x2, shape=filter_in_sizes, dtype=tf.float32)\n', (12947, 12992), True, 'import tensorflow as tf\n'), ((13004, 13091), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['tin1', 'tin2'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""', 'data_format': '"""NHWC"""'}), "(tin1, tin2, strides=[1, 1, 1, 1], padding='SAME', data_format=\n 'NHWC')\n", (13016, 13091), True, 'import tensorflow as tf\n'), ((14191, 14236), 'numpy.zeros', 'np.zeros', (['ovl_filter_in_sizes'], {'dtype': 'np.float'}), '(ovl_filter_in_sizes, dtype=np.float)\n', (14199, 14236), True, 'import numpy as np\n'), ((14625, 14636), 'time.time', 'time.time', ([], {}), '()\n', (14634, 14636), False, 'import time\n'), ((14730, 14741), 'time.time', 'time.time', ([], {}), '()\n', (14739, 14741), False, 'import time\n'), ((15193, 15279), 'opveclib.profile', 'ovl.profile', (['ovlOp'], {'target_language': '"""cpp"""', 'profiling_iterations': 'iters', 'opt_level': '(3)'}), "(ovlOp, target_language='cpp', profiling_iterations=iters,\n opt_level=3)\n", (15204, 15279), True, 'import opveclib as ovl\n'), ((15340, 15370), 'numpy.allclose', 'np.allclose', (['ovlcppResult', 'ref'], {}), '(ovlcppResult, ref)\n', (15351, 15370), True, 'import numpy as np\n'), ((15416, 15458), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(False)'}), '(allow_soft_placement=False)\n', (15430, 15458), True, 'import tensorflow as tf\n'), ((17268, 17298), 'numpy.random.random', 'np.random.random', (['(bb, ee, cc)'], {}), '((bb, ee, cc))\n', (17284, 17298), 
True, 'import numpy as np\n'), ((17308, 17338), 'numpy.random.random', 'np.random.random', (['(bb, ee, cc)'], {}), '((bb, ee, cc))\n', (17324, 17338), True, 'import numpy as np\n'), ((5591, 5621), 'opveclib.arange', 'ovl.arange', (['filters_per_worker'], {}), '(filters_per_worker)\n', (5601, 5621), True, 'import opveclib as ovl\n'), ((6368, 6395), 'opveclib.variable', 'ovl.variable', (['(0)', 'ovl.uint32'], {}), '(0, ovl.uint32)\n', (6380, 6395), True, 'import opveclib as ovl\n'), ((6429, 6458), 'opveclib.arange', 'ovl.arange', (['num_block_batches'], {}), '(num_block_batches)\n', (6439, 6458), True, 'import opveclib as ovl\n'), ((7262, 7292), 'opveclib.arange', 'ovl.arange', (['num_block_elements'], {}), '(num_block_elements)\n', (7272, 7292), True, 'import opveclib as ovl\n'), ((8987, 9016), 'opveclib.arange', 'ovl.arange', (['num_block_filters'], {}), '(num_block_filters)\n', (8997, 9016), True, 'import opveclib as ovl\n'), ((14949, 15036), 'opveclib.profile', 'ovl.profile', (['ovlOp'], {'target_language': '"""cuda"""', 'profiling_iterations': 'iters', 'opt_level': '(3)'}), "(ovlOp, target_language='cuda', profiling_iterations=iters,\n opt_level=3)\n", (14960, 15036), True, 'import opveclib as ovl\n'), ((15103, 15130), 'numpy.allclose', 'np.allclose', (['ovlResult', 'ref'], {}), '(ovlResult, ref)\n', (15114, 15130), True, 'import numpy as np\n'), ((15659, 15689), 'tensorflow.Session', 'tf.Session', ([], {'config': 'test_config'}), '(config=test_config)\n', (15669, 15689), True, 'import tensorflow as tf\n'), ((16240, 16270), 'tensorflow.Session', 'tf.Session', ([], {'config': 'test_config'}), '(config=test_config)\n', (16250, 16270), True, 'import tensorflow as tf\n'), ((17382, 17417), 'numpy.random.random', 'np.random.random', (['(k_num, k_ee, cc)'], {}), '((k_num, k_ee, cc))\n', (17398, 17417), True, 'import numpy as np\n'), ((4677, 4722), 'opveclib.if_', 'ovl.if_', (['(cur_batch_block == batch_workers - 1)'], {}), '(cur_batch_block == batch_workers - 1)\n', 
(4684, 4722), True, 'import opveclib as ovl\n'), ((4884, 4931), 'opveclib.if_', 'ovl.if_', (['(cur_filter_block == filter_workers - 1)'], {}), '(cur_filter_block == filter_workers - 1)\n', (4891, 4931), True, 'import opveclib as ovl\n'), ((5097, 5146), 'opveclib.if_', 'ovl.if_', (['(cur_element_block == element_workers - 1)'], {}), '(cur_element_block == element_workers - 1)\n', (5104, 5146), True, 'import opveclib as ovl\n'), ((5648, 5671), 'opveclib.arange', 'ovl.arange', (['filter_size'], {}), '(filter_size)\n', (5658, 5671), True, 'import opveclib as ovl\n'), ((6564, 6587), 'opveclib.arange', 'ovl.arange', (['filter_size'], {}), '(filter_size)\n', (6574, 6587), True, 'import opveclib as ovl\n'), ((7414, 7443), 'opveclib.arange', 'ovl.arange', (['num_block_batches'], {}), '(num_block_batches)\n', (7424, 7443), True, 'import opveclib as ovl\n'), ((9139, 9169), 'opveclib.arange', 'ovl.arange', (['num_block_elements'], {}), '(num_block_elements)\n', (9149, 9169), True, 'import opveclib as ovl\n'), ((11303, 11328), 'numpy.zeros', 'np.zeros', (['output_elements'], {}), '(output_elements)\n', (11311, 11328), True, 'import numpy as np\n'), ((13999, 14027), 'numpy.array', 'np.array', (['x1'], {'dtype': 'np.float'}), '(x1, dtype=np.float)\n', (14007, 14027), True, 'import numpy as np\n'), ((15712, 15733), 'tensorflow.device', 'tf.device', (['dev_string'], {}), '(dev_string)\n', (15721, 15733), True, 'import tensorflow as tf\n'), ((15758, 15782), 'opveclib.as_tensorflow', 'ovl.as_tensorflow', (['ovlOp'], {}), '(ovlOp)\n', (15775, 15782), True, 'import opveclib as ovl\n'), ((15802, 15831), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (15829, 15831), True, 'import tensorflow as tf\n'), ((15925, 15936), 'time.time', 'time.time', ([], {}), '()\n', (15934, 15936), False, 'import time\n'), ((15962, 15991), 'itertools.repeat', 'itertools.repeat', (['None', 'iters'], {}), '(None, iters)\n', (15978, 15991), False, 'import itertools\n'), 
((16048, 16059), 'time.time', 'time.time', ([], {}), '()\n', (16057, 16059), False, 'import time\n'), ((16136, 16169), 'numpy.allclose', 'np.allclose', (['ovlOp_tf_result', 'ref'], {}), '(ovlOp_tf_result, ref)\n', (16147, 16169), True, 'import numpy as np\n'), ((16293, 16314), 'tensorflow.device', 'tf.device', (['dev_string'], {}), '(dev_string)\n', (16302, 16314), True, 'import tensorflow as tf\n'), ((16371, 16382), 'time.time', 'time.time', ([], {}), '()\n', (16380, 16382), False, 'import time\n'), ((16408, 16437), 'itertools.repeat', 'itertools.repeat', (['None', 'iters'], {}), '(None, iters)\n', (16424, 16437), False, 'import itertools\n'), ((16492, 16503), 'time.time', 'time.time', ([], {}), '()\n', (16501, 16503), False, 'import time\n'), ((16986, 17013), 'numpy.allclose', 'np.allclose', (['tf_result', 'ref'], {}), '(tf_result, ref)\n', (16997, 17013), True, 'import numpy as np\n'), ((6948, 7011), 'opveclib.logical_and', 'ovl.logical_and', (['(x_elem_index >= 0)', '(x_elem_index < num_elements)'], {}), '(x_elem_index >= 0, x_elem_index < num_elements)\n', (6963, 7011), True, 'import opveclib as ovl\n'), ((7570, 7599), 'opveclib.arange', 'ovl.arange', (['num_block_filters'], {}), '(num_block_filters)\n', (7580, 7599), True, 'import opveclib as ovl\n'), ((8466, 8529), 'opveclib.logical_and', 'ovl.logical_and', (['(x_elem_index >= 0)', '(x_elem_index < num_elements)'], {}), '(x_elem_index >= 0, x_elem_index < num_elements)\n', (8481, 8529), True, 'import opveclib as ovl\n'), ((9495, 9588), 'opveclib.cast', 'ovl.cast', (['accum[intra_block_batch, intra_block_filter, intra_block_element]', 'output.dtype'], {}), '(accum[intra_block_batch, intra_block_filter, intra_block_element],\n output.dtype)\n', (9503, 9588), True, 'import opveclib as ovl\n'), ((17565, 17576), 'time.time', 'time.time', ([], {}), '()\n', (17574, 17576), False, 'import time\n'), ((17660, 17671), 'time.time', 'time.time', ([], {}), '()\n', (17669, 17671), False, 'import time\n'), ((5813, 5853), 
'opveclib.cast', 'ovl.cast', (['intra_block_filter', 'ovl.uint32'], {}), '(intra_block_filter, ovl.uint32)\n', (5821, 5853), True, 'import opveclib as ovl\n'), ((6839, 6866), 'opveclib.cast', 'ovl.cast', (['f_pos', 'ovl.uint64'], {}), '(f_pos, ovl.uint64)\n', (6847, 6866), True, 'import opveclib as ovl\n'), ((7033, 7057), 'opveclib.if_', 'ovl.if_', (['index_in_bounds'], {}), '(index_in_bounds)\n', (7040, 7057), True, 'import opveclib as ovl\n'), ((7151, 7162), 'opveclib.else_', 'ovl.else_', ([], {}), '()\n', (7160, 7162), True, 'import opveclib as ovl\n'), ((7634, 7657), 'opveclib.arange', 'ovl.arange', (['filter_size'], {}), '(filter_size)\n', (7644, 7657), True, 'import opveclib as ovl\n'), ((8551, 8575), 'opveclib.if_', 'ovl.if_', (['index_in_bounds'], {}), '(index_in_bounds)\n', (8558, 8575), True, 'import opveclib as ovl\n'), ((8675, 8686), 'opveclib.else_', 'ovl.else_', ([], {}), '()\n', (8684, 8686), True, 'import opveclib as ovl\n'), ((11876, 11923), 'numpy.correlate', 'np.correlate', (['x[x_index]', 'v[v_index]'], {'mode': 'mode'}), '(x[x_index], v[v_index], mode=mode)\n', (11888, 11923), True, 'import numpy as np\n'), ((16788, 16823), 'numpy.array', 'np.array', (['result[0]'], {'dtype': 'np.float'}), '(result[0], dtype=np.float)\n', (16796, 16823), True, 'import numpy as np\n'), ((18202, 18280), 'opveclib.profile', 'ovl.profile', (['op'], {'target_language': '"""cuda"""', 'profiling_iterations': '(100)', 'opt_level': '(3)'}), "(op, target_language='cuda', profiling_iterations=100, opt_level=3)\n", (18213, 18280), True, 'import opveclib as ovl\n'), ((6775, 6836), 'opveclib.cast', 'ovl.cast', (['(cur_element_block * elements_per_worker)', 'ovl.uint64'], {}), '(cur_element_block * elements_per_worker, ovl.uint64)\n', (6783, 6836), True, 'import opveclib as ovl\n'), ((7781, 7841), 'opveclib.cast', 'ovl.cast', (['input_block[intra_block_batch, x_pos]', 'ovl.float64'], {}), '(input_block[intra_block_batch, x_pos], ovl.float64)\n', (7789, 7841), True, 'import 
opveclib as ovl\n'), ((7874, 7936), 'opveclib.cast', 'ovl.cast', (['filter_block[intra_block_filter, f_pos]', 'ovl.float64'], {}), '(filter_block[intra_block_filter, f_pos], ovl.float64)\n', (7882, 7936), True, 'import opveclib as ovl\n'), ((12008, 12054), 'numpy.convolve', 'np.convolve', (['x[x_index]', 'v[v_index]'], {'mode': 'mode'}), '(x[x_index], v[v_index], mode=mode)\n', (12019, 12054), True, 'import numpy as np\n'), ((17935, 17975), 'opveclib.evaluate', 'ovl.evaluate', (['op'], {'target_language': '"""cuda"""'}), "(op, target_language='cuda')\n", (17947, 17975), True, 'import opveclib as ovl\n'), ((18124, 18164), 'opveclib.evaluate', 'ovl.evaluate', (['op'], {'target_language': '"""cuda"""'}), "(op, target_language='cuda')\n", (18136, 18164), True, 'import opveclib as ovl\n'), ((7706, 7733), 'opveclib.cast', 'ovl.cast', (['f_pos', 'ovl.uint32'], {}), '(f_pos, ovl.uint32)\n', (7714, 7733), True, 'import opveclib as ovl\n')] |
import numpy as np
from yt.testing import fake_random_ds, assert_equal
from yt.frontends.stream.data_structures import load_uniform_grid
def test_exclude_above():
    """Cutting above a threshold keeps only values at or below it."""
    ad = fake_random_ds(ndims=3).all_data()
    # A threshold above every density leaves the selection untouched,
    # both in code units and in explicit g/m**3.
    assert_equal(ad.exclude_above('density', 1)['density'], ad['density'])
    assert_equal(ad.exclude_above('density', 1e6, 'g/m**3')['density'],
                 ad['density'])
    # A threshold of zero removes every cell.
    assert_equal(ad.exclude_above('density', 0)['density'], [])
def test_exclude_below():
    """Cutting below a threshold keeps only values at or above it."""
    ad = fake_random_ds(ndims=3).all_data()
    # Thresholds above every density remove all cells,
    # both in code units and in explicit g/m**3.
    assert_equal(ad.exclude_below('density', 1)['density'], [])
    assert_equal(ad.exclude_below('density', 1e6, 'g/m**3')['density'], [])
    # A threshold of zero keeps the selection untouched.
    assert_equal(ad.exclude_below('density', 0)['density'], ad['density'])
def test_exclude_nan():
    """exclude_nan drops every NaN cell from the selection."""
    arr = np.full((10, 10, 10), np.nan)
    arr[1, 1, :] = 1  # the only finite row: ten ones
    ds = load_uniform_grid({'density': arr}, arr.shape,
                           length_unit='cm', nprocs=1)
    filtered = ds.all_data().exclude_nan('density')
    assert_equal(filtered['density'], np.ones(10))
def test_equal():
    """include_equal/exclude_equal partition cells on an exact value."""
    arr = np.ones((10, 10, 10))
    arr[1, 1, :] = 2.
    arr[2, 1, :] = 3.
    ds = load_uniform_grid({'density': arr}, arr.shape,
                           length_unit='cm', nprocs=1)
    ad = ds.all_data()
    # Excluding the background value leaves no cell equal to it ...
    without_ones = ad.exclude_equal('density', 1.0)
    assert np.all(without_ones['density'] != 1.0)
    # ... while including it keeps nothing else.
    with_ones = ad.include_equal('density', 1.0)
    assert np.all(with_ones['density'] == 1.0)
def test_inside_outside():
    """include_inside/exclude_outside agree inside a band; include_outside
    selects the complement.

    The field has 990 cells in [1, 2] and 10 cells equal to 3, so a band of
    (0.9, 2.1) splits it 990/10.  The same checks run in code units and in
    g/m**3 (1 g/cm**3 == 1e6 g/m**3).
    """
    test_array = np.ones((10, 10, 10))
    test_array[1, 1, :] = 2.
    test_array[2, 1, :] = 3.
    data = dict(density=test_array)
    ds = load_uniform_grid(data, test_array.shape,
                           length_unit='cm', nprocs=1)
    ad = ds.all_data()
    # NOTE(review): the original repeated each include_outside block verbatim
    # (possibly a copy-paste of an intended exclude_inside check — TODO
    # confirm) and duplicated the whole section for the unit variant; the
    # loop below covers both unit spellings once each.
    for args in ((0.9, 2.1), (0.9e6, 2.1e6, 'g/m**3')):
        only_ones_and_twos = ad.include_inside('density', *args)
        assert np.all(only_ones_and_twos['density'] != 3.)
        assert len(only_ones_and_twos['density']) == 990
        only_ones_and_twos = ad.exclude_outside('density', *args)
        assert len(only_ones_and_twos['density']) == 990
        assert np.all(only_ones_and_twos['density'] != 3.)
        only_threes = ad.include_outside('density', *args)
        assert np.all(only_threes['density'] == 3)
        assert len(only_threes['density']) == 10
| [
"yt.testing.assert_equal",
"numpy.ones",
"yt.testing.fake_random_ds",
"yt.frontends.stream.data_structures.load_uniform_grid",
"numpy.all"
] | [((179, 202), 'yt.testing.fake_random_ds', 'fake_random_ds', ([], {'ndims': '(3)'}), '(ndims=3)\n', (193, 202), False, 'from yt.testing import fake_random_ds, assert_equal\n'), ((290, 342), 'yt.testing.assert_equal', 'assert_equal', (["new_ds['density']", "all_data['density']"], {}), "(new_ds['density'], all_data['density'])\n", (302, 342), False, 'from yt.testing import fake_random_ds, assert_equal\n'), ((409, 461), 'yt.testing.assert_equal', 'assert_equal', (["new_ds['density']", "all_data['density']"], {}), "(new_ds['density'], all_data['density'])\n", (421, 461), False, 'from yt.testing import fake_random_ds, assert_equal\n'), ((516, 551), 'yt.testing.assert_equal', 'assert_equal', (["new_ds['density']", '[]'], {}), "(new_ds['density'], [])\n", (528, 551), False, 'from yt.testing import fake_random_ds, assert_equal\n'), ((592, 615), 'yt.testing.fake_random_ds', 'fake_random_ds', ([], {'ndims': '(3)'}), '(ndims=3)\n', (606, 615), False, 'from yt.testing import fake_random_ds, assert_equal\n'), ((703, 738), 'yt.testing.assert_equal', 'assert_equal', (["new_ds['density']", '[]'], {}), "(new_ds['density'], [])\n", (715, 738), False, 'from yt.testing import fake_random_ds, assert_equal\n'), ((805, 840), 'yt.testing.assert_equal', 'assert_equal', (["new_ds['density']", '[]'], {}), "(new_ds['density'], [])\n", (817, 840), False, 'from yt.testing import fake_random_ds, assert_equal\n'), ((895, 947), 'yt.testing.assert_equal', 'assert_equal', (["new_ds['density']", "all_data['density']"], {}), "(new_ds['density'], all_data['density'])\n", (907, 947), False, 'from yt.testing import fake_random_ds, assert_equal\n'), ((1090, 1159), 'yt.frontends.stream.data_structures.load_uniform_grid', 'load_uniform_grid', (['data', 'test_array.shape'], {'length_unit': '"""cm"""', 'nprocs': '(1)'}), "(data, test_array.shape, length_unit='cm', nprocs=1)\n", (1107, 1159), False, 'from yt.frontends.stream.data_structures import load_uniform_grid\n'), ((1332, 1353), 'numpy.ones', 
'np.ones', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (1339, 1353), True, 'import numpy as np\n'), ((1453, 1522), 'yt.frontends.stream.data_structures.load_uniform_grid', 'load_uniform_grid', (['data', 'test_array.shape'], {'length_unit': '"""cm"""', 'nprocs': '(1)'}), "(data, test_array.shape, length_unit='cm', nprocs=1)\n", (1470, 1522), False, 'from yt.frontends.stream.data_structures import load_uniform_grid\n'), ((1612, 1645), 'numpy.all', 'np.all', (["(no_ones['density'] != 1.0)"], {}), "(no_ones['density'] != 1.0)\n", (1618, 1645), True, 'import numpy as np\n'), ((1706, 1741), 'numpy.all', 'np.all', (["(only_ones['density'] == 1.0)"], {}), "(only_ones['density'] == 1.0)\n", (1712, 1741), True, 'import numpy as np\n'), ((1787, 1808), 'numpy.ones', 'np.ones', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (1794, 1808), True, 'import numpy as np\n'), ((1908, 1977), 'yt.frontends.stream.data_structures.load_uniform_grid', 'load_uniform_grid', (['data', 'test_array.shape'], {'length_unit': '"""cm"""', 'nprocs': '(1)'}), "(data, test_array.shape, length_unit='cm', nprocs=1)\n", (1925, 1977), False, 'from yt.frontends.stream.data_structures import load_uniform_grid\n'), ((2089, 2133), 'numpy.all', 'np.all', (["(only_ones_and_twos['density'] != 3.0)"], {}), "(only_ones_and_twos['density'] != 3.0)\n", (2095, 2133), True, 'import numpy as np\n'), ((2320, 2364), 'numpy.all', 'np.all', (["(only_ones_and_twos['density'] != 3.0)"], {}), "(only_ones_and_twos['density'] != 3.0)\n", (2326, 2364), True, 'import numpy as np\n'), ((2438, 2473), 'numpy.all', 'np.all', (["(only_threes['density'] == 3)"], {}), "(only_threes['density'] == 3)\n", (2444, 2473), True, 'import numpy as np\n'), ((2597, 2632), 'numpy.all', 'np.all', (["(only_threes['density'] == 3)"], {}), "(only_threes['density'] == 3)\n", (2603, 2632), True, 'import numpy as np\n'), ((2810, 2854), 'numpy.all', 'np.all', (["(only_ones_and_twos['density'] != 3.0)"], {}), "(only_ones_and_twos['density'] != 3.0)\n", (2816, 
2854), True, 'import numpy as np\n'), ((3051, 3095), 'numpy.all', 'np.all', (["(only_ones_and_twos['density'] != 3.0)"], {}), "(only_ones_and_twos['density'] != 3.0)\n", (3057, 3095), True, 'import numpy as np\n'), ((3179, 3214), 'numpy.all', 'np.all', (["(only_threes['density'] == 3)"], {}), "(only_threes['density'] == 3)\n", (3185, 3214), True, 'import numpy as np\n'), ((3344, 3379), 'numpy.all', 'np.all', (["(only_threes['density'] == 3)"], {}), "(only_threes['density'] == 3)\n", (3350, 3379), True, 'import numpy as np\n'), ((997, 1018), 'numpy.ones', 'np.ones', (['(10, 10, 10)'], {}), '((10, 10, 10))\n', (1004, 1018), True, 'import numpy as np\n'), ((1282, 1293), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1289, 1293), True, 'import numpy as np\n')] |
# builtin packages
import os
import numpy as np
from tqdm import tqdm
# torch
import torch
from torch import optim
from torch.utils.data import DataLoader
# from my module
from depth_completion.data import DepthDataset
from depth_completion.data import customed_collate_fn
import depth_completion.utils.loss_func as loss_func
from .base_agent import BaseAgent
class DepthCompletionAgent(BaseAgent):
    """Agent that trains/evaluates a depth-completion network.

    Wires together dataset loading, the forward pass, weighted multi-term
    losses (resolved by name from ``utils/loss_func.py``), logging via the
    inherited file manager, and model checkpointing.  The network itself is
    expected to be created before ``super().__init__`` returns (see the
    comment in ``__init__``).
    """
    def __init__(self, config=None, file_manager=None):
        """Set up device placement, the Adam optimizer, and optionally
        restore a checkpoint named in ``config['load_model_path']``.
        """
        super(DepthCompletionAgent, self).__init__(config, file_manager)
        ### Need to define your model yourself ###
        # CUDA is used whenever at least one device id is configured.
        self.use_cuda = True if len(self._config['device_ids']) >= 1 else False
        self.device_ids = self._config['device_ids']
        if self.use_cuda is True:
            self.model = self.model.to(self.device_ids[0])
        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self._config['lr'])
        self._epoch = 0
        if self._config['load_model_path'] is not None:
            # param_only == True restores weights only; False also restores
            # the optimizer state (presumably via load_model — TODO confirm).
            param_only = self._config['param_only']
            self.load_model(self._config['load_model_path'], param_only)
            if param_only == False:
                # Rebuild the optimizer so its param groups point at the
                # restored model parameters, then carry over its state.
                new_optimizer = optim.Adam(self.model.parameters(),
                                           lr=self._config['lr'])
                new_optimizer.load_state_dict(self.optimizer.state_dict())
                self.optimizer = new_optimizer
                print(self._epoch)
        print (self._config)
    def run(self):
        """Load data and start running model"""
        assert self._config['mode'] in ['train', 'test']
        dataset_name = self._config['dataset_name']
        # loss functions & weight
        # self.loss_funcs = list of (key (str), loss_func, weight (float))
        self.loss_funcs = []
        for loss_func_key, loss_func_name, weight in self._config['loss_func']:
            if not hasattr(loss_func, loss_func_name):
                raise AttributeError(f'Not supported loss function name: '\
                                     f'{loss_func_name}. Please add to '\
                                     f'utils/loss_func.py.')
            else:
                self.loss_funcs.append((loss_func_key,
                                        getattr(loss_func,
                                                loss_func_name),
                                        weight))
        if self._config['mode'] == 'test':
            depth_test_dataset = DepthDataset(dataset_name, self._config['test_path'], train=False)
            self.test_loader = DataLoader(
                dataset=depth_test_dataset,
                batch_size=self._config['batch_size'],
                shuffle=False,
                num_workers=self._config['num_workers'],
                collate_fn=customed_collate_fn(dataset_name))
            # visualize output path
            if not os.path.isdir(self._config['output']):
                os.mkdir(self._config['output'])
            avg_loss, avg_detailed_loss = self.test()
        elif self._config['mode'] == 'train':
            # load datasets
            depth_dataset = DepthDataset(dataset_name,
                                         self._config['train_path'], train=True)
            self.train_loader = DataLoader(
                dataset=depth_dataset,
                batch_size=self._config['batch_size'],
                shuffle=True,
                num_workers=self._config['num_workers'],
                collate_fn=customed_collate_fn(dataset_name))
            if self._config['validation'] is True:
                depth_val_dataset = DepthDataset(
                    dataset_name, self._config['valid_path'], train=False)
                self.test_loader = DataLoader(
                    dataset=depth_val_dataset,
                    batch_size=self._config['batch_size'],
                    shuffle=False,
                    num_workers=self._config['num_workers'],
                    collate_fn=customed_collate_fn(dataset_name))
            # FileManager
            self._file_manager.set_base_path(self._config)
            self.train()
    def train(self):
        """Run the full training loop: one `train_one_epoch` per epoch,
        optional validation, CSV-style logging, and a checkpoint per epoch.
        """
        print ('Start Training ...')
        self.init_log()
        # Resume one epoch past a restored checkpoint; start at 0 otherwise.
        start_epoch = 0 if self._epoch == 0 else self._epoch + 1
        for self._epoch in range(start_epoch, self._config['epoches']):
            print(f'Start {self._epoch} epoch')
            # train
            self.train_loss, self.train_detailed_loss = self.train_one_epoch()
            # validation
            if self._config['validation'] is True:
                self.val_loss, self.val_detailed_loss = self.test(validate=True)
            # save log
            self.save_loss_to_log(print_msg=True)
            # save model
            self.save_model()
    def test(self, validate=False):
        """Evaluate the model over ``self.test_loader`` without gradients.

        :param validate: when False (pure test mode) also dump visualization
            images for every batch via ``save_image``.
        :return: (avg_loss, {loss_key: avg_loss_component}) over all batches.
        """
        tqdm_loader = tqdm(self.test_loader, total=len(self.test_loader))
        self.change_model_state('eval')
        # array to save loss for testing data
        total_valid_loss = []
        total_valid_detailed_loss = {}
        for (one_detailed_key, _, _) in self.loss_funcs:
            total_valid_detailed_loss[one_detailed_key] = []
        for step, batch in enumerate(tqdm_loader):
            with torch.no_grad():
                # go through network
                output = self.feed_into_net(batch, mode='eval')
                # calculate loss in single step
                loss, detailed_loss = self.calculate_loss(output, batch)
                total_valid_loss.append(loss.cpu().data.numpy())
                for one_detailed_key in detailed_loss.keys():
                    total_valid_detailed_loss[one_detailed_key].append(
                        detailed_loss[one_detailed_key].cpu().data.numpy())
                # testing mode (visualize)
                if validate is False:
                    scene_name = batch['scene_name']
                    vis_items = {'original':batch['depth'],
                                 'output':output['ori_output_depth'],
                                 'render':batch['render_depth'],
                                 'boundary':batch['depth_boundary']}
                    self.save_image(vis_items, scene_name)
        tqdm_loader.close()
        # average loss
        avg_loss = np.mean(total_valid_loss)
        avg_detailed_loss = {key: np.mean(ele) for key, ele in total_valid_detailed_loss.items()}
        return avg_loss, avg_detailed_loss
    def train_one_epoch(self):
        """Run one optimization pass over ``self.train_loader``.

        On epoch 0 the running loss is printed every step for a quick
        sanity check.  Returns the same (avg_loss, detailed dict) shape
        as :meth:`test`.
        """
        tqdm_loader = tqdm(self.train_loader, total=len(self.train_loader))
        self.change_model_state('train')
        # array to save loss in one epoch
        total_train_loss = []
        total_detailed_loss = {}
        for (one_detailed_key, _, _) in self.loss_funcs:
            total_detailed_loss[one_detailed_key] = []
        for step, batch in enumerate(tqdm_loader):
            # go through network
            output = self.feed_into_net(batch, mode='train')
            # calculate loss in single step
            loss, detailed_loss = self.calculate_loss(output, batch)
            # backpropogation
            self.update(loss)
            total_train_loss.append(loss.cpu().data.numpy())
            for one_detailed_key in detailed_loss.keys():
                total_detailed_loss[one_detailed_key].append(
                    detailed_loss[one_detailed_key].cpu().data.numpy())
            if self._epoch == 0:
                _avg_loss = np.mean(total_train_loss)
                print (f'step : {step}, loss : {_avg_loss}')
                for one_detailed_key in detailed_loss.keys():
                    print (f'{one_detailed_key} : {detailed_loss[one_detailed_key].cpu().data.numpy()}',
                           end=" ")
                print("")
        tqdm_loader.close()
        # average loss
        avg_loss = np.mean(total_train_loss)
        avg_detailed_loss = {key: np.mean(ele) for key, ele in total_detailed_loss.items()}
        return avg_loss, avg_detailed_loss
    def feed_into_net(self, batch, mode):
        """Run one batch through the network.

        Concatenates color/depth/normal/boundary/mask along the channel
        axis, forwards it (data-parallel across ``device_ids`` when
        training on CUDA), then zeros the output wherever the rendered
        depth is zero.

        :param batch: dict with tensors 'color', 'depth', 'normal', 'mask',
            'boundary', 'render_depth' (shapes assumed NCHW — TODO confirm).
        :param mode: 'train' or 'eval'.
        :return: {'output_depth': masked depth, 'ori_output_depth': raw depth}
        """
        assert mode in {'train', 'eval'}
        # load into GPU
        if self.use_cuda:
            for key in batch:
                if isinstance(batch[key], torch.Tensor):
                    batch[key] = batch[key].to(self.device_ids[0])
        # process batch data
        color = batch['color']
        depth = batch['depth']
        normal = batch['normal']
        mask = batch['mask']
        boundary = batch['boundary']
        render_depth = batch['render_depth']
        # extract feature and feed into model
        feature = torch.cat([color, depth, normal, boundary, mask], dim=1)
        if self.use_cuda and mode == 'train':
            output_depth = torch.nn.parallel.data_parallel(
                self.model, feature, device_ids=self.device_ids)
        else:
            # Avoid SN in eval mode crash
            output_depth = self.model(feature)
        # process output
        output_mask = None
        # Zero out predictions where the rendered depth has no value.
        render_depth_mask = torch.ones_like(render_depth)
        render_depth_mask[render_depth == 0] = 0
        ori_output_depth = output_depth
        output_depth = ori_output_depth * render_depth_mask
        output = {'output_depth': output_depth,
                  'ori_output_depth': ori_output_depth}
        return output
    def calculate_loss(self, output, batch):
        """Calculate loss based on output and ground truth
        return:
            loss (torch.Tensor): the total loss calculated
            detailed_loss ({'loss_name': loss}): detailed loss with loss name
        """
        detailed_loss = {}
        for loss_func_key, this_loss_func, weight in self.loss_funcs:
            this_loss = this_loss_func(output, batch) * weight
            detailed_loss[loss_func_key] = this_loss
        loss = sum(detailed_loss.values())
        return loss, detailed_loss
    def update(self, loss):
        """One optimizer step: zero grads, backprop ``loss``, apply Adam."""
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
    def init_log(self):
        """Write the CSV header line and remember the column order in
        ``self._log_order`` so :meth:`save_loss_to_log` can match it.
        """
        def _append_log_order_msg(log_msg, valid):
            # Append one column per loss term; 'valid_' prefix for validation.
            for (loss_func_key, _, _) in self.loss_funcs:
                if valid:
                    loss_func_key = 'valid_' + loss_func_key
                self._log_order.append(loss_func_key)
                log_msg += ',%s' % loss_func_key
            return log_msg
        self._log_order = ['epoch', 'loss']
        log_msg = 'epoch,loss'
        # Per-term columns only matter when there is more than one loss term.
        if len(self.loss_funcs) > 1:
            log_msg = _append_log_order_msg(log_msg, False)
        self._log_order.append('valid_loss')
        log_msg += ',valid_loss'
        if len(self.loss_funcs) > 1 and self._config['validation']:
            log_msg = _append_log_order_msg(log_msg, True)
        self.save_log(log_msg)
    def save_loss_to_log(self, print_msg=True):
        """Append one CSV row for the current epoch, with columns in the
        exact order recorded by :meth:`init_log`.

        :param print_msg: also echo header and row to stdout.
        """
        log_msg = ''
        for order_name in self._log_order:
            if order_name == 'epoch':
                log_msg += f'{self._epoch}'
                continue
            # Columns starting with 'valid_' come from the validation pass.
            if 'valid_' in order_name and 'valid_' == order_name[:6]:
                if order_name == 'valid_loss':
                    log_msg += ',%.3f' % self.val_loss
                else:
                    key_name = order_name[6:]
                    log_msg += ',%.3f' % self.val_detailed_loss[key_name]
            else:
                if order_name == 'loss':
                    log_msg += ',%.3f' % self.train_loss
                else:
                    log_msg += ',%.3f' % self.train_detailed_loss[order_name]
        if print_msg:
            print(','.join(self._log_order))
            print(log_msg)
        self.save_log(log_msg)
    def change_model_state(self, state):
        """Switch the network between 'train' and 'eval' mode (affects
        layers such as dropout/batch-norm)."""
        if state == 'train':
            self.model.train()
        elif state == 'eval':
            self.model.eval()
| [
"numpy.mean",
"torch.ones_like",
"torch.nn.parallel.data_parallel",
"depth_completion.data.customed_collate_fn",
"os.path.isdir",
"os.mkdir",
"torch.no_grad",
"torch.cat",
"depth_completion.data.DepthDataset"
] | [((6279, 6304), 'numpy.mean', 'np.mean', (['total_valid_loss'], {}), '(total_valid_loss)\n', (6286, 6304), True, 'import numpy as np\n'), ((7833, 7858), 'numpy.mean', 'np.mean', (['total_train_loss'], {}), '(total_train_loss)\n', (7840, 7858), True, 'import numpy as np\n'), ((8583, 8639), 'torch.cat', 'torch.cat', (['[color, depth, normal, boundary, mask]'], {'dim': '(1)'}), '([color, depth, normal, boundary, mask], dim=1)\n', (8592, 8639), False, 'import torch\n'), ((9003, 9032), 'torch.ones_like', 'torch.ones_like', (['render_depth'], {}), '(render_depth)\n', (9018, 9032), False, 'import torch\n'), ((2485, 2551), 'depth_completion.data.DepthDataset', 'DepthDataset', (['dataset_name', "self._config['test_path']"], {'train': '(False)'}), "(dataset_name, self._config['test_path'], train=False)\n", (2497, 2551), False, 'from depth_completion.data import DepthDataset\n'), ((6339, 6351), 'numpy.mean', 'np.mean', (['ele'], {}), '(ele)\n', (6346, 6351), True, 'import numpy as np\n'), ((7893, 7905), 'numpy.mean', 'np.mean', (['ele'], {}), '(ele)\n', (7900, 7905), True, 'import numpy as np\n'), ((8713, 8798), 'torch.nn.parallel.data_parallel', 'torch.nn.parallel.data_parallel', (['self.model', 'feature'], {'device_ids': 'self.device_ids'}), '(self.model, feature, device_ids=self.device_ids\n )\n', (8744, 8798), False, 'import torch\n'), ((2901, 2938), 'os.path.isdir', 'os.path.isdir', (["self._config['output']"], {}), "(self._config['output'])\n", (2914, 2938), False, 'import os\n'), ((2956, 2988), 'os.mkdir', 'os.mkdir', (["self._config['output']"], {}), "(self._config['output'])\n", (2964, 2988), False, 'import os\n'), ((3147, 3213), 'depth_completion.data.DepthDataset', 'DepthDataset', (['dataset_name', "self._config['train_path']"], {'train': '(True)'}), "(dataset_name, self._config['train_path'], train=True)\n", (3159, 3213), False, 'from depth_completion.data import DepthDataset\n'), ((5257, 5272), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5270, 5272), 
False, 'import torch\n'), ((7446, 7471), 'numpy.mean', 'np.mean', (['total_train_loss'], {}), '(total_train_loss)\n', (7453, 7471), True, 'import numpy as np\n'), ((2811, 2844), 'depth_completion.data.customed_collate_fn', 'customed_collate_fn', (['dataset_name'], {}), '(dataset_name)\n', (2830, 2844), False, 'from depth_completion.data import customed_collate_fn\n'), ((3634, 3701), 'depth_completion.data.DepthDataset', 'DepthDataset', (['dataset_name', "self._config['valid_path']"], {'train': '(False)'}), "(dataset_name, self._config['valid_path'], train=False)\n", (3646, 3701), False, 'from depth_completion.data import DepthDataset\n'), ((3511, 3544), 'depth_completion.data.customed_collate_fn', 'customed_collate_fn', (['dataset_name'], {}), '(dataset_name)\n', (3530, 3544), False, 'from depth_completion.data import customed_collate_fn\n'), ((4005, 4038), 'depth_completion.data.customed_collate_fn', 'customed_collate_fn', (['dataset_name'], {}), '(dataset_name)\n', (4024, 4038), False, 'from depth_completion.data import customed_collate_fn\n')] |
import sys
sys.path.append('/Users/bryanwhiting/Dropbox/interviews/downstream/DataScienceInterview-Bryan/src')
import numpy as np
import plotnine as g
import pandas as pd
from bryan.mcmc import MCMC
# TODO: Placeholder for unit tests
# Testing the code (would do unit tests w/more time)
k = 26                   # number of fake groups
n_fake_datapoints = 10   # observations drawn per group

# Synthesize per-group ROAS (exponential) and cost (normal) samples,
# preserving the per-iteration exponential-then-normal draw order.
roas = {}
cost = {}
for i in range(k):
    roas[i] = np.random.exponential(i * 0.1, n_fake_datapoints)
    cost[i] = np.random.normal(i * 10, 10, n_fake_datapoints)

# Fit one MCMC model per synthetic data set.
r_mcmc = MCMC(data=roas, niter=500)
r_mcmc.fit()
r_mcmc.estimates_as_json()['theta']  # computed for inspection; value discarded
c_mcmc = MCMC(data=cost, niter=500)
c_mcmc.fit()
t = c_mcmc.estimates_as_json()

# Diagnostic plots for a handful of indices.
for idx in (1, 2, 5, 10, 20):
    r_mcmc.plot_theta(idx)
for idx in (1, 10, 15, 20, 25):
    c_mcmc.plot_theta(idx, burnin=False)
for idx in (1, 10, 100, 400):
    c_mcmc.plot_day(idx)

np.mean(t['tau2'])  # computed for inspection; value discarded
c_mcmc.plot_tau2()
c_mcmc.plot_hours()
| [
"numpy.random.normal",
"numpy.mean",
"numpy.random.exponential",
"bryan.mcmc.MCMC",
"sys.path.append"
] | [((11, 120), 'sys.path.append', 'sys.path.append', (['"""/Users/bryanwhiting/Dropbox/interviews/downstream/DataScienceInterview-Bryan/src"""'], {}), "(\n '/Users/bryanwhiting/Dropbox/interviews/downstream/DataScienceInterview-Bryan/src'\n )\n", (26, 120), False, 'import sys\n'), ((597, 623), 'bryan.mcmc.MCMC', 'MCMC', ([], {'data': 'roas', 'niter': '(500)'}), '(data=roas, niter=500)\n', (601, 623), False, 'from bryan.mcmc import MCMC\n'), ((705, 731), 'bryan.mcmc.MCMC', 'MCMC', ([], {'data': 'cost', 'niter': '(500)'}), '(data=cost, niter=500)\n', (709, 731), False, 'from bryan.mcmc import MCMC\n'), ((953, 971), 'numpy.mean', 'np.mean', (["t['tau2']"], {}), "(t['tau2'])\n", (960, 971), True, 'import numpy as np\n'), ((377, 426), 'numpy.random.exponential', 'np.random.exponential', (['(i * 0.1)', 'n_fake_datapoints'], {}), '(i * 0.1, n_fake_datapoints)\n', (398, 426), True, 'import numpy as np\n'), ((440, 487), 'numpy.random.normal', 'np.random.normal', (['(i * 10)', '(10)', 'n_fake_datapoints'], {}), '(i * 10, 10, n_fake_datapoints)\n', (456, 487), True, 'import numpy as np\n')] |
import sys
sys.path.append("./MPC/ThermalModels")
sys.path.append("..")
import numpy as np
import utils
# TODO distinguish between actions and add different noise correspondingly.
class SimulationTstat:
    """Simulates a thermostat by repeatedly applying the MPC thermal model.

    Each predicted temperature is perturbed with Gaussian noise whose mean
    and standard deviation come from the thermal model's per-action scoring
    error (set up by :meth:`set_gaussian_distributions`).
    """

    def __init__(self, mpc_thermal_model, curr_temperature):
        """
        :param mpc_thermal_model: object exposing ``thermal_model.score``,
            ``predict``, ``last_action`` and ``outside_temperature``
            (a dict of times to temperatures — TODO confirm ordering).
        :param curr_temperature: the starting zone temperature.
        """
        self.mpc_thermal_model = mpc_thermal_model
        self.temperature = curr_temperature
        self.time_step = 0
        # action -> [mean_error, std] of the model's prediction error.
        self.gaussian_distributions = {}

    def set_gaussian_distributions(self, X, y):
        """Needs to be called before calling .next_temperature. Fits one
        Gaussian noise distribution per action from the thermal model's
        scoring error on the data X, y.

        :param X: the test_data. pd.df with timeseries data and columns "action",
                "t_in", "t_out" and "zone_temperatures_*"
        :param y: the expected labels for X. In order of X data.
        """
        # One score call per action replaces the original five copy-pasted
        # blocks; behavior is identical (rmse is discarded in each).
        for action in (utils.NO_ACTION, utils.HEATING_ACTION,
                       utils.COOLING_ACTION, utils.TWO_STAGE_HEATING_ACTION,
                       utils.TWO_STAGE_COOLING_ACTION):
            _rmse, mean_error, std = self.mpc_thermal_model.thermal_model.score(
                X=X, y=y, scoreType=action)
            self.gaussian_distributions[action] = [mean_error, std]

    def next_temperature(self, debug=False):
        """Advance the simulation by one step and return the new temperature.

        Predicts for the last action chosen by Advise, adds Gaussian noise
        from the fitted error distribution, and increments ``time_step``.
        NOTE: Assumes that mpc_thermal_model has the outside temperatures it
        used to predict in Advise and the last action Advise predicted as
        the optimal action.

        :param debug: when True, also return the noise that was added.
        :return: the new temperature, or (temperature, noise) when debug.
        :raises RuntimeError: if no Gaussian was fitted for the action.
        """
        # inferring the last action from the mpc_thermal_model.
        action = self.mpc_thermal_model.last_action
        # Make sure we trained the gaussian.  Dict indexing raises KeyError,
        # not AttributeError as the original caught (that branch was dead).
        try:
            gaussian_mean, gaussian_std = self.gaussian_distributions[action]
        except KeyError:
            raise RuntimeError("You must train gaussian before predicting data!")
        self.time_step += 1
        # TODO fix the outside temperature. This is not quite right since the
        # dictionary need not be in order.  (next(iter(...)) replaces the
        # original ``.values()[0]``, which is a TypeError on Python 3.)
        curr_outside_temperature = next(
            iter(self.mpc_thermal_model.outside_temperature.values()))
        next_temperature = self.mpc_thermal_model.predict(
            self.temperature, action,
            outside_temperature=curr_outside_temperature, debug=False)[0]
        noise = np.random.normal(gaussian_mean, gaussian_std)
        self.temperature = next_temperature + noise
        if debug:
            return self.temperature, noise
        else:
            return self.temperature
| [
"numpy.random.normal",
"sys.path.append"
] | [((12, 50), 'sys.path.append', 'sys.path.append', (['"""./MPC/ThermalModels"""'], {}), "('./MPC/ThermalModels')\n", (27, 50), False, 'import sys\n'), ((51, 72), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (66, 72), False, 'import sys\n'), ((3689, 3734), 'numpy.random.normal', 'np.random.normal', (['gaussian_mean', 'gaussian_std'], {}), '(gaussian_mean, gaussian_std)\n', (3705, 3734), True, 'import numpy as np\n')] |
#./usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME> (1459333)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from os import path
import timeit
import numpy as np
from Pyfhel import PyCtxt, Pyfhel
from .util import createDir
class Encryption:
    def __init__(self,
                 verbosity = False,
                 p_modulus = 1964769281, # Plaintext modulus. All operations are modulo p. (t)
                 coeff_modulus = 8192, # Coefficient modulus (n)
                 batching = True,# Set to true to enable batching
                 poly_base = 2, # Polynomial base (x)
                 security_level = 128, # Security level equivalent in AES. 128 or 192. (10 || 12 rounds)
                 intDigits = 64, # Truncated positions for integer part.
                 fracDigits = 32, # Truncated positions for fractional part.,
                 relin_keys_count = 2, # The number of relinKeys will be generated/restored
                 relin_bitcount = 16, # [1,60] bigger is faster but noiser
                 relin_size = 4, # |cxtx| = K+1 ==> size at least K-1
                 base_dir = "storage/contexts/",
                 preprocess_dir = "storage/layers/preprocessed/",
                 precision = 4
                 ):
        """Create or restore a Pyfhel crypto context and its key material.

        The context and keys are cached on disk under ``base_dir`` in a
        directory keyed by (p_modulus, coeff_modulus); if those files exist
        they are restored, otherwise fresh ones are generated and saved.
        """
        self.verbosity = verbosity
        self.precision = precision
        self.t = p_modulus
        self.n = coeff_modulus
        self.batching = batching
        self.pbase = poly_base
        self.security = security_level
        self.idig = intDigits
        self.fdig = fracDigits
        self.relin_bits = relin_bitcount
        self.relin_size = relin_size
        self.py = Pyfhel()
        #Required directories
        # Directory layout: one context dir per (t, n) pair, with per-precision
        # encrypted-layer and key subdirectories beneath it.
        self.preprocess_dir = preprocess_dir
        self.base_dir = base_dir
        self.ctxt_dir = base_dir + "ctx_" + str(p_modulus) + "_" + str(coeff_modulus)
        self.enclayers_dir = self.ctxt_dir + "/layers/precision_" + str(precision)
        self.keys_dir = self.ctxt_dir + "/keys"
        createDir(self.enclayers_dir)
        context = self.ctxt_dir + "/context.ctxt"
        # Restore a previously saved context, or generate and persist a new one.
        if path.exists(context):
            if self.verbosity:
                print("Restoring the crypto context...")
            self.py.restoreContext(context)
        else:
            if self.verbosity:
                print("Creating the crypto context...")
            self.py.contextGen(
                p_modulus, coeff_modulus,
                batching,
                poly_base,
                security_level,
                intDigits,
                fracDigits
            )
            self.py.saveContext(context)
        # Same pattern for the key material: restore if cached, else generate.
        if path.exists(self.keys_dir):
            if self.verbosity:
                print("Restoring keys from local storage...")
            self.py.restorepublicKey(self.keys_dir + "/public.key")
            self.py.restoresecretKey(self.keys_dir + "/secret.key")
            self.py.restorerelinKey(self.keys_dir + "/relin.keys")
        else:
            if self.verbosity:
                print("Creating keys for this contest...")
            createDir(self.keys_dir)
            self.py.keyGen()
            if self.verbosity:
                print("Generating " + str(relin_keys_count) + " relinearization key(s)")
            for i in range(relin_keys_count):
                self.py.relinKeyGen(relin_bitcount, relin_size)
            self.py.saverelinKey(self.keys_dir + "/relin.keys")
            self.py.savepublicKey(self.keys_dir + "/public.key")
            self.py.savesecretKey(self.keys_dir + "/secret.key")
        if self.verbosity:
            print("Created with success with the following parameters:")
            self.context_info()
def context_info(self):
""" Print the local context information """
print("")
print("Context parameters")
print("============================")
print("Batch encoding: " + str(self.py.getflagBatch()))
print("Polynomial base: " + str(self.py.getbase()))
print("Frac digits: " + str(self.py.getfracDigits()))
print("Int digits: " + str(self.py.getintDigits()))
print("Plaintext coeff (m): " + str(self.py.getm()))
print("Slots fitting in a ctxt: " + str(self.py.getnSlots()))
print("Plaintext modulus (p): " + str(self.py.getp()))
print("Security level (AES): " + str(self.py.getsec()))
print("")
print("")
# =========================================================================
# CONVOLUTION LAYER
# -------------------------------------------------------------------------
# It is computed given the preprocessed input and the preprocessed
# weights and biases from the keras model.
# =========================================================================
    def convolution(self, size, kernel, stride):
        """Homomorphically evaluate the convolution layer.

        Pass 1 multiplies every encrypted input pixel by every encoded
        filter weight it participates in, caching the products under
        conv/pre; pass 2 sums the cached products per output position,
        adds the bias and stores one ciphertext per (position, filter)
        pair under conv/output.

        size   -- width/height of the (square) input image
        kernel -- width/height of the (square) convolution kernel
        stride -- convolution stride
        Returns the folder holding the encrypted layer output.
        """
        if self.verbosity:
            print("Computing Convolution")
            print("==================================")
        conv_folder = self.enclayers_dir + "/conv"
        pre_conv = conv_folder + "/pre"
        out_conv = conv_folder + "/output"
        if not path.exists(conv_folder):
            createDir(conv_folder)
        conv_w = self.preprocess_dir+"precision_"+ str(self.precision) + "/pre_0_conv2d_3.npy"
        conv_b = self.preprocess_dir+"precision_"+ str(self.precision) + "/pre_bias_0_conv2d_3.npy"
        if path.exists(pre_conv):
            print("(Pre)processed before. You can found it in " +
                  pre_conv + " folder.")
        elif not path.exists(conv_w):
            print("Convolution weights need to be preprocessed before (with precision "+
                  str(self.precision)+ ").")
            print("")
        else:
            createDir(pre_conv)
            filters = np.load(conv_w)
            start = timeit.default_timer()
            fshape = filters.shape
            # Flatten the kernel spatial dims: one row per kernel position.
            f = filters.reshape((fshape[0]*fshape[1],fshape[2]))
            conv_map = self.get_conv_map(size, kernel, stride)
            if(conv_map.shape[0] != f.shape[0]):
                raise Exception("Convolution map and filter shapes must match.")
            if self.verbosity:
                print("Convolution: output preprocessing...")
                print("0%")
            for x in range(f.shape[0]):
                for y in range(f.shape[1]):
                    w_filter = self.get_map(f[x,y])
                    for k in range(conv_map.shape[1]):
                        enc_pixel = self.getEncryptedPixel(conv_map[x,k])
                        # computing |self.n| dot products at time
                        res = self.py.multiply_plain(enc_pixel, w_filter, True)
                        f_name = pre_conv + "/pixel" + str(conv_map[x,k]) + "_filter" + str(y)
                        res.save(f_name)
                if self.verbosity:
                    perc = int(((x+1)/f.shape[0]) * 100)
                    print(str(perc)+"% (" + str(x+1) + "/" + str(f.shape[0]) + ")")
            stop = timeit.default_timer()
            if self.verbosity:
                print("Convolution: output preprocessed in " + str(stop-start) + " s.")
        if path.exists(out_conv):
            print("Processed before. You can found it in " +
                  out_conv + " folder.")
            print("")
        elif not path.exists(conv_b):
            print("Convolution biases need to be preprocessed before (with precision "+
                  str(self.precision)+ ").")
            print("")
        else:
            createDir(out_conv)
            biases = np.load(conv_b)
            start = timeit.default_timer()
            bshape = biases.shape
            windows = self.get_conv_windows(size,kernel,stride)
            wshape = windows.shape
            if self.verbosity:
                print("Convolution: output processing...")
                print("0%")
            for x in range(bshape[0]):
                encoded_bias = self.get_map(biases[x])
                for y in range(wshape[0]):
                    # Sum the cached pixel*weight products of window y, filter x.
                    local_sum = None
                    for k in range(wshape[1]):
                        f_name = pre_conv + "/pixel" + str(windows[y,k]) + "_filter" + str(x)
                        p = PyCtxt()
                        p.load(f_name,'batch')
                        if(local_sum == None):
                            local_sum = p
                        else:
                            local_sum = self.py.add(local_sum, p)
                    local_sum = self.py.add_plain(local_sum, encoded_bias)
                    file_name = out_conv + "/" + str(y) + "_filter" + str(x)
                    local_sum.save(file_name)
                if self.verbosity:
                    perc = int(((x+1)/bshape[0]) * 100)
                    print(str(perc)+"% (" + str(x+1) + "/" + str(bshape[0]) + ")")
            stop = timeit.default_timer()
            if self.verbosity:
                print("Convolution: output processed in " + str(stop-start) + " s.")
        print("")
        return out_conv
def _get_conv_window_(self, size, kernel):
""" Get the indices relative to the first convolutional window. """
res = []
x = 0
for i in range(kernel):
x = size * i
for j in range(kernel):
res.append(x+j)
return res
def _get_conv_indexes_(self, pixel, size, kernel, stride, padding = 0):
""" Slide the given index in the flatten volume returning all the indexes
to which the same convolution filter must be applied. """
res = []
output_size = int(((size - kernel + (2 * padding))/stride)) + 1
x = pixel
for i in range(output_size):
x = pixel + ((size * stride) * i)
for j in range(output_size):
res.append(x)
x += stride
return res
def get_conv_map(self, size, kernel, stride):
""" Return the convolutional map of the input volume (given its width)
according to the element index in its flatten version. """
window = self._get_conv_window_(size,kernel)
conv_map = []
for i in range(window.__len__()):
conv_map.append(self._get_conv_indexes_(window[i], size, kernel, stride))
return np.array(conv_map)
def get_conv_windows(self, size, kernel, stride):
cm = self.get_conv_map(size,kernel,stride)
windows = []
for i in range(cm.shape[1]):
w = []
for j in range(cm.shape[0]):
w.append(cm[j,i])
windows.append(w)
return np.array(windows)
def get_map(self, el):
el = [el] * self.n
return self._encode_arr_(el)
def get_enc_map(self, el):
el = [el] * self.n
return self._enc_arr_(el)
# =========================================================================
# FIRST DENSE LAYER
# -------------------------------------------------------------------------
# It is computed given the output files from the convolution layer and the
# preprocessed weights (filters) and biases from the model
# =========================================================================
    def dense1(self, input_shape):
        """Homomorphically evaluate the first dense layer (square activation).

        Consumes the encrypted convolution output, computes one dot product
        per output neuron (plus bias), squares the result (an HE-friendly
        activation) and stores one ciphertext per neuron under dense1/output.

        input_shape -- (height, width, filters) shape of the conv output.
        """
        if self.verbosity:
            print("Computing First Dense (square)")
            print("==================================")
        dense_folder = self.enclayers_dir + "/dense1"
        out_folder = dense_folder + "/output"
        conv_folder = self.enclayers_dir + "/conv"
        out_conv = conv_folder + "/output"
        wfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_2_dense_9.npy"
        bfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_bias_2_dense_9.npy"
        if not path.exists(dense_folder):
            createDir(dense_folder)
        if path.exists(out_folder):
            print("Processed before. You can found it in " +
                  out_folder + " folder.")
            print("")
        elif not path.exists(wfile) or not path.exists(bfile):
            raise Exception("First dense layer weights and biases need to be preprocessed before (with precision "+
                            str(self.precision)+ ").")
        elif not path.exists(out_conv):
            raise Exception("Convolution output required. Please run Encryption.convolution(...) before.")
        else:
            createDir(out_folder)
            w = np.load(wfile)
            b = np.load(bfile)
            start = timeit.default_timer()
            per = input_shape[0] * input_shape[1]
            filters = input_shape[2]
            flat = per * filters
            if flat != w.shape[0] :
                raise Exception("Input shape " + str(input_shape) +
                                " is not compatible with preprocessed input " +
                                str(w.shape))
            if w.shape[1] != b.shape[0]:
                raise Exception("Preprocessed weights "+
                                str(w.shape) +" and biases "+ str(b.shape) +
                                "are incopatible.")
            if self.verbosity:
                print("First Dense: output processing...")
                print("0%")
            for x in range(w.shape[1]):
                # Dot product of neuron x against the flattened conv output.
                local_sum = None
                for i in range(per):
                    for j in range(filters):
                        fname = out_conv + "/" + str(i) + "_filter" + str(j)
                        p = PyCtxt()
                        p.load(fname,'batch')
                        row = (i*filters + j)
                        encw = self.get_map(w[row][x])
                        el = self.py.multiply_plain(p, encw, True)
                        if(local_sum == None):
                            local_sum = el
                        else:
                            local_sum = self.py.add(local_sum, el)
                enc_b = self.get_map(b[x])
                ts = self.py.add_plain(local_sum, enc_b, True)
                ts = self.py.square(ts)
                out_name = out_folder + "/square_"+str(x)
                ts.save(out_name)
                if self.verbosity:
                    perc = int(((x+1)/w.shape[1]) * 100)
                    print(str(perc)+"% (" + str(x+1) + "/" + str(w.shape[1]) + ")")
            stop = timeit.default_timer()
            if self.verbosity:
                print("First Dense: output processed in " + str(stop-start) + " s.")
        print("")
# =========================================================================
# SECOND DENSE LAYER
# -------------------------------------------------------------------------
# It is computed given the output files from first dense layer and the
# weights (filters) and biases preprocessed from the model
# =========================================================================
    def dense2(self):
        """Homomorphically evaluate the second dense layer (square activation).

        Reads the encrypted outputs of dense1, computes one dot product per
        output neuron, adds the bias, squares the result and caches one
        ciphertext per neuron under dense2/output.
        """
        if self.verbosity:
            print("Computing Second Dense (square)")
            print("==================================")
        input_folder = self.enclayers_dir + "/dense1/output"
        dense_folder = self.enclayers_dir + "/dense2"
        out_folder = dense_folder + "/output"
        wfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_3_dense_10.npy"
        bfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_bias_3_dense_10.npy"
        if not path.exists(dense_folder):
            createDir(dense_folder)
        if path.exists(out_folder):
            print("Processed before. You can found it in " +
                  out_folder + " folder.")
            print("")
        elif not path.exists(wfile) or not path.exists(bfile):
            raise Exception("Second dense layer weights and biases need to be preprocessed before (with precision "+
                            str(self.precision)+ ").")
        elif not path.exists(input_folder):
            raise Exception("First dense output required. Please run Encryption.dense1(...) before.")
        else:
            createDir(out_folder)
            w = np.load(wfile)
            b = np.load(bfile)
            if w.shape[1] != b.shape[0]:
                raise Exception("Preprocessed weights "+
                                str(w.shape) +" and biases "+ str(b.shape) +
                                "are incopatible.")
            if self.verbosity:
                print("Second Dense: output processing...")
                print("0%")
            start = timeit.default_timer()
            for x in range(w.shape[1]):
                # Dot product of neuron x against the dense1 activations.
                local_sum = None
                for i in range(w.shape[0]):
                    fname = input_folder + "/square_" + str(i)
                    p = PyCtxt()
                    p.load(fname,'batch')
                    encw = self.get_map(w[i][x])
                    el = self.py.multiply_plain(p, encw, True)
                    if(local_sum == None):
                        local_sum = el
                    else:
                        local_sum = self.py.add(local_sum, el)
                enc_b = self.get_map(b[x])
                ts = self.py.add_plain(local_sum, enc_b, True)
                ts = self.py.square(ts)
                out_name = out_folder + "/square_"+str(x)
                ts.save(out_name)
                if self.verbosity:
                    perc = int(((x+1)/w.shape[1]) * 100)
                    print(str(perc)+"% (" + str(x+1) + "/" + str(w.shape[1]) + ")")
            stop = timeit.default_timer()
            if self.verbosity:
                print("Second Dense: output processed in " + str(stop-start) + " s.")
        print("")
# =========================================================================
# FULLY CONNECTED LAYER
# -------------------------------------------------------------------------
# It is computed given the output files from the second dense layer and the
# weights (filters) and biases preprocessed from the model
# =========================================================================
    def fully_connected(self):
        """Homomorphically evaluate the final (output) layer.

        Same structure as dense2 but without the square activation: one
        dot product plus bias per class, cached as fc_<class> ciphertexts
        under fullyconnected/output.
        """
        if self.verbosity:
            print("Computing Fully Connected")
            print("==================================")
        input_folder = self.enclayers_dir + "/dense2/output"
        fc_folder = self.enclayers_dir + "/fullyconnected"
        out_folder = fc_folder + "/output"
        wfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_4_dense_11.npy"
        bfile = "storage/layers/preprocessed/precision_"+ str(self.precision) + "/pre_bias_4_dense_11.npy"
        if not path.exists(fc_folder):
            createDir(fc_folder)
        if path.exists(out_folder):
            print("Processed before. You can found it in " +
                  out_folder + " folder.")
            print("")
        elif not path.exists(wfile) or not path.exists(bfile):
            raise Exception("Fully connected layer weights and biases need to be preprocessed before (with precision "+
                            str(self.precision)+ ").")
        elif not path.exists(input_folder):
            raise Exception("Second dense output required. Please run Encryption.dense2(...) before.")
        else:
            createDir(out_folder)
            w = np.load(wfile)
            b = np.load(bfile)
            if w.shape[1] != b.shape[0]:
                raise Exception("Preprocessed weights "+
                                str(w.shape) +" and biases "+ str(b.shape) +
                                "are incopatible.")
            if self.verbosity:
                print("Fully Connected: output processing...")
                print("0%")
            start = timeit.default_timer()
            for x in range(w.shape[1]):
                # Dot product of class x against the dense2 activations.
                local_sum = None
                for i in range(w.shape[0]):
                    fname = input_folder + "/square_" + str(i)
                    p = PyCtxt()
                    p.load(fname,'batch')
                    encw = self.get_map(w[i][x])
                    el = self.py.multiply_plain(p, encw, True)
                    if(local_sum == None):
                        local_sum = el
                    else:
                        local_sum = self.py.add(local_sum, el)
                enc_b = self.get_map(b[x])
                ts = self.py.add_plain(local_sum, enc_b, True)
                out_name = out_folder + "/fc_"+str(x)
                ts.save(out_name)
                if self.verbosity:
                    perc = int(((x+1)/w.shape[1]) * 100)
                    print(str(perc)+"% (" + str(x+1) + "/" + str(w.shape[1]) + ")")
            stop = timeit.default_timer()
            if self.verbosity:
                print("Fully Connected: output processed in " + str(stop-start) + " s.")
        print("")
    def get_results(self, test_labels):
        """Decrypt the fully-connected output and return the raw scores.

        Returns an array with one row per batch slot (sample) and one
        column per class.  Values are still in the modular integer
        encoding; use ``decode_tensor`` / ``predict`` to map them back
        to reals.
        """
        dense_folder = self.enclayers_dir + "/fullyconnected"
        out_folder = dense_folder + "/output"
        el = []
        for i in range(test_labels.shape[1]):
            file = out_folder + "/fc_"+str(i)
            p = PyCtxt()
            p.load(file,'batch')
            ptxt = self.py.decrypt(p)
            ptxt = self.py.decodeBatch(ptxt)
            # Transpose on the fly: el[j] collects the class-i score of slot j.
            if(el.__len__() <= i):
                el.append([])
            for j in range(ptxt.__len__()):
                if(el.__len__() <= j):
                    el.append([ptxt[j]])
                else:
                    el[j].append(ptxt[j])
        return np.array(el)
    def predict(self, test_labels):
        """Decrypt the network output, decode it and report batch accuracy.

        test_labels -- one-hot labels, shape (n_samples, n_classes); the
        ciphertext batch scores ``self.n`` samples at once.
        """
        if self.verbosity:
            print("Computing Prediction")
            print("==================================")
        fc_folder = self.enclayers_dir + "/fullyconnected"
        out_folder = fc_folder + "/output"
        if not path.exists(out_folder):
            raise Exception("You need to compute the fully connected layer before.")
        print(test_labels[0])
        # Only q predictions are done simultaneously
        # for i in range(self.n)
        el = []
        start = timeit.default_timer()
        for i in range(test_labels.shape[1]):
            file = out_folder + "/fc_"+str(i)
            p = PyCtxt()
            p.load(file,'batch')
            ptxt = self.py.decrypt(p)
            ptxt = self.py.decodeBatch(ptxt)
            ptxt = self.decode_tensor(ptxt, self.t, self.precision)
            # Transpose on the fly: el[j] collects the class-i score of slot j.
            if(el.__len__() <= i):
                el.append([])
            for j in range(ptxt.__len__()):
                if(el.__len__() <= j):
                    el.append([ptxt[j]])
                else:
                    el[j].append(ptxt[j])
        el = np.array(el)
        print(el.shape)
        print(el[0])
        pos = 0
        for i in range(el.shape[0]):
            mp = np.argmax(el[i])
            ml = np.argmax(test_labels[i])
            if(mp == ml):
                pos+=1
        stop = timeit.default_timer()
        print("Computation time: " + str(stop-start) + " s.")
        print("Positive prediction: " + str(pos))
        print("Negative prediction: " + str(self.n - pos))
        acc = (pos/self.n) * 100
        print("Model Accurancy:" + str(acc) + "%")
def _encode_(self, to_encode, t, precision):
""" Check encode for the given value:
Admitted intervals:
+ : [0, t/2]
- : [(t/2)+1, t] ==> [-((t/2)+1), 0]
Ex:
positive: [0,982384640] ==> [0,982384640] ==> [0, t/2]
negative: [-982384640, 0] ==> [982384641, 1964769281] ==> [(t/2)+1, t]
"""
precision = pow(10, precision)
val = round((to_encode * precision))
t2 = t/2
if val < 0:
minval = -(t2+1)
if val < minval:
raise Exception("The value to encode (" +
str(val) + ") is smaller than -((t/2)+1) = " +
str(minval))
else:
return (t + val)
else:
if val > t2:
raise Exception("The value to encode (" +
str(val) + ") is larger than t/2 = " + str(t2))
else:
return val
def _decode_(self, to_decode, t, precision):
""" Decode the value encoded with _encode_ """
t2 = t/2
if to_decode > t2:
return (to_decode-t) / pow(10, precision)
else:
return to_decode / pow(10, precision)
def decode_tensor(self, tensor, t, precision):
ret = []
for i in range(tensor.__len__()):
ret.append(self._decode_(tensor[i], t, precision))
return np.array(ret)
    def encrypt_input(self, get_result = False):
        """Encrypt the (preprocessed) input layer, one ciphertext per pixel.

        Pixel ``k`` of every image in the batch is packed into the slots of
        a single ciphertext, written to input/pixel_<k>.pyctxt (existing
        files are reused by ``_enc_arr_``).

        get_result -- when True, the array of ciphertexts is built and
        returned even if the folder already exists.
        """
        pre_input_file = self.preprocess_dir + "precision_" + str(self.precision) + "/pre_input.npy"
        if not path.exists(pre_input_file):
            raise Exception("Preprocessed input not found in " + pre_input_file +
                            " please run Encryption.preprocess before.")
        input_folder = self.enclayers_dir + "/input"
        if path.exists(input_folder):
            print("Input layer encrypted before. You can found it in: " + input_folder)
            if not get_result:
                return None
        createDir(input_folder)
        pre_input = np.load(self.preprocess_dir + "precision_" +
                            str(self.precision) + "/pre_input.npy")
        if self.verbosity:
            print("")
            print("Encrypting (preprocessed) input layer with shape " +
                  str(pre_input.shape)+"...")
        input_dim, dim, dim1 = pre_input.shape
        pre_flat = pre_input.flatten()
        arr = []
        pixel_arr_dim = dim*dim1
        # Group by pixel index: arr[p] holds pixel p of every image in the set.
        for x in range(pre_flat.__len__()):
            if x < pixel_arr_dim:
                arr.append([pre_flat[x]])
            else:
                arr[(x % pixel_arr_dim)].append(pre_flat[x])
        arr = np.array(arr)
        enc = []
        for i in range(arr.shape[0]):
            fname = input_folder+'/pixel_'+ str(i) + ".pyctxt"
            enc.append(self._enc_arr_(arr[i], fname))
        if self.verbosity:
            print("Input layer encrypted with success in " +
                  str(enc.__len__()) + " files (one per pixel)")
        return np.array(enc)
def getEncryptedPixel(self, index):
pixel_file = self.enclayers_dir + "/input/pixel_" + str(index) + ".pyctxt"
p = PyCtxt()
p.load(pixel_file,'batch')
return p
def _encode_arr_(self, arr):
if not self.py.getflagBatch() :
raise Exception("You need to initialize Batch for this context.")
res = []
for x in range(self.n):
res.append(arr[x])
res = np.array(res)
encoded = self.py.encodeBatch(res)
return encoded
def _enc_arr_(self, arr, file_name = None):
if not self.py.getflagBatch() :
raise Exception("You need to initialize Batch for this context.")
if file_name != None:
if path.exists(file_name):
ct = PyCtxt()
ct.load(file_name,'batch')
return ct
res = []
for x in range(self.n):
res.append(arr[x])
res = np.array(res)
encoded = self.py.encodeBatch(res)
encrypted = self.py.encryptPtxt(encoded)
if file_name != None:
encrypted.save(file_name)
return encrypted
def preprocess(self,
model, test_set):
""" Start the preprocessing of the NN input and weights """
self._pre_process_input_(model, test_set,)
self._pre_process_layers_(model)
def _pre_process_input_(self,model, test_set):
""" Preprocess (encode) the input and save it in laysers/pre_input file """
input_size, input_dim, input_dim1, el_index = test_set.shape
if(input_size < self.n):
raise Exception("Too small input set. It must be at least " +
str(self.n) + " len. " + str(input_size) + "given")
base_dir = self.preprocess_dir + "precision_" + str(self.precision) + "/"
createDir(base_dir, False)
if path.exists(base_dir + 'pre_input.npy'):
print("")
print("Input layer encoded before. You can found it in " +
base_dir + 'pre_input.npy')
print("")
else:
encoded_input = np.empty(
shape=(input_size,input_dim,input_dim),
dtype=np.uint64)
if self.verbosity :
print("")
print("Processing input...")
print("=====================================================")
print("Input shape: " + str(test_set.shape))
print("Precision: " + str(self.precision))
print("")
for i in range(input_size):
for x in range(input_dim):
for y in range(input_dim1):
encoded_input[i,x,y] = self._encode_(
test_set[i,x,y,0].item(),
self.t,
self.precision)
if self.verbosity :
print("Saving preprocessed input...")
print("Input shape: " + str(encoded_input.shape))
np.save("./"+base_dir +"pre_input", encoded_input)
if self.verbosity :
print("Preprocessed input saved.")
encoded_input = None
def _pre_process_layers_(self, model):
""" Preprocess (encode) NN weights and biases """
base_dir = self.preprocess_dir + "precision_" + str(self.precision) + "/"
createDir(base_dir, False)
for i in range(model.layers.__len__()):
self._pre_process_layer_(model.layers[i], i)
    def _pre_process_layer_(self, layer, index = 0):
        """Encode one Keras layer's weights and biases and cache them on disk.

        The convolutional layer (weights hard-coded to shape (3,3,1,5)) has
        its single input channel squeezed away before encoding; dense layers
        are encoded as-is.  Layers without weights are reported as not
        pre-processable.
        """
        base_dir = self.preprocess_dir + "precision_" + str(self.precision) + "/"
        if self.verbosity :
            print("")
            print("Processing the " + str(index) + "_" + str(layer.name) +
                  " layer...")
            print("=========================================================")
        if(path.exists(base_dir +"pre_"+ str(index) + "_" +
                       str(layer.name) + ".npy")):
            print("Layer prepocessed before. You can found it in " +
                  base_dir +" folder.")
        else:
            if(layer.get_weights().__len__() > 0):
                weights = layer.get_weights()[0]
                biases = layer.get_weights()[1]
                #encoding layer weights and biases
                encoded_weights = None
                encoded_biases = np.empty(shape=biases.shape, dtype=np.uint64)
                if self.verbosity :
                    print("Weights tensor shape: " + str(weights.shape))
                    # NOTE(review): the line below prints weights.shape for the
                    # bias report — likely meant biases.shape; confirm intent.
                    print("Biases tensor shape: " + str(weights.shape))
                    print("")
                #The convolutional layer
                if(weights.shape == (3,3,1,5)):
                    encoded_weights = np.empty(shape=(3,3,5),
                                               dtype=np.uint64)
                    for i in range(3):
                        for x in range(3):
                            for y in range(5):
                                encoded_weights[i, x, y] = self._encode_(
                                    weights[i, x, 0, y].item(),
                                    self.t,
                                    self.precision)
                else:
                    encoded_weights = np.empty(shape=weights.shape,
                                               dtype=np.uint64)
                    for i in range(weights.shape[0]):
                        for x in range(weights.shape[1]):
                            encoded_weights[i,x] = self._encode_(
                                weights[i, x].item(),
                                self.t,
                                self.precision)
                if self.verbosity:
                    print("1/3) Weights encoded with success.")
                for i in range(biases.shape[0]):
                    encoded_biases[i] = self._encode_(
                        biases[i].item(),
                        self.t,
                        self.precision)
                if self.verbosity:
                    print("2/3) Biases encoded with success.")
                np.save('./' + base_dir +'pre_' + str(index) + "_" +
                        str(layer.name), encoded_weights)
                np.save('./'+ base_dir + 'pre_bias_' + str(index) + "_" +
                        str(layer.name), encoded_biases)
                if self.verbosity:
                    print("3/3) Layer " + str(layer.name)+"_"+str(index)+" weights and biases saved.")
                    print("")
                    print("Layer precomputation ends with success.")
                    print("")
                encoded_weights = None
                encoded_biases = None
            else:
                print("[ERR] This layer is not pre processable.")
                print("")
| [
"os.path.exists",
"timeit.default_timer",
"numpy.argmax",
"Pyfhel.Pyfhel",
"numpy.array",
"numpy.empty",
"numpy.load",
"Pyfhel.PyCtxt",
"numpy.save"
] | [((1640, 1648), 'Pyfhel.Pyfhel', 'Pyfhel', ([], {}), '()\n', (1646, 1648), False, 'from Pyfhel import PyCtxt, Pyfhel\n'), ((2128, 2148), 'os.path.exists', 'path.exists', (['context'], {}), '(context)\n', (2139, 2148), False, 'from os import path\n'), ((2700, 2726), 'os.path.exists', 'path.exists', (['self.keys_dir'], {}), '(self.keys_dir)\n', (2711, 2726), False, 'from os import path\n'), ((5620, 5641), 'os.path.exists', 'path.exists', (['pre_conv'], {}), '(pre_conv)\n', (5631, 5641), False, 'from os import path\n'), ((7541, 7562), 'os.path.exists', 'path.exists', (['out_conv'], {}), '(out_conv)\n', (7552, 7562), False, 'from os import path\n'), ((10955, 10973), 'numpy.array', 'np.array', (['conv_map'], {}), '(conv_map)\n', (10963, 10973), True, 'import numpy as np\n'), ((11290, 11307), 'numpy.array', 'np.array', (['windows'], {}), '(windows)\n', (11298, 11307), True, 'import numpy as np\n'), ((12676, 12699), 'os.path.exists', 'path.exists', (['out_folder'], {}), '(out_folder)\n', (12687, 12699), False, 'from os import path\n'), ((16756, 16779), 'os.path.exists', 'path.exists', (['out_folder'], {}), '(out_folder)\n', (16767, 16779), False, 'from os import path\n'), ((20317, 20340), 'os.path.exists', 'path.exists', (['out_folder'], {}), '(out_folder)\n', (20328, 20340), False, 'from os import path\n'), ((23465, 23477), 'numpy.array', 'np.array', (['el'], {}), '(el)\n', (23473, 23477), True, 'import numpy as np\n'), ((24085, 24107), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (24105, 24107), False, 'import timeit\n'), ((24762, 24774), 'numpy.array', 'np.array', (['el'], {}), '(el)\n', (24770, 24774), True, 'import numpy as np\n'), ((25046, 25068), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (25066, 25068), False, 'import timeit\n'), ((26894, 26907), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (26902, 26907), True, 'import numpy as np\n'), ((27467, 27492), 'os.path.exists', 'path.exists', (['input_folder'], {}), 
'(input_folder)\n', (27478, 27492), False, 'from os import path\n'), ((28388, 28401), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (28396, 28401), True, 'import numpy as np\n'), ((28783, 28796), 'numpy.array', 'np.array', (['enc'], {}), '(enc)\n', (28791, 28796), True, 'import numpy as np\n'), ((28941, 28949), 'Pyfhel.PyCtxt', 'PyCtxt', ([], {}), '()\n', (28947, 28949), False, 'from Pyfhel import PyCtxt, Pyfhel\n'), ((29283, 29296), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (29291, 29296), True, 'import numpy as np\n'), ((29822, 29835), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (29830, 29835), True, 'import numpy as np\n'), ((30876, 30915), 'os.path.exists', 'path.exists', (["(base_dir + 'pre_input.npy')"], {}), "(base_dir + 'pre_input.npy')\n", (30887, 30915), False, 'from os import path\n'), ((5335, 5359), 'os.path.exists', 'path.exists', (['conv_folder'], {}), '(conv_folder)\n', (5346, 5359), False, 'from os import path\n'), ((12593, 12618), 'os.path.exists', 'path.exists', (['dense_folder'], {}), '(dense_folder)\n', (12604, 12618), False, 'from os import path\n'), ((16673, 16698), 'os.path.exists', 'path.exists', (['dense_folder'], {}), '(dense_folder)\n', (16684, 16698), False, 'from os import path\n'), ((20240, 20262), 'os.path.exists', 'path.exists', (['fc_folder'], {}), '(fc_folder)\n', (20251, 20262), False, 'from os import path\n'), ((22995, 23003), 'Pyfhel.PyCtxt', 'PyCtxt', ([], {}), '()\n', (23001, 23003), False, 'from Pyfhel import PyCtxt, Pyfhel\n'), ((23800, 23823), 'os.path.exists', 'path.exists', (['out_folder'], {}), '(out_folder)\n', (23811, 23823), False, 'from os import path\n'), ((24226, 24234), 'Pyfhel.PyCtxt', 'PyCtxt', ([], {}), '()\n', (24232, 24234), False, 'from Pyfhel import PyCtxt, Pyfhel\n'), ((24901, 24917), 'numpy.argmax', 'np.argmax', (['el[i]'], {}), '(el[i])\n', (24910, 24917), True, 'import numpy as np\n'), ((24935, 24960), 'numpy.argmax', 'np.argmax', (['test_labels[i]'], {}), 
'(test_labels[i])\n', (24944, 24960), True, 'import numpy as np\n'), ((27200, 27227), 'os.path.exists', 'path.exists', (['pre_input_file'], {}), '(pre_input_file)\n', (27211, 27227), False, 'from os import path\n'), ((29587, 29609), 'os.path.exists', 'path.exists', (['file_name'], {}), '(file_name)\n', (29598, 29609), False, 'from os import path\n'), ((31147, 31214), 'numpy.empty', 'np.empty', ([], {'shape': '(input_size, input_dim, input_dim)', 'dtype': 'np.uint64'}), '(shape=(input_size, input_dim, input_dim), dtype=np.uint64)\n', (31155, 31214), True, 'import numpy as np\n'), ((32165, 32218), 'numpy.save', 'np.save', (["('./' + base_dir + 'pre_input')", 'encoded_input'], {}), "('./' + base_dir + 'pre_input', encoded_input)\n", (32172, 32218), True, 'import numpy as np\n'), ((5781, 5800), 'os.path.exists', 'path.exists', (['conv_w'], {}), '(conv_w)\n', (5792, 5800), False, 'from os import path\n'), ((6052, 6067), 'numpy.load', 'np.load', (['conv_w'], {}), '(conv_w)\n', (6059, 6067), True, 'import numpy as np\n'), ((6101, 6123), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (6121, 6123), False, 'import timeit\n'), ((7362, 7384), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (7382, 7384), False, 'import timeit\n'), ((7732, 7751), 'os.path.exists', 'path.exists', (['conv_b'], {}), '(conv_b)\n', (7743, 7751), False, 'from os import path\n'), ((8014, 8029), 'numpy.load', 'np.load', (['conv_b'], {}), '(conv_b)\n', (8021, 8029), True, 'import numpy as np\n'), ((8063, 8085), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (8083, 8085), False, 'import timeit\n'), ((9422, 9444), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (9442, 9444), False, 'import timeit\n'), ((29632, 29640), 'Pyfhel.PyCtxt', 'PyCtxt', ([], {}), '()\n', (29638, 29640), False, 'from Pyfhel import PyCtxt, Pyfhel\n'), ((33746, 33791), 'numpy.empty', 'np.empty', ([], {'shape': 'biases.shape', 'dtype': 'np.uint64'}), 
'(shape=biases.shape, dtype=np.uint64)\n', (33754, 33791), True, 'import numpy as np\n'), ((12871, 12889), 'os.path.exists', 'path.exists', (['wfile'], {}), '(wfile)\n', (12882, 12889), False, 'from os import path\n'), ((12897, 12915), 'os.path.exists', 'path.exists', (['bfile'], {}), '(bfile)\n', (12908, 12915), False, 'from os import path\n'), ((13121, 13142), 'os.path.exists', 'path.exists', (['out_conv'], {}), '(out_conv)\n', (13132, 13142), False, 'from os import path\n'), ((13354, 13368), 'numpy.load', 'np.load', (['wfile'], {}), '(wfile)\n', (13361, 13368), True, 'import numpy as np\n'), ((13385, 13399), 'numpy.load', 'np.load', (['bfile'], {}), '(bfile)\n', (13392, 13399), True, 'import numpy as np\n'), ((13433, 13455), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (13453, 13455), False, 'import timeit\n'), ((15454, 15476), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (15474, 15476), False, 'import timeit\n'), ((16951, 16969), 'os.path.exists', 'path.exists', (['wfile'], {}), '(wfile)\n', (16962, 16969), False, 'from os import path\n'), ((16977, 16995), 'os.path.exists', 'path.exists', (['bfile'], {}), '(bfile)\n', (16988, 16995), False, 'from os import path\n'), ((17202, 17227), 'os.path.exists', 'path.exists', (['input_folder'], {}), '(input_folder)\n', (17213, 17227), False, 'from os import path\n'), ((17434, 17448), 'numpy.load', 'np.load', (['wfile'], {}), '(wfile)\n', (17441, 17448), True, 'import numpy as np\n'), ((17465, 17479), 'numpy.load', 'np.load', (['bfile'], {}), '(bfile)\n', (17472, 17479), True, 'import numpy as np\n'), ((17891, 17913), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (17911, 17913), False, 'import timeit\n'), ((18976, 18998), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (18996, 18998), False, 'import timeit\n'), ((20512, 20530), 'os.path.exists', 'path.exists', (['wfile'], {}), '(wfile)\n', (20523, 20530), False, 'from os import path\n'), 
((20538, 20556), 'os.path.exists', 'path.exists', (['bfile'], {}), '(bfile)\n', (20549, 20556), False, 'from os import path\n'), ((20766, 20791), 'os.path.exists', 'path.exists', (['input_folder'], {}), '(input_folder)\n', (20777, 20791), False, 'from os import path\n'), ((20999, 21013), 'numpy.load', 'np.load', (['wfile'], {}), '(wfile)\n', (21006, 21013), True, 'import numpy as np\n'), ((21030, 21044), 'numpy.load', 'np.load', (['bfile'], {}), '(bfile)\n', (21037, 21044), True, 'import numpy as np\n'), ((21459, 21481), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (21479, 21481), False, 'import timeit\n'), ((22500, 22522), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (22520, 22522), False, 'import timeit\n'), ((34189, 34231), 'numpy.empty', 'np.empty', ([], {'shape': '(3, 3, 5)', 'dtype': 'np.uint64'}), '(shape=(3, 3, 5), dtype=np.uint64)\n', (34197, 34231), True, 'import numpy as np\n'), ((34722, 34768), 'numpy.empty', 'np.empty', ([], {'shape': 'weights.shape', 'dtype': 'np.uint64'}), '(shape=weights.shape, dtype=np.uint64)\n', (34730, 34768), True, 'import numpy as np\n'), ((8731, 8739), 'Pyfhel.PyCtxt', 'PyCtxt', ([], {}), '()\n', (8737, 8739), False, 'from Pyfhel import PyCtxt, Pyfhel\n'), ((18133, 18141), 'Pyfhel.PyCtxt', 'PyCtxt', ([], {}), '()\n', (18139, 18141), False, 'from Pyfhel import PyCtxt, Pyfhel\n'), ((21701, 21709), 'Pyfhel.PyCtxt', 'PyCtxt', ([], {}), '()\n', (21707, 21709), False, 'from Pyfhel import PyCtxt, Pyfhel\n'), ((14498, 14506), 'Pyfhel.PyCtxt', 'PyCtxt', ([], {}), '()\n', (14504, 14506), False, 'from Pyfhel import PyCtxt, Pyfhel\n')] |
from Optimithon import Base
from Optimithon import QuasiNewton
from numpy import array, sin, pi
from scipy.optimize import minimize
fun = lambda x: sin(x[0] + x[1]) + (x[0] - x[1]) ** 2 - 1.5 * x[0] + 2.5 * x[1] + 1.
x0 = array((0., 0.))
print(fun(x0))
sol1 = minimize(fun, x0, method='COBYLA')
sol2 = minimize(fun, x0, method='SLSQP')
print("solution according to 'COBYLA':")
print(sol1)
print("solution according to 'SLSQP':")
print(sol2)
OPTIM = Base(fun,
method=QuasiNewton, x0=x0, # max_lngth=100.,
t_method='Cauchy_x', # 'Cauchy_x', 'ZeroGradient',
dd_method='BFGS',
# 'Newton', 'SR1', 'HestenesStiefel', 'PolakRibiere', 'FletcherReeves', 'Gradient', 'DFP', 'BFGS', 'Broyden', 'DaiYuan'
ls_method='Backtrack', # 'BarzilaiBorwein', 'Backtrack',
ls_bt_method='Armijo', # 'Armijo', 'Goldstein', 'Wolfe', 'BinarySearch'
)
OPTIM.Verbose = False
OPTIM.MaxIteration = 1500
OPTIM()
print("==========================" * 4)
print(OPTIM.solution)
| [
"numpy.sin",
"numpy.array",
"scipy.optimize.minimize",
"Optimithon.Base"
] | [((223, 240), 'numpy.array', 'array', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (228, 240), False, 'from numpy import array, sin, pi\n'), ((261, 295), 'scipy.optimize.minimize', 'minimize', (['fun', 'x0'], {'method': '"""COBYLA"""'}), "(fun, x0, method='COBYLA')\n", (269, 295), False, 'from scipy.optimize import minimize\n'), ((303, 336), 'scipy.optimize.minimize', 'minimize', (['fun', 'x0'], {'method': '"""SLSQP"""'}), "(fun, x0, method='SLSQP')\n", (311, 336), False, 'from scipy.optimize import minimize\n'), ((451, 576), 'Optimithon.Base', 'Base', (['fun'], {'method': 'QuasiNewton', 'x0': 'x0', 't_method': '"""Cauchy_x"""', 'dd_method': '"""BFGS"""', 'ls_method': '"""Backtrack"""', 'ls_bt_method': '"""Armijo"""'}), "(fun, method=QuasiNewton, x0=x0, t_method='Cauchy_x', dd_method='BFGS',\n ls_method='Backtrack', ls_bt_method='Armijo')\n", (455, 576), False, 'from Optimithon import Base\n'), ((149, 165), 'numpy.sin', 'sin', (['(x[0] + x[1])'], {}), '(x[0] + x[1])\n', (152, 165), False, 'from numpy import array, sin, pi\n')] |
from __future__ import division
from tkinter import Button, Label, Tk
import threading
import pyaudio
import numpy as np
from core.stream import Stream
from core.tone2frequency import tone2frequency
from data.key_midi_mapping import midi_key_mapping
class ThreadPlayer:
def __init__(self, **kwargs):
self.play = True
self.thread_kill = False
self.sample_rate = 44100
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=pyaudio.paFloat32, channels=1, rate=self.sample_rate, output=True)
self.samples = None
def create_sine_tone(self, frequency, duration):
self.samples = (np.sin(2 * np.pi * np.arange(self.sample_rate * duration) * frequency
/ self.sample_rate)).astype(np.float32)
def callback(self, in_data, frame_count, time_info, status):
return self.samples, pyaudio.paContinue
def start_sound(self, f):
self.p = pyaudio.PyAudio()
self.create_sine_tone(f, 1/f*10)
self.stream = self.p.open(format=pyaudio.paFloat32, channels=1, rate=self.sample_rate, output=True,
stream_callback=self.callback, frames_per_buffer=len(self.samples))
self.stream.start_stream()
# while True and not self.thread_kill:
# if self.play:
# try:
# self.stream.write(1. * self.samples, exception_on_underflow=False)
# except:
# print("thrown")
# continue
# self.stream.finish()
def stop_sound(self):
self.play = False
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
def start_thread(self, frequency):
self.play = True
self.start_sound(frequency)
# t1 = threading.Thread(target=self.start_sound, args=(frequency,))
# t1.start()
class MainWindow:
def __init__(self):
self.tone = 49 # A4
self.frequency = 440
self.p1 = ThreadPlayer()
self.p2 = ThreadPlayer()
def _frequency_increment(self):
self.tone += 1
self.frequency = tone2frequency(self.tone)
def _frequency_decrement(self):
self.tone -= 1
self.frequency = tone2frequency(self.tone)
def show(self):
window = Tk()
window.title("Piano reference")
window.geometry('350x200')
lbl_midi = Label(window, text="49")
lbl_key = Label(window, text="A4")
lbl_freq = Label(window, text="440.0")
lbl_midi.grid(column=2, row=1)
lbl_key.grid(column=2, row=2)
lbl_freq.grid(column=2, row=3)
def left_click(event):
self._frequency_decrement()
self.p1.stop_sound()
self.p2.stop_sound()
self.p1 = ThreadPlayer()
self.p2 = ThreadPlayer()
self.p1.start_thread(self.frequency)
lbl_midi.configure(text=self.tone)
lbl_key.configure(text=midi_key_mapping[self.tone])
lbl_freq.configure(text=self.frequency)
def right_click(event):
self._frequency_increment()
self.p1.stop_sound()
self.p2.stop_sound()
self.p1 = ThreadPlayer()
self.p2 = ThreadPlayer()
self.p2.start_thread(self.frequency)
lbl_midi.configure(text=self.tone)
lbl_key.configure(text=midi_key_mapping[self.tone])
lbl_freq.configure(text=self.frequency)
btn1 = Button(window, text="<<", command=lambda: left_click(None))
btn2 = Button(window, text=">>", command=lambda: right_click(None))
btn1.grid(column=0, row=0)
btn2.grid(column=1, row=0)
window.bind('<Left>', left_click)
window.bind('<Right>', right_click)
window.mainloop()
if __name__ == '__main__':
mw = MainWindow()
mw.show()
| [
"tkinter.Tk",
"tkinter.Label",
"pyaudio.PyAudio",
"core.tone2frequency.tone2frequency",
"numpy.arange"
] | [((417, 434), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (432, 434), False, 'import pyaudio\n'), ((953, 970), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (968, 970), False, 'import pyaudio\n'), ((2159, 2184), 'core.tone2frequency.tone2frequency', 'tone2frequency', (['self.tone'], {}), '(self.tone)\n', (2173, 2184), False, 'from core.tone2frequency import tone2frequency\n'), ((2270, 2295), 'core.tone2frequency.tone2frequency', 'tone2frequency', (['self.tone'], {}), '(self.tone)\n', (2284, 2295), False, 'from core.tone2frequency import tone2frequency\n'), ((2334, 2338), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (2336, 2338), False, 'from tkinter import Button, Label, Tk\n'), ((2434, 2458), 'tkinter.Label', 'Label', (['window'], {'text': '"""49"""'}), "(window, text='49')\n", (2439, 2458), False, 'from tkinter import Button, Label, Tk\n'), ((2477, 2501), 'tkinter.Label', 'Label', (['window'], {'text': '"""A4"""'}), "(window, text='A4')\n", (2482, 2501), False, 'from tkinter import Button, Label, Tk\n'), ((2521, 2548), 'tkinter.Label', 'Label', (['window'], {'text': '"""440.0"""'}), "(window, text='440.0')\n", (2526, 2548), False, 'from tkinter import Button, Label, Tk\n'), ((669, 707), 'numpy.arange', 'np.arange', (['(self.sample_rate * duration)'], {}), '(self.sample_rate * duration)\n', (678, 707), True, 'import numpy as np\n')] |
import os
import subprocess
import sys
import tarfile
import tempfile
from dataclasses import asdict
import numpy as np
import onnxruntime as ort
import tensorflow as tf
import yaml
from tvm.contrib.download import download
from arachne.data import ModelSpec, TensorSpec
from arachne.tools.openvino2tf import OpenVINO2TF, OpenVINO2TFConfig
from arachne.tools.openvino_mo import OpenVINOModelOptConfig, OpenVINOModelOptimizer
from arachne.utils.model_utils import init_from_file
from arachne.utils.tf_utils import make_tf_gpu_usage_growth
def check_openvino2tf_output(onnx_model_path, tf_model_path):
tf_loaded = tf.saved_model.load(tf_model_path)
resnet18_tf = tf_loaded.signatures["serving_default"] # type: ignore
input = np.random.rand(1, 3, 224, 224).astype(np.float32) # type: ignore
# onnx runtime
sess = ort.InferenceSession(onnx_model_path, providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name
dout = sess.run(output_names=None, input_feed={input_name: input})[0]
# tf
tf_input = tf.convert_to_tensor(np.transpose(input, (0, 2, 3, 1)))
tf_result = resnet18_tf(tf_input)
aout = tf_result["tf.identity"].numpy()
np.testing.assert_allclose(aout, dout, atol=1e-5, rtol=1e-5) # type: ignore
def test_openvino2tf():
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
url = (
"https://arachne-public-pkgs.s3.ap-northeast-1.amazonaws.com/models/test/resnet18.onnx"
)
onnx_model_path = "resnet18.onnx"
download(url, onnx_model_path)
input_model = init_from_file(onnx_model_path)
m = OpenVINOModelOptimizer.run(input_model, OpenVINOModelOptConfig())
m = OpenVINO2TF.run(m, OpenVINO2TFConfig())
check_openvino2tf_output(onnx_model_path, m.path)
def test_cli():
# Due to the test time, we only test one case
make_tf_gpu_usage_growth()
with tempfile.TemporaryDirectory() as tmp_dir:
os.chdir(tmp_dir)
url = (
"https://arachne-public-pkgs.s3.ap-northeast-1.amazonaws.com/models/test/resnet18.onnx"
)
onnx_model_path = "resnet18.onnx"
download(url, onnx_model_path)
ret = subprocess.run(
[
sys.executable,
"-m",
"arachne.driver.cli",
"+tools=openvino_mo",
"model_file=resnet18.onnx",
"output_path=output.tar",
]
)
assert ret.returncode == 0
model_path = None
with tarfile.open("output.tar", "r:gz") as tar:
for m in tar.getmembers():
if m.name.endswith("_0"):
model_path = m.name
tar.extractall(".")
assert model_path is not None
spec = ModelSpec(
inputs=[TensorSpec(name="input0", shape=[1, 3, 224, 224], dtype="float32")],
outputs=[TensorSpec(name="output0", shape=[1, 1000], dtype="float32")],
)
with open("spec.yaml", "w") as file:
yaml.dump(asdict(spec), file)
ret2 = subprocess.run(
[
sys.executable,
"-m",
"arachne.driver.cli",
"+tools=openvino2tf",
f"model_dir={model_path}",
"model_spec_file=spec.yaml",
"output_path=output2.tar",
]
)
assert ret2.returncode == 0
with tarfile.open("output2.tar", "r:gz") as tar:
for m in tar.getmembers():
if m.name.endswith("saved_model"):
model_path = m.name
tar.extractall(".")
check_openvino2tf_output(onnx_model_path, model_path)
| [
"tensorflow.saved_model.load",
"tempfile.TemporaryDirectory",
"tarfile.open",
"numpy.random.rand",
"dataclasses.asdict",
"numpy.testing.assert_allclose",
"subprocess.run",
"onnxruntime.InferenceSession",
"arachne.data.TensorSpec",
"arachne.tools.openvino2tf.OpenVINO2TFConfig",
"os.chdir",
"ara... | [((620, 654), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['tf_model_path'], {}), '(tf_model_path)\n', (639, 654), True, 'import tensorflow as tf\n'), ((839, 912), 'onnxruntime.InferenceSession', 'ort.InferenceSession', (['onnx_model_path'], {'providers': "['CPUExecutionProvider']"}), "(onnx_model_path, providers=['CPUExecutionProvider'])\n", (859, 912), True, 'import onnxruntime as ort\n'), ((1198, 1260), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['aout', 'dout'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(aout, dout, atol=1e-05, rtol=1e-05)\n', (1224, 1260), True, 'import numpy as np\n'), ((1904, 1930), 'arachne.utils.tf_utils.make_tf_gpu_usage_growth', 'make_tf_gpu_usage_growth', ([], {}), '()\n', (1928, 1930), False, 'from arachne.utils.tf_utils import make_tf_gpu_usage_growth\n'), ((1076, 1109), 'numpy.transpose', 'np.transpose', (['input', '(0, 2, 3, 1)'], {}), '(input, (0, 2, 3, 1))\n', (1088, 1109), True, 'import numpy as np\n'), ((1310, 1339), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1337, 1339), False, 'import tempfile\n'), ((1360, 1377), 'os.chdir', 'os.chdir', (['tmp_dir'], {}), '(tmp_dir)\n', (1368, 1377), False, 'import os\n'), ((1556, 1586), 'tvm.contrib.download.download', 'download', (['url', 'onnx_model_path'], {}), '(url, onnx_model_path)\n', (1564, 1586), False, 'from tvm.contrib.download import download\n'), ((1610, 1641), 'arachne.utils.model_utils.init_from_file', 'init_from_file', (['onnx_model_path'], {}), '(onnx_model_path)\n', (1624, 1641), False, 'from arachne.utils.model_utils import init_from_file\n'), ((1941, 1970), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1968, 1970), False, 'import tempfile\n'), ((1991, 2008), 'os.chdir', 'os.chdir', (['tmp_dir'], {}), '(tmp_dir)\n', (1999, 2008), False, 'import os\n'), ((2186, 2216), 'tvm.contrib.download.download', 'download', (['url', 'onnx_model_path'], {}), '(url, 
onnx_model_path)\n', (2194, 2216), False, 'from tvm.contrib.download import download\n'), ((2232, 2377), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'arachne.driver.cli', '+tools=openvino_mo',\n 'model_file=resnet18.onnx', 'output_path=output.tar']"], {}), "([sys.executable, '-m', 'arachne.driver.cli',\n '+tools=openvino_mo', 'model_file=resnet18.onnx', 'output_path=output.tar']\n )\n", (2246, 2377), False, 'import subprocess\n'), ((3127, 3300), 'subprocess.run', 'subprocess.run', (["[sys.executable, '-m', 'arachne.driver.cli', '+tools=openvino2tf',\n f'model_dir={model_path}', 'model_spec_file=spec.yaml',\n 'output_path=output2.tar']"], {}), "([sys.executable, '-m', 'arachne.driver.cli',\n '+tools=openvino2tf', f'model_dir={model_path}',\n 'model_spec_file=spec.yaml', 'output_path=output2.tar'])\n", (3141, 3300), False, 'import subprocess\n'), ((742, 772), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (756, 772), True, 'import numpy as np\n'), ((1694, 1718), 'arachne.tools.openvino_mo.OpenVINOModelOptConfig', 'OpenVINOModelOptConfig', ([], {}), '()\n', (1716, 1718), False, 'from arachne.tools.openvino_mo import OpenVINOModelOptConfig, OpenVINOModelOptimizer\n'), ((1751, 1770), 'arachne.tools.openvino2tf.OpenVINO2TFConfig', 'OpenVINO2TFConfig', ([], {}), '()\n', (1768, 1770), False, 'from arachne.tools.openvino2tf import OpenVINO2TF, OpenVINO2TFConfig\n'), ((2578, 2612), 'tarfile.open', 'tarfile.open', (['"""output.tar"""', '"""r:gz"""'], {}), "('output.tar', 'r:gz')\n", (2590, 2612), False, 'import tarfile\n'), ((3493, 3528), 'tarfile.open', 'tarfile.open', (['"""output2.tar"""', '"""r:gz"""'], {}), "('output2.tar', 'r:gz')\n", (3505, 3528), False, 'import tarfile\n'), ((3091, 3103), 'dataclasses.asdict', 'asdict', (['spec'], {}), '(spec)\n', (3097, 3103), False, 'from dataclasses import asdict\n'), ((2860, 2926), 'arachne.data.TensorSpec', 'TensorSpec', ([], {'name': '"""input0"""', 'shape': 
'[1, 3, 224, 224]', 'dtype': '"""float32"""'}), "(name='input0', shape=[1, 3, 224, 224], dtype='float32')\n", (2870, 2926), False, 'from arachne.data import ModelSpec, TensorSpec\n'), ((2950, 3010), 'arachne.data.TensorSpec', 'TensorSpec', ([], {'name': '"""output0"""', 'shape': '[1, 1000]', 'dtype': '"""float32"""'}), "(name='output0', shape=[1, 1000], dtype='float32')\n", (2960, 3010), False, 'from arachne.data import ModelSpec, TensorSpec\n')] |
import numpy as np
import matplotlib.pyplot as plt
import time, psutil, sys, gc
useColab = False
if useColab:
#!pip3 install hdf5storage
from google.colab import drive
drive.mount('/content/gdrive')
import hdf5storage as hdf
def loadData(filename):
#Get data
return hdf.loadmat(filename)
def saveData(filename, trainXs, trainYs, testXs, testYs):
hdf.savemat(filename,
{'features_training': np.array(trainXs, dtype='float32'),
'labels_training': np.array(trainYs, dtype='int8'),
'features_test': np.array(testXs, dtype='float32'),
'labels_test': np.array(testYs, dtype='int8') },
do_compression=True, format='5')
#Print number of samples by classes
def samplesByClass(data, printClasses=False):
#Unique classes
if printClasses:
print(np.unique(data, return_counts=True, axis=0)[0])
#Number of samples of each unique class
print(np.unique(data, return_counts=True, axis=0)[1])
def createDataset(data, faults, trainSize=0.8, featuresName='X', labelsName='Y'):
print ("start: ", psutil.virtual_memory())
#Get data
X = data[featuresName]
Y = data[labelsName]
numClasses = len(faults)
print ("get X Y: ", psutil.virtual_memory())
#if NOT onehot encoded
#make sure Y has cols dim 1 if NOT onehot encoded
if len(Y.shape) == 1:
Y = Y.reshape(Y.shape[0], 1)
print ("Y reshape: ", psutil.virtual_memory())
'''
#Normalize
print('Normalize')
#X = minmax(X)
X = quantileTransform(X)
print ("Normalize: ", psutil.virtual_memory())
'''
'''
#Separate datasets equally by fault classes
sinalLength= int(X.shape[1])
samples = int(X.shape[0] / numClasses)
splitPoint = int(samples*trainSize)
#print(samples, splitPoint)
trainXs = []
trainYs = []
testXs = []
testYs = []
for i in range(numClasses):
print (i)
#slice fault
st = i*samples
sp = st + splitPoint
end = (i+1)*samples
#shuffle in place each fault data before slice train/set
p = np.random.permutation(samples)
X[st:end, :] = X[st+p, :]
Y[st:end, :] = Y[st+p, :]
#print(trainXs.shape, trainYs.shape, testXs.shape, testYs.shape)
#print(X[st:sp, :].shape, Y[st:sp, :].shape, X[sp:end, :].shape, Y[sp:end, :].shape)
trainXs.append(X[st:sp, :])
trainYs.append(Y[st:sp, :])
testXs.append(X[sp:end, :])
testYs.append(Y[sp:end, :])
print ("train: ", psutil.virtual_memory())
#Not needed anymore free memory
X = 0
Y = 0
del X
del Y
print ("free X Y: ", psutil.virtual_memory())
#Join list of arrays in just one
trainXs = np.concatenate(trainXs)
trainYs = np.concatenate(trainYs)
testXs = np.concatenate(testXs)
testYs = np.concatenate(testYs)
print ("concatenate: ", psutil.virtual_memory())
trainXs = np.copy(trainXs)
trainYs = np.copy(trainYs)
testXs = np.copy(testXs)
testYs = np.copy(testYs)
print ("copy: ", psutil.virtual_memory())
#Not needed anymore free memory
gc.collect()
time.sleep(10)
print ("gc: ", psutil.virtual_memory())
#Faults are ordered by classes
#this shuffle faults order
print('shuffle classes')
p = np.random.permutation(trainXs.shape[0])
print('shuffle classes')
trainXs = trainXs[p]
print('shuffle classes')
trainYs = trainYs[p]
p = np.random.permutation(testXs.shape[0])
print('shuffle classes')
testXs = testXs[p]
print('shuffle classes')
testYs = testYs[p]
print ("shuffle: ", psutil.virtual_memory())
return trainXs, trainYs, testXs, testYs
'''
#Main
#if __name__ == "main":
#C00_C01_C03_C08_C10_C15_C30_S50_L1_Ch_01_to_06
if useColab:
storagepath='gdrive/My Drive/Colab Notebooks/classifier/data/'
else:
storagepath='/home/hdaniel/Downloads/'
#define individual channels data
loadFNs = []
loadFNs.append(storagepath + 'raw_50000_003.mat')
loadFNs.append(storagepath + 'raw_50000_004.mat')
loadFNs.append(storagepath + 'raw_50000_005.mat')
featuresName = 'features'
labelsName = 'labels'
numFiles = len(loadFNs)
#output dataset
saveFN = storagepath + 'raw_3channels.mat'
save = True
#Config dataset generation
faults = [0, 1, 3, 8, 10, 15, 30]
numClasses = len(faults)
split = 0.8
#%whos
outputData = []
for i in range(numFiles):
filename = loadFNs[i]
print("Loading: ", filename, psutil.virtual_memory())
inData = loadData(filename)
if i == 0:
fshape = inData[featuresName].shape
lshape = inData[labelsName].shape
print("Data shape", fshape, lshape)
#p = np.random.permutation(---HOW to do it--- samples)
else:
if fshape != inData[featuresName].shape or lshape != inData[labelsName].shape:
raise Exception('channel data shape expected {} and {}, but got {} and {}'.format(
fshape, lshape, inData[featuresName].shape, inData[labelsName].shape))
#trainXs, trainYs, testXs, testYs = createDataset(inData, faults, split, featuresName, labelsName)
print("dataset created", psutil.virtual_memory())
samplesByClass(inData[labelsName], printClasses=False)
'''
trainXs, trainYs, testXs, testYs = shuffleData(data, numClasses, split, 1, featuresName, labelsName)
samplesByClass(trainYs, printClasses=False)
samplesByClass(testYs, printClasses=False)
'''
plt.plot(inData[featuresName][23,:])
#plt.plot(trainXs[23,:])
plt.show()
| [
"hdf5storage.loadmat",
"numpy.unique",
"google.colab.drive.mount",
"matplotlib.pyplot.plot",
"psutil.virtual_memory",
"numpy.array",
"matplotlib.pyplot.show"
] | [((5277, 5314), 'matplotlib.pyplot.plot', 'plt.plot', (['inData[featuresName][23, :]'], {}), '(inData[featuresName][23, :])\n', (5285, 5314), True, 'import matplotlib.pyplot as plt\n'), ((5339, 5349), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5347, 5349), True, 'import matplotlib.pyplot as plt\n'), ((182, 212), 'google.colab.drive.mount', 'drive.mount', (['"""/content/gdrive"""'], {}), "('/content/gdrive')\n", (193, 212), False, 'from google.colab import drive\n'), ((290, 311), 'hdf5storage.loadmat', 'hdf.loadmat', (['filename'], {}), '(filename)\n', (301, 311), True, 'import hdf5storage as hdf\n'), ((5001, 5024), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (5022, 5024), False, 'import time, psutil, sys, gc\n'), ((1134, 1157), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (1155, 1157), False, 'import time, psutil, sys, gc\n'), ((1279, 1302), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (1300, 1302), False, 'import time, psutil, sys, gc\n'), ((1479, 1502), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (1500, 1502), False, 'import time, psutil, sys, gc\n'), ((4312, 4335), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (4333, 4335), False, 'import time, psutil, sys, gc\n'), ((440, 474), 'numpy.array', 'np.array', (['trainXs'], {'dtype': '"""float32"""'}), "(trainXs, dtype='float32')\n", (448, 474), True, 'import numpy as np\n'), ((513, 544), 'numpy.array', 'np.array', (['trainYs'], {'dtype': '"""int8"""'}), "(trainYs, dtype='int8')\n", (521, 544), True, 'import numpy as np\n'), ((581, 614), 'numpy.array', 'np.array', (['testXs'], {'dtype': '"""float32"""'}), "(testXs, dtype='float32')\n", (589, 614), True, 'import numpy as np\n'), ((649, 679), 'numpy.array', 'np.array', (['testYs'], {'dtype': '"""int8"""'}), "(testYs, dtype='int8')\n", (657, 679), True, 'import numpy as np\n'), ((977, 1020), 'numpy.unique', 'np.unique', (['data'], 
{'return_counts': '(True)', 'axis': '(0)'}), '(data, return_counts=True, axis=0)\n', (986, 1020), True, 'import numpy as np\n'), ((875, 918), 'numpy.unique', 'np.unique', (['data'], {'return_counts': '(True)', 'axis': '(0)'}), '(data, return_counts=True, axis=0)\n', (884, 918), True, 'import numpy as np\n')] |
# MAIN
import simplex
import search
import tree
import marking
import numpy as np
from prettytable import PrettyTable
# Данные варианта 18
c = np.array( [7, 7, 6], float)
b = np.array( [8, 2, 6], float)
A = np.array( [ [2, 1, 1],
[1, 2, 0],
[0, 0.5, 4] ], float)
print ("ДАННЫЕ ВАРИАНТ №18")
print ("c =", c)
print ("b =", b)
print ("A =", A)
# Программа
# Шаг 0 (Создаем массивы обозначений X для графического вывода)
x_col, x_row = marking.fillMarks(c, b)
F = 0
# Шаг 1 (Получаем значения F и Х-ов)
step_1 = simplex.Simplex(c, b, A, x_col, x_row)
default_param = np.array(x_col)
default_param = np.append(default_param, x_row)
default_value = np.zeros(np.size(default_param))
for i in range(np.size(step_1.arr_row)):
for j in range(np.size(default_param)):
if step_1.arr_row[i] == default_param[j]:
default_value[j] = step_1.table[i][0]
F = abs(default_value[-1])
default_table = PrettyTable()
default_table.field_names = [item for item in default_param]
default_table.add_row([item for item in default_value])
print ("\nЗначения после первого шага\n")
print(default_table, "\n")
# Шаг 2 (Полный перебор системы ограничений)
search_value, search_F = search.bruteForce(c, b, A, F)
brute_param = np.array(x_col)
brute_param = np.append(brute_param, "F")
brute_value = np.array(search_value)
brute_value = np.append(brute_value, search_F)
brute_table = PrettyTable()
brute_table.field_names = [item for item in brute_param]
brute_table.add_row([item for item in brute_value])
print ("\nРешение задачи целочисленного ЛП\n")
print(brute_table, "\n")
# Шаг 3 (Ветвление)
step_branch = tree.Branch(c, b, A, x_col, x_row)
print ("\nРекорд метод Ветвей и границ\n")
branch_table = PrettyTable()
branch_table.field_names = [item for item in step_branch.record_marks]
branch_table.add_row([item for item in step_branch.record_value])
print(branch_table, "\n")
| [
"prettytable.PrettyTable",
"tree.Branch",
"marking.fillMarks",
"numpy.size",
"search.bruteForce",
"numpy.append",
"numpy.array",
"simplex.Simplex"
] | [((146, 172), 'numpy.array', 'np.array', (['[7, 7, 6]', 'float'], {}), '([7, 7, 6], float)\n', (154, 172), True, 'import numpy as np\n'), ((178, 204), 'numpy.array', 'np.array', (['[8, 2, 6]', 'float'], {}), '([8, 2, 6], float)\n', (186, 204), True, 'import numpy as np\n'), ((211, 263), 'numpy.array', 'np.array', (['[[2, 1, 1], [1, 2, 0], [0, 0.5, 4]]', 'float'], {}), '([[2, 1, 1], [1, 2, 0], [0, 0.5, 4]], float)\n', (219, 263), True, 'import numpy as np\n'), ((477, 500), 'marking.fillMarks', 'marking.fillMarks', (['c', 'b'], {}), '(c, b)\n', (494, 500), False, 'import marking\n'), ((556, 594), 'simplex.Simplex', 'simplex.Simplex', (['c', 'b', 'A', 'x_col', 'x_row'], {}), '(c, b, A, x_col, x_row)\n', (571, 594), False, 'import simplex\n'), ((612, 627), 'numpy.array', 'np.array', (['x_col'], {}), '(x_col)\n', (620, 627), True, 'import numpy as np\n'), ((644, 675), 'numpy.append', 'np.append', (['default_param', 'x_row'], {}), '(default_param, x_row)\n', (653, 675), True, 'import numpy as np\n'), ((938, 951), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (949, 951), False, 'from prettytable import PrettyTable\n'), ((1212, 1241), 'search.bruteForce', 'search.bruteForce', (['c', 'b', 'A', 'F'], {}), '(c, b, A, F)\n', (1229, 1241), False, 'import search\n'), ((1257, 1272), 'numpy.array', 'np.array', (['x_col'], {}), '(x_col)\n', (1265, 1272), True, 'import numpy as np\n'), ((1287, 1314), 'numpy.append', 'np.append', (['brute_param', '"""F"""'], {}), "(brute_param, 'F')\n", (1296, 1314), True, 'import numpy as np\n'), ((1329, 1351), 'numpy.array', 'np.array', (['search_value'], {}), '(search_value)\n', (1337, 1351), True, 'import numpy as np\n'), ((1366, 1398), 'numpy.append', 'np.append', (['brute_value', 'search_F'], {}), '(brute_value, search_F)\n', (1375, 1398), True, 'import numpy as np\n'), ((1414, 1427), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (1425, 1427), False, 'from prettytable import PrettyTable\n'), ((1647, 1681), 
'tree.Branch', 'tree.Branch', (['c', 'b', 'A', 'x_col', 'x_row'], {}), '(c, b, A, x_col, x_row)\n', (1658, 1681), False, 'import tree\n'), ((1742, 1755), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (1753, 1755), False, 'from prettytable import PrettyTable\n'), ((701, 723), 'numpy.size', 'np.size', (['default_param'], {}), '(default_param)\n', (708, 723), True, 'import numpy as np\n'), ((741, 764), 'numpy.size', 'np.size', (['step_1.arr_row'], {}), '(step_1.arr_row)\n', (748, 764), True, 'import numpy as np\n'), ((783, 805), 'numpy.size', 'np.size', (['default_param'], {}), '(default_param)\n', (790, 805), True, 'import numpy as np\n')] |
import numpy as np
import random
import time
from sudoku.node import Node
class Sudoku():
def __init__(self, size=9, custom=None, verbose=False, debug=False):
# assume size is perfect square (TODO: assert square)
# size is defined as the length of one side
"""
Custom should be a list of lists containing each row of the sudoku.
Empty spots should be represented by a 0.
"""
self.verbose = verbose
self.debug = debug
self.size = size
self._tilesize = int(np.sqrt(size))
initstart = time.time()
self.nodes, self._rows, self._cols, self._tiles = self.initnodes()
self.connect_nodes()
after_init = time.time() - initstart
self.print(f'Node initialisation took {after_init}s')
if custom is not None:
startcustom = time.time()
self.fillgrid(custom)
self.print(f'Loading custom input took {time.time() - startcustom}s')
def get_all_rows(self):
return self._rows
def get_row(self, row):
return self._rows[row]
def get_col(self, col):
return self._cols[col]
def get_tile(self, tile):
return self._tiles[tile]
def initnodes(self):
nodes, rows, cols, tiles = [], [[] for _ in range(self.size)], [[] for _ in range(self.size)], [[] for _ in range(self.size)]
for row in range(self.size):
for col in range(self.size):
node = Node(row, col)
nodes.append(node)
rows[row].append(node)
cols[col].append(node)
# Tiles are for example the 3*3 squares in default sudoku
tilenr = self.calculate_tile(row, col)
tiles[tilenr].append(node)
return nodes, rows, cols, tiles
def calculate_tile(self, row, col):
tilerow = row // self._tilesize
tilecol = col // self._tilesize
return tilerow * self._tilesize + tilecol
def connect_nodes(self):
for node in self.nodes:
for connected_node in self.get_row(node.row) + self.get_col(node.col) + self.get_tile(self.calculate_tile(node.row, node.col)):
node.connected_nodes.add(connected_node)
node.connected_nodes -= set([node])
def fillgrid(self, custom):
try:
for i, row in enumerate(self._rows):
for j, node in enumerate(row):
if custom[i][j] != 0:
node.original = True
node.value = custom[i][j]
except IndexError:
raise IndexError("Custom sudoku layout was not of the right format!")
except Exception as e: # Other error, just raise
raise e
self.print("Custom input submitted and processed:")
self.print(self)
@property
def empty(self):
empty = 0
for node in self.nodes:
if node.value == 0:
empty += 1
self.print(f'{empty} empty values')
return empty
@property
def is_valid(self):
for node in self.nodes:
if not node.is_valid:
return False
return True
def print(self, msg):
if self.verbose:
print(msg)
def equals(self, other):
try:
for i, row in enumerate(self._rows):
for j, node in enumerate(row):
if not node.equals(other.get_row(i)[j]):
return False
except Exception:
return False
return True
def __eq__(self, other):
if not isinstance(other, Sudoku):
return False
return self.equals(other)
def __ne__(self, other):
if not isinstance(other, Sudoku):
return False
return not self.equals(other)
def copy(self):
"""
Returns new sudoku instance with new nodes containing the same values.
"""
custom_input = [[node.value for node in row] for row in self._rows]
self.print('Copying data into new Sudoku.')
newSudoku = Sudoku(size=self.size, custom=custom_input, verbose=self.verbose)
self.print('Verifying data of new Sudoku.')
# Check for original
for node in self.nodes:
for newnode in newSudoku.nodes:
if node.equals(newnode):
newnode.original = node.original
self.print('Data verified.\n')
return newSudoku
def get_options(self, node):
return list(set([i for i in range(1, self.size + 1)]) - node.get_neighbor_values())
def __str__(self):
result = ""
for row in self._rows:
result += str([node.value for node in row]) + '\n'
return result
def solve_smart(self, returnBranching=False, test_unique=False):
to_solve = self.copy()
# This needs to be an object to be easily modified in executeFill
unique = {'solved_once': False} # Used in testing uniqueness
def gather_best_node(sudoku):
"""
Searches nodes with least amount of options, selects one randomly
"""
best_nodes = []
current_min_options = sudoku.size
# Gather a list of nodes with the least
for node in sudoku.nodes:
if not node.value == 0:
continue
options = sudoku.get_options(node)
if len(options) < current_min_options:
# New best node found
best_nodes = [node]
current_min_options = len(options)
elif len(options) == current_min_options:
best_nodes.append(node)
return random.choice(best_nodes) if len(best_nodes) != 0 else None
def executeFill(depth=0):
if self.debug and depth % 50 == 0 and depth != 0:
to_solve.print(f'On rec depth {depth}')
to_solve.print(to_solve)
node = gather_best_node(to_solve)
if node is None:
return {'result': True, 'branchfactor': 1}
options = to_solve.get_options(node)
random.shuffle(options)
branch = 1 # for detetermining branch factor (difficulty)
for option in options:
node.value = option
results = executeFill(depth=depth + 1)
if results['result']:
if test_unique and unique['solved_once']:
# not unique, return as a valid response
return {'result': True}
elif test_unique and not unique['solved_once']:
# first solution found, keep searching
# while keeping track of solution found
unique['solved_once'] = True
continue
else:
if returnBranching:
branch = (branch - 1)**2
branch += results['branchfactor'] # keeping summation going
return {'result': True, 'branchfactor': branch}
branch += 1
# base case
node.value = 0
return {'result': False}
queue = [node for node in to_solve.nodes if not node.original]
if len(queue) == 0:
# The sudoku was already completely full, check if valid or not
if not to_solve.is_valid:
to_solve.print("Given solution is not valid!")
to_solve.print(to_solve)
return False
else:
to_solve.print("Success! Given solution was valid!")
to_solve.print(to_solve)
return True
to_solve.print('Trying to fill board...')
starttime = time.time()
executionResults = executeFill()
interval = time.time() - starttime
to_solve.calculation_time = interval * 1000 # Calc_time in ms
if (not executionResults['result']) or (not to_solve.is_valid):
if test_unique and unique['solved_once']:
return True
to_solve.print("Unable to fill board!")
raise Exception("Unable to fill board!")
else: # Successfully filled the board!
if test_unique:
return not unique['solved_once']
branchingFactor = executionResults.get('branchfactor', None)
to_solve.print("Filled board!")
to_solve.print(f"\nSolution:\n{to_solve}")
to_solve.print(f"Solution found in {interval}s")
if returnBranching:
return to_solve, branchingFactor
return to_solve
@property
def is_unique(self):
return self.solve_smart(test_unique=True)
def _reset_random_node(self):
random.choice(self.nodes).value = 0
return True
def make_puzzle(self, diff=500, retry=5):
if not self.is_valid:
# Self is assumed to be a filled grid
raise ValueError('Sudoku should be a filled grid in order to make a puzzle.')
puzzle = self.copy()
cur_diff = 0
tries = 0
while diff > cur_diff:
prev_diff = cur_diff
prev_puzzle = puzzle.copy()
puzzle._reset_random_node()
if not puzzle.is_unique:
# Puzzle was not unique anymore: if too many retries, return previous iteration
tries += 1
if tries > retry:
puzzle.print('Retried too much!')
return prev_puzzle, prev_diff
else:
puzzle, cur_diff = prev_puzzle, prev_diff
else:
tries = 0
cur_diff = puzzle.estimate_difficulty(iterations=50)
# Sometimes difficulty lowers, only take max diff
if (cur_diff < prev_diff):
puzzle, cur_diff = prev_puzzle, prev_diff
return puzzle, cur_diff
def _diff_from_branching(self, branching):
return branching * 100 + self.empty
def estimate_difficulty(self, iterations=20):
total = 0
for i in range(iterations):
total += self._diff_from_branching(self.solve_smart(returnBranching=True)[1])
return int(total / iterations)
| [
"random.choice",
"numpy.sqrt",
"random.shuffle",
"sudoku.node.Node",
"time.time"
] | [((596, 607), 'time.time', 'time.time', ([], {}), '()\n', (605, 607), False, 'import time\n'), ((8201, 8212), 'time.time', 'time.time', ([], {}), '()\n', (8210, 8212), False, 'import time\n'), ((560, 573), 'numpy.sqrt', 'np.sqrt', (['size'], {}), '(size)\n', (567, 573), True, 'import numpy as np\n'), ((736, 747), 'time.time', 'time.time', ([], {}), '()\n', (745, 747), False, 'import time\n'), ((882, 893), 'time.time', 'time.time', ([], {}), '()\n', (891, 893), False, 'import time\n'), ((6460, 6483), 'random.shuffle', 'random.shuffle', (['options'], {}), '(options)\n', (6474, 6483), False, 'import random\n'), ((8275, 8286), 'time.time', 'time.time', ([], {}), '()\n', (8284, 8286), False, 'import time\n'), ((9244, 9269), 'random.choice', 'random.choice', (['self.nodes'], {}), '(self.nodes)\n', (9257, 9269), False, 'import random\n'), ((1534, 1548), 'sudoku.node.Node', 'Node', (['row', 'col'], {}), '(row, col)\n', (1538, 1548), False, 'from sudoku.node import Node\n'), ((5991, 6016), 'random.choice', 'random.choice', (['best_nodes'], {}), '(best_nodes)\n', (6004, 6016), False, 'import random\n'), ((982, 993), 'time.time', 'time.time', ([], {}), '()\n', (991, 993), False, 'import time\n')] |
# neural network functions and classes
import numpy as np
import random
import json
import cma
from es import SimpleGA, CMAES, PEPG, OpenES
from env import make_env
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(x, 0)
def passthru(x):
return x
# useful for discrete actions
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
# useful for discrete actions
def sample(p):
return np.argmax(np.random.multinomial(1, p))
"""
learning the model
"""
class RNNCell:
def __init__(self, input_size, weight, bias):
self.input_size=input_size
self.weight = weight
self.bias = bias
def __call__(self, x, h):
concat = np.concatenate((x, h), axis=1)
hidden = np.matmul(concat, self.weight)+self.bias
return np.tanh(hidden)
# LSTM in a few lines of numpy
class LSTMCell:
'''Numpy LSTM cell used for inference only.'''
def __init__(self, input_size, weight, bias, forget_bias=1.0):
self.input_size=input_size
self.W_full=weight # np.concatenate((Wxh, Whh), axis=0)
self.bias=bias
self.forget_bias=1.0
def __call__(self, x, h, c):
concat = np.concatenate((x, h), axis=1)
hidden = np.matmul(concat, self.W_full)+self.bias
i, g, f, o = np.split(hidden, 4, axis=1)
i = sigmoid(i)
g = np.tanh(g)
f = sigmoid(f+self.forget_bias)
o = sigmoid(o)
new_c = np.multiply(c, f) + np.multiply(g, i)
new_h = np.multiply(np.tanh(new_c), o)
return new_h, new_c
class RNNModel:
def __init__(self, game):
self.env_name = game.env_name
self.hidden_size = game.layers[0]
self.layer_1 = game.layers[1]
self.layer_2 = game.layers[2]
self.rnn_mode = True
self.input_size = game.input_size
self.output_size = game.output_size
self.render_mode = False
self.shapes = [ (self.input_size + self.hidden_size, 1*self.hidden_size), # RNN weights
(self.input_size + self.hidden_size, self.layer_1),# predict actions output
(self.layer_1, self.output_size)] # predict actions output
self.weight = []
self.bias = []
self.param_count = 0
idx = 0
for shape in self.shapes:
self.weight.append(np.zeros(shape=shape))
self.bias.append(np.zeros(shape=shape[1]))
self.param_count += (np.product(shape) + shape[1])
idx += 1
self.init_h = np.zeros((1, self.hidden_size))
self.h = self.init_h
self.param_count += 1*self.hidden_size
self.rnn = RNNCell(self.input_size, self.weight[0], self.bias[0])
def reset(self):
self.h = self.init_h
def make_env(self, seed=-1, render_mode=False):
self.render_mode = render_mode
self.env = make_env(self.env_name, seed=seed, render_mode=render_mode)
def get_action(self, real_obs):
obs = real_obs.reshape(1, 3)
# update rnn:
#update_obs = np.concatenate([obs, action], axis=1)
self.h = self.rnn(obs, self.h)
# get action
total_obs = np.concatenate([obs, self.h], axis=1)
# calculate action using 2 layer network from output
hidden = np.tanh(np.matmul(total_obs, self.weight[1]) + self.bias[1])
action = np.tanh(np.matmul(hidden, self.weight[2]) + self.bias[2])
return action[0]
def set_model_params(self, model_params):
pointer = 0
for i in range(len(self.shapes)):
w_shape = self.shapes[i]
b_shape = self.shapes[i][1]
s_w = np.product(w_shape)
s = s_w + b_shape
chunk = np.array(model_params[pointer:pointer+s])
self.weight[i] = chunk[:s_w].reshape(w_shape)
self.bias[i] = chunk[s_w:].reshape(b_shape)
pointer += s
# rnn states
s = self.hidden_size
self.init_h = model_params[pointer:pointer+s].reshape((1, self.hidden_size))
self.h = self.init_h
self.rnn = RNNCell(self.input_size, self.weight[0], self.bias[0])
def load_model(self, filename):
with open(filename) as f:
data = json.load(f)
print('loading file %s' % (filename))
self.data = data
model_params = np.array(data[0]) # assuming other stuff is in data
self.set_model_params(model_params)
def get_random_model_params(self, stdev=0.1):
return np.random.randn(self.param_count)*stdev
| [
"numpy.product",
"numpy.multiply",
"numpy.tanh",
"numpy.random.multinomial",
"numpy.exp",
"numpy.array",
"numpy.split",
"numpy.zeros",
"numpy.max",
"numpy.matmul",
"numpy.concatenate",
"json.load",
"env.make_env",
"numpy.maximum",
"numpy.random.randn"
] | [((236, 252), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (246, 252), True, 'import numpy as np\n'), ((455, 482), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'p'], {}), '(1, p)\n', (476, 482), True, 'import numpy as np\n'), ((694, 724), 'numpy.concatenate', 'np.concatenate', (['(x, h)'], {'axis': '(1)'}), '((x, h), axis=1)\n', (708, 724), True, 'import numpy as np\n'), ((790, 805), 'numpy.tanh', 'np.tanh', (['hidden'], {}), '(hidden)\n', (797, 805), True, 'import numpy as np\n'), ((1149, 1179), 'numpy.concatenate', 'np.concatenate', (['(x, h)'], {'axis': '(1)'}), '((x, h), axis=1)\n', (1163, 1179), True, 'import numpy as np\n'), ((1252, 1279), 'numpy.split', 'np.split', (['hidden', '(4)'], {'axis': '(1)'}), '(hidden, 4, axis=1)\n', (1260, 1279), True, 'import numpy as np\n'), ((1308, 1318), 'numpy.tanh', 'np.tanh', (['g'], {}), '(g)\n', (1315, 1318), True, 'import numpy as np\n'), ((2388, 2419), 'numpy.zeros', 'np.zeros', (['(1, self.hidden_size)'], {}), '((1, self.hidden_size))\n', (2396, 2419), True, 'import numpy as np\n'), ((2709, 2768), 'env.make_env', 'make_env', (['self.env_name'], {'seed': 'seed', 'render_mode': 'render_mode'}), '(self.env_name, seed=seed, render_mode=render_mode)\n', (2717, 2768), False, 'from env import make_env\n'), ((2981, 3018), 'numpy.concatenate', 'np.concatenate', (['[obs, self.h]'], {'axis': '(1)'}), '([obs, self.h], axis=1)\n', (2995, 3018), True, 'import numpy as np\n'), ((4036, 4053), 'numpy.array', 'np.array', (['data[0]'], {}), '(data[0])\n', (4044, 4053), True, 'import numpy as np\n'), ((201, 211), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (207, 211), True, 'import numpy as np\n'), ((348, 357), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (354, 357), True, 'import numpy as np\n'), ((738, 768), 'numpy.matmul', 'np.matmul', (['concat', 'self.weight'], {}), '(concat, self.weight)\n', (747, 768), True, 'import numpy as np\n'), ((1193, 1223), 'numpy.matmul', 'np.matmul', (['concat', 
'self.W_full'], {}), '(concat, self.W_full)\n', (1202, 1223), True, 'import numpy as np\n'), ((1391, 1408), 'numpy.multiply', 'np.multiply', (['c', 'f'], {}), '(c, f)\n', (1402, 1408), True, 'import numpy as np\n'), ((1411, 1428), 'numpy.multiply', 'np.multiply', (['g', 'i'], {}), '(g, i)\n', (1422, 1428), True, 'import numpy as np\n'), ((1453, 1467), 'numpy.tanh', 'np.tanh', (['new_c'], {}), '(new_c)\n', (1460, 1467), True, 'import numpy as np\n'), ((3420, 3439), 'numpy.product', 'np.product', (['w_shape'], {}), '(w_shape)\n', (3430, 3439), True, 'import numpy as np\n'), ((3478, 3521), 'numpy.array', 'np.array', (['model_params[pointer:pointer + s]'], {}), '(model_params[pointer:pointer + s])\n', (3486, 3521), True, 'import numpy as np\n'), ((3941, 3953), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3950, 3953), False, 'import json\n'), ((4188, 4221), 'numpy.random.randn', 'np.random.randn', (['self.param_count'], {}), '(self.param_count)\n', (4203, 4221), True, 'import numpy as np\n'), ((2221, 2242), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape'}), '(shape=shape)\n', (2229, 2242), True, 'import numpy as np\n'), ((2267, 2291), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape[1]'}), '(shape=shape[1])\n', (2275, 2291), True, 'import numpy as np\n'), ((2320, 2337), 'numpy.product', 'np.product', (['shape'], {}), '(shape)\n', (2330, 2337), True, 'import numpy as np\n'), ((3098, 3134), 'numpy.matmul', 'np.matmul', (['total_obs', 'self.weight[1]'], {}), '(total_obs, self.weight[1])\n', (3107, 3134), True, 'import numpy as np\n'), ((3172, 3205), 'numpy.matmul', 'np.matmul', (['hidden', 'self.weight[2]'], {}), '(hidden, self.weight[2])\n', (3181, 3205), True, 'import numpy as np\n')] |
import copy
import numpy as np
class Objective():
pass
class MeanSquaredError():
def calc_acc(self,y_hat,y):
return 0
def calc_loss(self,y_hat,y):
loss = np.mean(np.sum(np.power(y_hat-y,2),axis=1))
return 0.5*loss
def backward(self,y_hat,y):
return y_hat-y
class MeanAbsoluteError(Objective):
# def __init__(self):
# super(MeanAbsoluteError, self).__init__('linear')
def calc_acc(self,y_hat,y):
return 0
def calc_loss(self, y_hat, y):
return np.mean(np.sum(np.absolute(y_hat - y), axis=1)).tolist()
def backward(self, y_hat, y):
pos=np.where((y_hat-y)<0)
mask=np.ones_like(y_hat)
mask[pos]=-1
return mask
class BinaryCrossEntropy(Objective):
# def __init__(self):
# super(BinaryCrossEntropy, self).__init__('sigmoid')
def calc_acc(self,y_hat,y):
y_pred = y_hat >= 0.5
acc = np.mean(y_pred == y).tolist()
return acc
def calc_loss(self,y_hat,y):
loss=-np.multiply(y,np.log(y_hat))-np.multiply(1-y,np.log(1-y_hat))
return np.mean(np.sum(loss,axis=1)).tolist()
def backward(self,y_hat,y):
avg = np.prod(np.asarray(y_hat.shape[:-1]))
return (np.divide(1-y,1-y_hat)-np.divide(y,y_hat))/avg
class SparseCategoricalCrossEntropy(Objective):
def calc_acc(self,y_hat,y):
acc = (np.argmax(y_hat, axis=-1) == np.argmax(y, axis=-1))
acc = np.mean(acc).tolist()
return acc
def calc_loss(self,y_hat,y):
avg=np.prod(np.asarray(y_hat.shape[:-1]))
loss=-np.sum(np.multiply(y,np.log(y_hat)))/avg
return loss.tolist()
def backward(self,y_hat,y_true):
avg = np.prod(np.asarray(y_hat.shape[:-1]))
return (y_hat-y_true)/avg
class CategoricalCrossEntropy(Objective):
def calc_acc(self,y_hat,y):
acc = (np.argmax(y_hat, axis=-1) == y)
acc = np.mean(acc).tolist()
return acc
def calc_loss(self,y_hat,y_true):
to_sum_dim=np.prod(y_hat.shape[:-1])
last_dim=y_hat.shape[-1]
N=y_hat.shape[0]
probs=y_hat.reshape(-1,last_dim)
y_flat=y_true.reshape(to_sum_dim)
loss = -np.sum(np.log(probs[np.arange(to_sum_dim), y_flat])) / N
return loss
# to_sum_shape=np.asarray(y_hat.shape[:-1])
# avg=np.prod(to_sum_shape)
# idx=[]
# for s in to_sum_shape:
# idx.append(np.arange(s).tolist())
# idx.append(y.flatten().tolist())
#
# loss=-np.sum(np.log(y_hat[idx]))/avg
# return loss.tolist()
def backward(self,y_hat,y_true):
# to_sum_shape = np.asarray(y_hat.shape[:-1])
# avg = np.prod(to_sum_shape)
# idx = []
# for s in to_sum_shape:
# idx.append(np.arange(s).tolist())
# idx.append(y_true.flatten().tolist())
#
# y_hat[idx]-=1
# return y_hat/avg
to_sum_dim=np.prod(y_hat.shape[:-1])
last_dim=y_hat.shape[-1]
N=y_hat.shape[0]
probs=y_hat.reshape(-1,last_dim)
y_flat = y_true.reshape(to_sum_dim)
probs[np.arange(to_sum_dim), y_flat] -= 1
probs/=N
output=probs.reshape(y_hat.shape)
return output
def get_objective(objective):
if objective.__class__.__name__=='str':
objective=objective.lower()
if objective in['categoricalcrossentropy','categorical_crossentropy','categorical_cross_entropy']:
return CategoricalCrossEntropy()
elif objective in['sparsecategoricalcrossentropy','sparse_categorical_crossentropy','sparse_categorical_cross_entropy']:
return SparseCategoricalCrossEntropy()
elif objective in ['binarycrossentropy','binary_cross_entropy','binary_crossentropy']:
return BinaryCrossEntropy()
elif objective in ['meansquarederror','mean_squared_error','mse']:
return MeanSquaredError()
elif objective in ['meanabsoluteerror','mean_absolute_error','mae']:
return MeanAbsoluteError()
elif isinstance(objective,Objective):
return copy.deepcopy(objective)
else:
raise ValueError('unknown objective type!')
| [
"numpy.ones_like",
"numpy.prod",
"numpy.mean",
"numpy.power",
"numpy.where",
"numpy.arange",
"numpy.absolute",
"numpy.log",
"numpy.asarray",
"numpy.argmax",
"numpy.sum",
"copy.deepcopy",
"numpy.divide"
] | [((687, 710), 'numpy.where', 'np.where', (['(y_hat - y < 0)'], {}), '(y_hat - y < 0)\n', (695, 710), True, 'import numpy as np\n'), ((723, 742), 'numpy.ones_like', 'np.ones_like', (['y_hat'], {}), '(y_hat)\n', (735, 742), True, 'import numpy as np\n'), ((2168, 2193), 'numpy.prod', 'np.prod', (['y_hat.shape[:-1]'], {}), '(y_hat.shape[:-1])\n', (2175, 2193), True, 'import numpy as np\n'), ((3140, 3165), 'numpy.prod', 'np.prod', (['y_hat.shape[:-1]'], {}), '(y_hat.shape[:-1])\n', (3147, 3165), True, 'import numpy as np\n'), ((1282, 1310), 'numpy.asarray', 'np.asarray', (['y_hat.shape[:-1]'], {}), '(y_hat.shape[:-1])\n', (1292, 1310), True, 'import numpy as np\n'), ((1486, 1511), 'numpy.argmax', 'np.argmax', (['y_hat'], {'axis': '(-1)'}), '(y_hat, axis=-1)\n', (1495, 1511), True, 'import numpy as np\n'), ((1515, 1536), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (1524, 1536), True, 'import numpy as np\n'), ((1656, 1684), 'numpy.asarray', 'np.asarray', (['y_hat.shape[:-1]'], {}), '(y_hat.shape[:-1])\n', (1666, 1684), True, 'import numpy as np\n'), ((1843, 1871), 'numpy.asarray', 'np.asarray', (['y_hat.shape[:-1]'], {}), '(y_hat.shape[:-1])\n', (1853, 1871), True, 'import numpy as np\n'), ((2014, 2039), 'numpy.argmax', 'np.argmax', (['y_hat'], {'axis': '(-1)'}), '(y_hat, axis=-1)\n', (2023, 2039), True, 'import numpy as np\n'), ((4344, 4368), 'copy.deepcopy', 'copy.deepcopy', (['objective'], {}), '(objective)\n', (4357, 4368), False, 'import copy\n'), ((223, 245), 'numpy.power', 'np.power', (['(y_hat - y)', '(2)'], {}), '(y_hat - y, 2)\n', (231, 245), True, 'import numpy as np\n'), ((1003, 1023), 'numpy.mean', 'np.mean', (['(y_pred == y)'], {}), '(y_pred == y)\n', (1010, 1023), True, 'import numpy as np\n'), ((1151, 1168), 'numpy.log', 'np.log', (['(1 - y_hat)'], {}), '(1 - y_hat)\n', (1157, 1168), True, 'import numpy as np\n'), ((1329, 1356), 'numpy.divide', 'np.divide', (['(1 - y)', '(1 - y_hat)'], {}), '(1 - y, 1 - y_hat)\n', (1338, 
1356), True, 'import numpy as np\n'), ((1352, 1371), 'numpy.divide', 'np.divide', (['y', 'y_hat'], {}), '(y, y_hat)\n', (1361, 1371), True, 'import numpy as np\n'), ((1553, 1565), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (1560, 1565), True, 'import numpy as np\n'), ((2061, 2073), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (2068, 2073), True, 'import numpy as np\n'), ((3330, 3351), 'numpy.arange', 'np.arange', (['to_sum_dim'], {}), '(to_sum_dim)\n', (3339, 3351), True, 'import numpy as np\n'), ((1120, 1133), 'numpy.log', 'np.log', (['y_hat'], {}), '(y_hat)\n', (1126, 1133), True, 'import numpy as np\n'), ((1192, 1212), 'numpy.sum', 'np.sum', (['loss'], {'axis': '(1)'}), '(loss, axis=1)\n', (1198, 1212), True, 'import numpy as np\n'), ((593, 615), 'numpy.absolute', 'np.absolute', (['(y_hat - y)'], {}), '(y_hat - y)\n', (604, 615), True, 'import numpy as np\n'), ((1722, 1735), 'numpy.log', 'np.log', (['y_hat'], {}), '(y_hat)\n', (1728, 1735), True, 'import numpy as np\n'), ((2376, 2397), 'numpy.arange', 'np.arange', (['to_sum_dim'], {}), '(to_sum_dim)\n', (2385, 2397), True, 'import numpy as np\n')] |
from __future__ import print_function, absolute_import, division
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from IPython.core.pylabtools import figsize
figsize(12, 4)
import os
import sys
os.environ['THEANO_FLAGS'] = "device=cpu,optimizer=fast_run"
DATA_DIR = os.path.join('/res', 'data')
sys.path.append(os.path.join('/res', 'src'))
import numpy as np
X = np.linspace(-10, 10, 1000)
y = X
plt.figure()
plt.plot(X, y)
plt.show()
def sigmoid(x):
return 1 / (1 + np.exp(-x))
X = np.linspace(-10, 10, 1000)
y = sigmoid(X)
plt.figure()
plt.plot(X, y)
plt.show()
X = np.linspace(-12, 12, 1000)
y = np.tanh(X)
plt.figure()
plt.plot(X, y)
plt.show()
X = np.linspace(-12, 12, 1000)
y = [max(i, 0) for i in X]
plt.figure()
plt.plot(X, y)
plt.show()
X = np.linspace(-12, 12, 1000)
y = np.where(X > 0, X, 0.01 * X)
plt.figure()
plt.plot(X, y)
plt.show()
| [
"IPython.core.pylabtools.figsize",
"matplotlib.use",
"numpy.where",
"matplotlib.pyplot.plot",
"os.path.join",
"numpy.tanh",
"numpy.exp",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.show"
] | [((83, 104), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (97, 104), False, 'import matplotlib\n'), ((186, 200), 'IPython.core.pylabtools.figsize', 'figsize', (['(12)', '(4)'], {}), '(12, 4)\n', (193, 200), False, 'from IPython.core.pylabtools import figsize\n'), ((295, 323), 'os.path.join', 'os.path.join', (['"""/res"""', '"""data"""'], {}), "('/res', 'data')\n", (307, 323), False, 'import os\n'), ((394, 420), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(1000)'], {}), '(-10, 10, 1000)\n', (405, 420), True, 'import numpy as np\n'), ((427, 439), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (437, 439), True, 'from matplotlib import pyplot as plt\n'), ((440, 454), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y'], {}), '(X, y)\n', (448, 454), True, 'from matplotlib import pyplot as plt\n'), ((455, 465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (463, 465), True, 'from matplotlib import pyplot as plt\n'), ((520, 546), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(1000)'], {}), '(-10, 10, 1000)\n', (531, 546), True, 'import numpy as np\n'), ((562, 574), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (572, 574), True, 'from matplotlib import pyplot as plt\n'), ((575, 589), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y'], {}), '(X, y)\n', (583, 589), True, 'from matplotlib import pyplot as plt\n'), ((590, 600), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (598, 600), True, 'from matplotlib import pyplot as plt\n'), ((606, 632), 'numpy.linspace', 'np.linspace', (['(-12)', '(12)', '(1000)'], {}), '(-12, 12, 1000)\n', (617, 632), True, 'import numpy as np\n'), ((637, 647), 'numpy.tanh', 'np.tanh', (['X'], {}), '(X)\n', (644, 647), True, 'import numpy as np\n'), ((648, 660), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (658, 660), True, 'from matplotlib import pyplot as plt\n'), ((661, 675), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y'], {}), '(X, y)\n', 
(669, 675), True, 'from matplotlib import pyplot as plt\n'), ((676, 686), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (684, 686), True, 'from matplotlib import pyplot as plt\n'), ((692, 718), 'numpy.linspace', 'np.linspace', (['(-12)', '(12)', '(1000)'], {}), '(-12, 12, 1000)\n', (703, 718), True, 'import numpy as np\n'), ((746, 758), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (756, 758), True, 'from matplotlib import pyplot as plt\n'), ((759, 773), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y'], {}), '(X, y)\n', (767, 773), True, 'from matplotlib import pyplot as plt\n'), ((774, 784), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (782, 784), True, 'from matplotlib import pyplot as plt\n'), ((790, 816), 'numpy.linspace', 'np.linspace', (['(-12)', '(12)', '(1000)'], {}), '(-12, 12, 1000)\n', (801, 816), True, 'import numpy as np\n'), ((821, 849), 'numpy.where', 'np.where', (['(X > 0)', 'X', '(0.01 * X)'], {}), '(X > 0, X, 0.01 * X)\n', (829, 849), True, 'import numpy as np\n'), ((850, 862), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (860, 862), True, 'from matplotlib import pyplot as plt\n'), ((863, 877), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y'], {}), '(X, y)\n', (871, 877), True, 'from matplotlib import pyplot as plt\n'), ((878, 888), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (886, 888), True, 'from matplotlib import pyplot as plt\n'), ((340, 367), 'os.path.join', 'os.path.join', (['"""/res"""', '"""src"""'], {}), "('/res', 'src')\n", (352, 367), False, 'import os\n'), ((503, 513), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (509, 513), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#####
Tools
#####
*Created on Thu Jul 2 10:07:56 2015 by <NAME>*
A set of tools to use with the `RDKit <http://rdkit.org>`_ in the IPython notebook.
"""
import time
import sys
import base64
import os
import os.path as op
import random
import csv
import gzip
import math
import pickle
from copy import deepcopy
from itertools import product
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import Draw
import rdkit.Chem.Descriptors as Desc
# imports for similarity search
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit import DataStructs
from rdkit.SimDivFilters.rdSimDivPickers import MaxMinPicker
import rdkit.Chem.Scaffolds.MurckoScaffold as MurckoScaffold
try:
Draw.DrawingOptions.atomLabelFontFace = "DejaVu Sans"
Draw.DrawingOptions.atomLabelFontSize = 18
except KeyError: # Font "DejaVu Sans" is not available
pass
from PIL import Image, ImageChops
import numpy as np
from . import html_templates as html
try:
import ipywidgets as ipyw
WIDGETS = True
except ImportError:
WIDGETS = False
from IPython.core.display import HTML, display
if sys.version_info[0] > 2:
PY3 = True
from io import BytesIO as IO
else:
PY3 = False
from cStringIO import StringIO as IO
try:
from . import bokeh_tools as bkt
PLOT_TOOL = "bokeh"
except ImportError:
print(" * could not import Bokeh, plotting with Highcharts instead.")
PLOT_TOOL = "highcharts"
from . import hc_tools as hct
try:
from misc_tools import apl_tools as apt
AP_TOOLS = True
except ImportError:
AP_TOOLS = False
USE_FP = "morgan" # other options: "avalon", "default"
try:
# Try to import Avalon so it can be used for generation of 2d coordinates.
from rdkit.Avalon import pyAvalonTools as pyAv
USE_AVALON_2D = True
except ImportError:
print(" * Avalon not available. Using RDKit for 2d coordinate generation.")
USE_AVALON_2D = False
try:
from Contrib.SA_Score import sascorer
SASCORER = True
except ImportError:
print("* SA scorer not available. RDKit's Contrib dir needs to be in the Python import path...")
SASCORER = False
if AP_TOOLS:
#: Library version
VERSION = apt.get_commit(__file__)
# I use this to keep track of the library versions I use in my project notebooks
print("{:45s} (commit: {})".format(__name__, VERSION))
else:
print("{:45s} ({})".format(__name__, time.strftime("%y%m%d-%H:%M", time.localtime(op.getmtime(__file__)))))
if op.isfile("lib/jsme/jsme.nocache.js"):
JSME_LOCATION = "lib"
else:
print("- no local installation of JSME found, using web version.")
JSME_LOCATION = "http://peter-ertl.com/jsme/JSME_2017-02-26"
BGCOLOR = "#94CAEF"
IMG_GRID_SIZE = 235
# A list of partial property strings to use for ordering of properties:
DEFAULT_ORDER = ["_id", "supplier", "producer", "activity|pic50",
"hit", "actass", "pure_flag", "purity", "identity", "lcms"]
JSME_OPTIONS = {"css": ["css/style.css", "css/collapsible_list.css"],
"scripts": ["lib/jsme/jsme.nocache.js"]}
TBL_JAVASCRIPT = '''<script type="text/javascript">
function toggleCpd(cpdIdent)
{{
listPos = document.id_list{ts}.data.value.indexOf(cpdIdent);
cpdIdentCell = document.getElementById(cpdIdent+"_{ts}");
if (listPos == -1)
{{
if (document.id_list{ts}.remark.checked == true)
{{
rem = "\\t" + prompt("Remark (Enter for none):", "");
}}
else
{{
rem = "";
}}
document.id_list{ts}.data.value = document.id_list{ts}.data.value + cpdIdent + rem + "\\n";
cpdIdentCell.style.backgroundColor = "yellow";
}}
else
{{
removeStr = cpdIdent;
tempStr2 = document.id_list{ts}.data.value;
if (listPos > 0) {{
tempStr1 = tempStr2.substring(0, listPos);
tempStr2 = tempStr2.substring(listPos, tempStr2.length);
}} else {{
tempStr1 = "";
}}
listPos = tempStr2.indexOf("\\n");
if (listPos < tempStr2.length - 1) {{
tempStr1 = tempStr1 + tempStr2.substring(listPos+1, tempStr2.length)
}}
document.id_list{ts}.data.value = tempStr1;
cpdIdentCell.style.backgroundColor = "{bgcolor}";
}}
show_number_selected();
}}
function show_number_selected() {{
// display the number of selected compounds:
var count = (document.id_list{ts}.data.value.match(/\\n/g) || []).length;
document.getElementById("selection_title{ts}").innerHTML = "Selection (" + count + "):";
}}
function highlight_cpds() {{
// highlights compounds that were pasted into the selection list
// and keeps those that could be found
var lines = document.id_list{ts}.data.value.split("\\n");
var found = "";
for (var idx = 0; idx < lines.length; idx++) {{
var cpd = lines[idx];
var cpdIdentCell = document.getElementById(cpd+"_{ts}");
if (cpdIdentCell != null) {{
cpdIdentCell.style.backgroundColor = "yellow";
found = found + cpd + "\\n";
}}
}}
// set the value of the selection list to the found compound Ids
document.id_list{ts}.data.value = found;
show_number_selected();
}}
function myShowSelection() {{
document.location.hash = "#SelectionList";
}}
</script>
'''
ID_LIST = """<br><b><a name="SelectionList" id="selection_title{ts}">Selection (0):</a></b>
<form name="id_list{ts}">
<input type="checkbox" name="remark" value="prompt" > Prompt for Remarks<br>
<textarea name="data" cols="70" rows="10"></textarea>
<input type="button" name="highlight" value="highlight compounds" onclick="highlight_cpds()"
title="Paste a list of Compound_Ids here and press this button. The compounds will be highlighted in the report above. Compounds which were not found are removed from the list.">
</form>
"""
JSME_FORM = '''<script type="text/javascript" src="{jsme_loc}/jsme/jsme.nocache.js"></script>
<script type="text/javascript">
function jsmeOnLoad() {{
//arguments: HTML id, width, height (must be string not number!)
jsmeApplet{ts} = new JSApplet.JSME("appletContainer{ts}", "380px", "340px", {{
//optional parameters
"options" : "query,hydrogens"
}});
}}
function onSubmit() {{
var drawing = jsmeApplet{ts}.molFile();
// document.getElementById('jsme_smiles{ts}').value = drawing;
var command = '{var_name} = Chem.MolFromMolBlock("""' + drawing + '""")';
console.log("Executing Command: " + command);
var kernel = IPython.notebook.kernel;
kernel.execute(command);
}}
</script>
<table align="left" style="border: none;">
<tr style="border: none;">
<td id="appletContainer{ts}" style="border: none;"></td>
<td style="vertical-align: bottom; border: none;">
<button onclick="onSubmit()">done !</button>
</td>
</tr>
</table>
'''
class NoFieldTypes(Exception):
def __str__(self):
return repr("FieldTypeError: field types could not be extracted from Mol_List")
class Mol_List(list):
"""Enables display of molecule lists as HTML tables in IPython notebook just by-call
(via _repr_html)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.order = None
self.ia = False # wether the table and grid views are interactive or no
self.plot_tool = PLOT_TOOL
self.id_prop = None
self.recalc_needed = {}
self._set_recalc_needed()
def _pass_properties(self, new_list):
new_list.order = self.order
new_list.ia = self.ia
new_list.plot_tool = self.plot_tool
def __getitem__(self, item):
result = list.__getitem__(self, item)
try:
new_list = type(self)(result)
# pass on properties
self._pass_properties(new_list)
return new_list
except TypeError:
return result
def new(self, *args):
new_list = type(self)(*args)
# pass on properties
self._pass_properties(new_list)
return new_list
def _repr_html_(self):
id_prop = guess_id_prop(list_fields(self)) if self.ia else None
return mol_table(self, id_prop=id_prop, order=self.order)
def _set_recalc_needed(self):
"""Make sure that the expensive calculations are not done too often."""
self.len = len(self)
self.recalc_needed["plot_tool"] = PLOT_TOOL
keys = ["d", "fields", "field_types", "id_prop"]
for k in keys:
self.recalc_needed[k] = True
def _get_field_types(self):
"""Detect all the property field types.
Returns:
Dict with the property names as keys and the types as values."""
print(" > detecting field types...")
field_types = {}
if len(self) > 100:
sdf_sample = random.sample(self, len(self) // 2)
else:
sdf_sample = self
for mol in sdf_sample:
prop_names = mol.GetPropNames()
for prop in prop_names:
prop_type = "number"
prop_str = mol.GetProp(prop)
try:
float(prop_str)
if prop.lower().endswith("id"):
prop_type = "key"
except ValueError:
prop_type = "str"
if prop in field_types:
if field_types[prop] in ["number", "key"] and prop_type == "str":
# "str" overrides everything: if one string is among the values
# of a property, all values become type "str"
field_types[prop] = prop_type
else:
field_types[prop] = prop_type
if not field_types:
raise NoFieldTypes()
return field_types
def _calc_d(self):
self._d = {x: [] for x in self.fields}
self._d["mol"] = []
for mol in self:
if not mol: continue
if self.plot_tool == "bokeh":
img_tag = b64_img(mol)
else:
img_tag = '<img src="data:image/png;base64,{}" alt="Mol"/>'.format(b64_img(mol))
self._d["mol"].append(img_tag)
for prop in self.fields:
if mol.HasProp(prop):
self._d[prop].append(get_value(mol.GetProp(prop)))
else:
self._d[prop].append(np.nan)
def append(self, other):
self._set_recalc_needed()
super().append(other)
def extend(self, other):
self._set_recalc_needed()
super().extend(other)
def align(self, mol_or_smiles=None, in_place=True):
"""Align the Mol_list to the common substructure provided as Mol or Smiles.
Args:
mol_or_smiles (bool): The substructure to which to align.
If None, then the method uses rdFMCS to determine the MCSS
of the Mol_List."""
self.recalc_needed["d"] = True
if in_place:
align(self, mol_or_smiles)
else:
new_list = self.new()
for mol in self:
new_list.append(mol)
align(new_list, mol_or_smiles)
return new_list
def add_id(self, id_prop="molid"):
"""Add an Id property ``id_prop`` to the Mol_List.
By default, "molid" is used."""
for idx, mol in enumerate(self, 1): # start at index 1
mol.SetProp(id_prop, str(idx))
def write_sdf(self, fn, conf_id=-1):
"""Write Mol_List instance as SD File"""
writer = Chem.SDWriter(fn)
# try to save the column order
first_mol = True
for mol in self:
if first_mol:
order = None
try:
order = self.order
except AttributeError:
pass
if order:
mol.SetProp("order", ";".join(order))
try:
mol.GetConformer()
except ValueError: # no 2D coords... calculate them
mol.Compute2DCoords()
writer.write(mol, confId=conf_id)
# remove the order property again from mol_list
if first_mol:
first_mol = False
mol.ClearProp("order")
writer.close()
    def write_csv(self, fn="mols.csv", props=None, include_smiles=True, isomeric=True):
        """Writes the Mol_List as a csv file to disk.

        Parameters:
            fn (str): Filename.
            props (list[string]): An optional list of molecule properties to write.
                If `props` is None, all props are written.
            include_smiles (bool): If true, the Smiles will be calculated on the fly
                and written to the csv.
            isomeric (bool): If True, the generated Smiles will be isomeric."""
        if props is None:
            props = self.fields
        if not isinstance(props, list):
            props = [props]
        # copy so that appending "Smiles" does not mutate the caller's list
        csv_fields = props.copy()
        if include_smiles:
            csv_fields.append("Smiles")
        with open(fn, "w") as f:
            # tab-separated output despite the .csv extension ("excel-tab" dialect)
            writer = csv.DictWriter(f, csv_fields, dialect="excel-tab")
            writer.writeheader()
            for mol in self:
                row = {}
                if include_smiles:
                    smi = Chem.MolToSmiles(mol, isomericSmiles=isomeric)
                    row["Smiles"] = smi
                for prop in props:
                    if mol.HasProp(prop):
                        val = mol.GetProp(prop)
                        # empty values are omitted so the cell stays blank
                        if val != "":
                            row[prop] = val
                writer.writerow(row)
def sort_list(self, field, reverse=True):
"""Sort the Mol_List according to <field>."""
self.sort(key=lambda x: _key_get_prop(x, field, reverse=reverse), reverse=reverse)
def order_props(self, order="default"):
"""Arrange the display order of the properties."""
order_props(self, order)
def sample_random(self, size):
"""Return a random sample of size `size`."""
return self.new(random.sample(self, size))
def sample_diverse(self, size, fp=None):
"""Return a diverse sample of size `size`.
Fingerprint options: None (default), Morgan.
(see http://rdkit.blogspot.de/2014/08/picking-diverse-compounds-from-large.html)."""
return self.new(sample_diverse(self, size, fp))
def split(self, ratio=0.5):
"""Split the mol_list in two halves of the specified `ratio`.
Two Mol_Lists are returned"""
l1 = self.new()
l2 = self.new()
for mol in self:
mol_copy = deepcopy(mol)
if random.random() < ratio:
l1.append(mol_copy)
else:
l2.append(mol_copy)
return l1, l2
def mols_with_prop(self, prop):
"""Returns:
Am iterator of molecules in the list where mol and prop are defined."""
for mol in self:
if mol and mol.HasProp(prop):
yield mol
    def prop_filter(self, query, invert=False, sorted=True, reverse=True, field_types=None,
                    make_copy=True, show=True):
        """Return a new Mol_List based on the property filtering.

        ``query`` is a Python expression containing exactly one known field
        name (e.g. ``"LogP < 3.5"``); it is evaluated per molecule via
        ``eval()`` — only use trusted query strings.
        By default it creates an independent copy of the mol objects.
        With `show == True` (default), the resulting numbers of the search will be printed."""
        result_list = self.new()
        if self.order:
            result_list.order = self.order.copy()
        result_list.ia = self.ia
        mol_counter_out = 0
        if not field_types:
            field_types = self.field_types
        if not field_types:
            print(" # no field type information available! -aborted.")
            return None
        # extract the (first) known field name mentioned in the query
        field = None
        for el in query.split(" "):
            if el in field_types:
                field = el
                break
        if not field:
            print(" # field could not be extracted from query! -aborted.")
            return None
        print(" > field {} extracted from query: {}.".format(field, query))
        # substitute the field name by the local variable evaluated below
        query_mod = query.replace(field, "val")
        for mol_counter_in, mol in enumerate(self):
            if not mol:
                continue
            hit = False
            if field in mol.GetPropNames():
                val = mol.GetProp(field).lower()
                if field_types[field] in ["number", "key"]:
                    try:
                        val_float = float(val)
                    except ValueError:
                        # non-numeric value in a numeric field: skip record
                        continue
                    # use an int when the value is integral
                    val_int = int(val_float)
                    if val_int == val_float:
                        val = val_int
                    else:
                        val = val_float
                hit = eval(query_mod)
            if invert:
                hit = not hit
            if hit:
                mol_counter_out += 1
                if make_copy:
                    mol = deepcopy(mol)
                result_list.append(mol)
        if show:
            print("> processed: {:7d} found: {:6d}".format(mol_counter_in + 1, mol_counter_out))
        if sorted:
            result_list.sort_list(field, reverse=reverse)
        return result_list
def mol_filter(self, query, smarts=False, invert=False,
align=None, add_h=False, make_copy=True, show=True):
"""Returns a new Mol_List containing the substructure matches.
By default it creates an independent copy of the mol objects.
With `show == True` (default), the resulting numbers of the search will be printed."""
result_list = self.new()
if self.order:
result_list.order = self.order.copy()
result_list.ia = self.ia
mol_counter_out = 0
if isinstance(query, str):
if "[H]" in query or "#1" in query:
add_h = True
print("> explicit hydrogens turned on (add_h = True)")
if add_h or "#6" in query or "#7" in query:
smarts = True
if smarts:
query_mol = Chem.MolFromSmarts(query)
if align is None: # Aligning to mol generated from Smarts does not work
align = False
else:
query_mol = Chem.MolFromSmiles(query)
if align is None:
align = True
else:
query_mol = query
if align is None:
atm = query_mol.GetAtomWithIdx(1)
if atm.HasQuery(): # True for molecules that were generated from Smarts
align = False # Aligning to mol generated from Smarts does not work
else:
align = True
if not query_mol:
print("* ERROR: could not generate query molecule. Try smarts=True")
return None
for mol_counter_in, mol in enumerate(self):
if not mol: continue
hit = False
if add_h:
mol_with_h = Chem.AddHs(mol)
if mol_with_h.HasSubstructMatch(query_mol):
hit = True
else:
if mol.HasSubstructMatch(query_mol):
hit = True
if invert:
# reverse logic
hit = not hit
if hit:
mol_counter_out += 1
if make_copy:
mol = deepcopy(mol)
result_list.append(mol)
if align and len(result_list) > 0:
result_list.align(query_mol)
if show:
print("> processed: {:7d} found: {:6d}".format(mol_counter_in + 1, mol_counter_out))
return result_list
def has_prop_filter(self, prop, invert=False, make_copy=True, show=True):
"""Returns a new Mol_list with molecules containing the property `prop`.
By default it creates an independent copy of the mol objects.
With `show == True` (default), the resulting numbers of the search will be printed."""
result_list = self.new()
if self.order:
result_list.order = self.order.copy()
result_list.ia = self.ia
mol_counter_out = 0
for mol_counter_in, mol in enumerate(self):
if not mol: continue
hit = False
if mol.HasProp(prop):
hit = True
if invert:
hit = not hit
if hit:
mol_counter_out += 1
if make_copy:
mol = deepcopy(mol)
result_list.append(mol)
if show:
print("> processed: {:7d} found: {:6d}".format(mol_counter_in + 1, mol_counter_out))
return result_list
    def get_ids(self):
        """Get the list of Compound IDs in the Mol_List.

        Uses ``self.id_prop`` when set (must exist in the data); otherwise
        tries to guess the Id property from the available fields.

        Returns:
            A list of compound ids (molecules missing the Id are skipped).

        Raises:
            LookupError: when no Id property can be determined."""
        prop_list = self.fields
        if self.id_prop is not None:
            if self.id_prop not in prop_list:
                raise LookupError("id_prop not found in data set.")
        else:  # try to guess an id_prop
            self.id_prop = guess_id_prop(prop_list)
            if self.id_prop is None:
                raise LookupError("no id prop could be found in data set.")
        id_list = []
        for mol in self:
            if mol:
                if mol.HasProp(self.id_prop):
                    val = get_value(mol.GetProp(self.id_prop))
                    id_list.append(val)
        return id_list
def new_list_from_ids(self, id_list, invert=False, make_copy=True):
"""Creates a new Mol_List out of the given IDs.
Parameters:
id_list (list): The list of IDs
id_prop (None, str): (optional) The name of the id_prop, if None, it will be guessed.
Returns:
A new Mol_List from a list of Ids.
By default it creates an independent copy of the mol objects."""
if not isinstance(id_list, list):
id_list = [id_list]
id_all = set(self.get_ids())
id_set = set(id_list)
if invert:
id_keep = id_all - id_set
else:
id_keep = id_set.intersection(id_all)
new_list = self.new()
if self.order:
new_list.order = self.order.copy()
new_list.ia = self.ia
for mol in self:
if mol:
if mol.HasProp(self.id_prop):
val = get_value(mol.GetProp(self.id_prop))
if val in id_keep:
if make_copy:
mol = deepcopy(mol)
new_list.append(mol)
return new_list
    def show_cpd(self, id_no, is_cpd_id=True, make_copy=True, show_smiles=True):
        """Display a single compound together with its Smiles.

        With is_cpd_id == True (default), the given id_no is interpreted as a
        Compound_Id; otherwise it is used as index in the list.

        Returns:
            A new one-element Mol_List containing the match(es).

        Raises:
            LookupError: when no molecule with the given Id exists."""
        new_list = self.new()
        if self.order:
            new_list.order = self.order.copy()
        new_list.ia = self.ia
        new_list.id_prop = self.id_prop
        if not is_cpd_id:
            # direct list index
            idx = id_no
            if make_copy:
                mol = deepcopy(self[id_no])
            else:
                mol = self[id_no]
            new_list.append(mol)
        else:
            if self.id_prop is None:
                self.id_prop = guess_id_prop(self.fields)
                if self.id_prop is None:
                    raise LookupError("Id property {} could not be found in the Mol_List.".format(self.id_prop))
            # linear scan for the molecule(s) with the matching Id value
            for idx, mol in enumerate(self):
                if mol:
                    if mol.HasProp(self.id_prop):
                        val = get_value(mol.GetProp(self.id_prop))
                        if val == id_no:
                            if make_copy:
                                mol = deepcopy(mol)
                            new_list.append(mol)
            if len(new_list) == 0:
                raise LookupError("no molecule with {}: {} could be found in the Mol_List.".format(self.id_prop, id_no))
        if show_smiles:
            print("idx: {:3d} Smiles: {}".format(idx, Chem.MolToSmiles(new_list[0])))
        return new_list
def add_props_from_dictlist(self, dl, id_prop=None):
"""Add properties from a dictionary list to the Mol_List.
[{"Compound_Id": 123456, "Prop1": 1, "Prop2": 2}, {"Compound_Id: 123457, ...}]"""
if id_prop is None:
if self.id_prop is None:
self.id_prop = guess_id_prop(self)
else:
self.id_prop = id_prop
if self.id_prop is None:
raise LookupError("id_prop is required.")
# get a list of the Compound Ids
cpd_ids = [d[self.id_prop] for d in dl]
for mol in self:
cpd_id = get_value(mol.GetProp(self.id_prop))
if cpd_id in cpd_ids:
pos = cpd_ids.index(cpd_id)
props = dl[pos]
for prop in props:
if prop == self.id_prop: continue
mol.SetProp(prop, str(props[prop]))
def set_prop_on_mol(self, id_no, prop_name, prop_value, is_cpd_id=True):
"""Change the value of a property in the Mol_List.
prop_name (str) is the name of the property,
prop_value (str) the value to which it will be set (using mol.SetProp()).
With is_cpd_id == True (default), the given id_no is interpreted as a Compound_Id.
Otherwise it is used as index in the list."""
mol = self.show_cpd(id_no, is_cpd_id=is_cpd_id, make_copy=False, show_smiles=False)[0]
mol.SetProp(prop_name, prop_value)
    def calc_props(self, props, force2d=False, **kwargs):
        """Calculate properties from the Mol_List.
        props can be a single property or a list of properties.

        Calculable properties:
            2d, date, formula, smiles, hba, hbd, logp, molid, mw, rotb,
            nha (number of heavy atoms),
            sa (synthetic accessibility), tpsa, murcko (MurckoScaffold as Smiles).
            sim (similarity of the Murcko scaffold relative to `sim_mol_or_smiles`
                 or the mol with `sim_id`).
            smiles (isomeric=True/False)

        Synthetic Accessibility (normalized):
            0: hard to synthesize; 1: easy access

            as described in:
                | Estimation of Synthetic Accessibility Score of Drug-like Molecules based on Molecular Complexity and Fragment Contributions
                | *<NAME> and <NAME>*
                | Journal of Cheminformatics 1:8 (2009) (`link <http://www.jcheminf.com/content/1/1/8>`_)
        """
        sim_mol_or_smiles = kwargs.get("sim_mol_or_smiles", None)
        sim_id = kwargs.get("sim_id", None)
        query_fp = None
        if not isinstance(props, list):
            props = [props]
        # make all props lower-case:
        props = list(map(lambda x: x.lower(), props))
        if sim_id is not None:  # sim_id represents a Compound_Id,
            # which is then taken as the Similarity base
            sim_mol_or_smiles = self.show_cpd(sim_id, is_cpd_id=True,
                                              make_copy=True, show_smiles=False)[0]
        if sim_mol_or_smiles is not None:
            if isinstance(sim_mol_or_smiles, str):
                sim_mol_or_smiles = Chem.MolFromSmiles(sim_mol_or_smiles)
            # use pre-calculated fingerprints whenever possible
            if sim_mol_or_smiles.HasProp("FP_b64"):
                # NOTE(review): unpickling data stored on the molecule — only
                # safe when the SD file comes from a trusted source
                query_fp = pickle.loads(base64.b64decode(sim_mol_or_smiles.GetProp("FP_b64")))
            else:
                # similarity is computed on the Murcko scaffold of the query
                murcko_mol = MurckoScaffold.GetScaffoldForMol(sim_mol_or_smiles)
                if USE_FP == "morgan":
                    query_fp = Desc.rdMolDescriptors.GetMorganFingerprintAsBitVect(murcko_mol, 2)
                elif USE_FP == "avalon":
                    query_fp = pyAv.GetAvalonFP(murcko_mol, 1024)
                else:
                    query_fp = FingerprintMols.FingerprintMol(murcko_mol)
        ctr = 0
        calculated_props = set()
        for mol in self:
            if not mol: continue
            if "molid" in props:
                ctr += 1
                mol.SetProp("Mol_Id", str(ctr))
                calculated_props.add("molid")
            # per-molecule calculation is delegated to the module-level helper
            calc_props(mol, props, force2d=force2d, query_fp=query_fp,
                       calculated_props=calculated_props, **kwargs)
        self._set_recalc_needed()
        not_calculated = set(props) - calculated_props
        if not_calculated:
            print("* these props could not be calculated:", not_calculated)
def remove_props(self, props):
"""Remove properties from the Mol_List.
props can be a single property or a list of properties."""
for mol in self:
if mol:
remove_props_from_mol(mol, props)
self._set_recalc_needed()
    def remove_empty_props(self):
        """Remove empty properties via the module-level ``remove_empty_props``
        helper, then invalidate the cached fields/field_types/d."""
        remove_empty_props(self)
        self._set_recalc_needed()
def keep_props(self, props):
"""Keep properties in the Mol_List.
props can be a single property or a list of properties."""
if not isinstance(props, list):
props = [props]
for mol in self:
if mol:
keep_props_in_mol(mol, props)
self.order = props.copy()
self._set_recalc_needed()
    def keep_largest_fragment(self):
        """Removes salts, etc.

        Keeps only the fragment with the most heavy atoms per molecule.
        Returns a new Mol_List instance. The original properties are copied over."""
        frag_counter = 0
        new_list = self.new()
        if self.order:
            new_list.order = self.order.copy()
        new_list.ia = self.ia
        for mol in self:
            mols = Chem.GetMolFrags(mol, asMols=True)
            if len(mols) > 1:
                frag_counter += 1
                # largest fragment (by heavy atom count) first
                mols = sorted(mols, key=Desc.HeavyAtomCount, reverse=True)
                new_mol = mols[0]
                # fragment mols lose the properties of the parent -> restore them
                copy_mol_props(mol, new_mol)
            else:
                new_mol = deepcopy(mol)
            new_list.append(new_mol)
        print(" > small fragments were removed in {} molecules.".format(frag_counter))
        return new_list
def copy_prop(self, prop_orig, prop_copy, move=False):
"""Copy or rename a property in the Mol_List."""
for mol in self.mols_with_prop(prop_orig):
val_orig = mol.GetProp(prop_orig)
mol.SetProp(prop_copy, val_orig)
if move:
mol.ClearProp(prop_orig)
self._set_recalc_needed()
def rename_prop(self, prop_orig, prop_new):
"""Convenience wrapper around copy_prop"""
self.copy_prop(prop_orig, prop_new, move=True)
def remove_dups_by_id(self, id_prop=None, make_copy=True):
"""Remove duplicate records by Compound Id.
Parameters:
id_prop (None, str): The name of the Id property, if *None*, it will be guessed.
Returns:
new Mol_list without the duplicate Ids.
By default it creates an independent copy of the mol objects."""
new_list = self.new()
if self.order:
new_list.order = self.order.copy()
new_list.ia = self.ia
id_list = []
if not id_prop:
id_prop = guess_id_prop(list_fields(self))
if not id_prop:
print("* could not determine Id property.")
return None
for mol in self:
if not mol: continue
mol_id = mol.GetProp(id_prop)
if mol_id in id_list: continue
id_list.append(mol_id)
if make_copy:
mol = deepcopy(mol)
new_list.append(mol)
return new_list
def remove_by_id(self, cpd_id, id_prop=None, make_copy=True):
"""Remove molecules records by Compound Id.
Parameters:
id_prop (None, str): The name of the Id property, if *None*, it will be guessed.
Returns:
new Mol_list without the duplicate Ids.
By default it creates an independent copy of the mol objects."""
if not isinstance(cpd_id, list):
cpd_id = [cpd_id]
new_list = self.new()
if self.order:
new_list.order = self.order.copy()
new_list.ia = self.ia
if not id_prop:
id_prop = guess_id_prop(list_fields(self))
if not id_prop:
print("* could not determine Id property.")
return None
for mol in self:
if not mol: continue
mol_id = get_value(mol.GetProp(id_prop))
if mol_id in cpd_id: continue
if make_copy:
mol = deepcopy(mol)
new_list.append(mol)
return new_list
def remove_dups_by_struct(self, make_copy=True):
"""Remove duplicates by structure. Duplicates are determined by Smiles.
Returns:
new Mol_List without the duplicate structures.
By default it creates an independent copy of the mol objects. """
new_list = self.new()
if self.order:
new_list.order = self.order.copy()
new_list.ia = self.ia
smiles_list = []
for mol in self:
if not mol: continue
smiles = Chem.MolToSmiles(mol, isomericSmiles=True) # needed to distinguish between stereoisomers
if smiles in smiles_list: continue
smiles_list.append(smiles)
if make_copy:
mol = deepcopy(mol)
new_list.append(mol)
return new_list
    def enum_racemates(self, find_only=True):
        """returns: result_sdf::list<mol>, racemic_molids::list<int>

        find_only==True: return new sdf as list which contains all the racemates of the input sdf.
        find_only==False: return new sdf as list with ALL input structures, where the racemates are
        replaced by their two enantiomers. The returned sdf is always
        equal in size or larger as the input sdf.

        Multiple stereo centers are enumerated via the full R/S product.
        In the new sdf the molids are no longer unique and should be reassigned
        (remove molid and run calc_props(sdf))."""
        chirality = {"R": Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
                     "S": Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW}
        prop_list = self.fields
        if self.id_prop is not None:
            if self.id_prop not in prop_list:
                raise LookupError("id_prop not found in data set.")
        else:  # try to guess an id_prop
            self.id_prop = guess_id_prop(prop_list)
        result = self.new()
        racemic_molids = []
        for mol in self:
            # "?" marks stereocenters with undefined configuration
            chiral_centers = Chem.FindMolChiralCenters(mol, includeUnassigned=True)
            undefined_centers = [center[0] for center in chiral_centers if center[1] == "?"]
            if undefined_centers:
                racemic_molids.append(get_value(mol.GetProp(self.id_prop)))
                if find_only:
                    result.append(mol)
                    continue
                else:
                    # enumerate every R/S combination of the undefined centers
                    num_stereocenters = len(undefined_centers)
                    stereocenters = product("RS", repeat=num_stereocenters)
                    for stereo in stereocenters:
                        new_mol = Chem.Mol(mol)
                        for idx, center in enumerate(undefined_centers):
                            new_mol.GetAtomWithIdx(center).SetChiralTag(chirality[stereo[idx]])
                        result.append(new_mol)
            else:
                if not find_only:  # return ALL mols
                    result.append(mol)
        return result, racemic_molids
    def join_data_from_file(self, fn, id_prop=None, decimals=2):
        """Joins data from a file with name ``fn`` by Id property ``id_prop``. If no Id property is given, it will be guessed.
        CAUTION: The records from the file are loaded into memory!

        Parameters:
            decimals (int): number of decimal places for floating point values."""
        if not id_prop:
            id_prop = guess_id_prop(self.field_types)
        # index the file records by their Id value
        file_d = {}
        for line in csv_supplier(fn):
            rec_id = get_value(line.pop(id_prop))
            file_d[rec_id] = line
        for mol in self:
            mol_id = get_prop_val(mol, id_prop)
            if mol_id in file_d:
                records = file_d[mol_id]
                for rec in records:
                    val = get_value(records[rec])
                    if val is None: continue
                    if isinstance(val, float):
                        # round floats to the requested precision
                        mol.SetProp(rec, "{val:.{decimals}f}".format(val=val, decimals=decimals))
                    else:
                        mol.SetProp(rec, str(val))
        self._set_recalc_needed()
    def set_default(self, prop, def_val, condition=None):
        """Set a default value in all mols, in which ``prop`` is either not
        defined (``condition`` == None) or is evaluating ``condition`` to true.

        ``condition`` is a string expression fragment (e.g. ``"< 0"``) that is
        combined with the current property value and fed to ``eval()`` —
        only use trusted condition strings."""
        failed = 0
        if condition and not isinstance(condition, str):
            raise TypeError("condition needs to be of type str.")
        for mol in self:
            if not mol: continue
            if not condition:
                if not mol.HasProp(prop):
                    mol.SetProp(prop, str(def_val))
            else:
                if mol.HasProp(prop):
                    prop_val = get_value(mol.GetProp(prop))
                    # quote string values so that eval() sees a literal
                    if isinstance(prop_val, str):
                        eval_templ = """'{}' {}"""
                    else:
                        eval_templ = """{} {}"""
                    try:
                        if eval(eval_templ.format(prop_val, condition)):
                            mol.SetProp(prop, str(def_val))
                    except SyntaxError:
                        failed += 1
        self.recalc_needed["d"] = True
        self.recalc_needed["field_types"] = True
        if failed > 0:
            print("# {} records could not be processed.".format(failed))
def table(self, pagesize=25, highlight=None, show_hidden=False, img_dir=None, raw=False):
"""Return the Mol_List as HTML table.
Either as raw HTML (raw==True) or as HTML object for display in IPython notebook.
Parameters:
show_hidden (bool): Whether to show hidden properties (name starts with _) or not.
Default is False.
raw (bool): If True, return the HTML mol grid as text.
If False, return a HTML object, that can be displayed in the Jupyter Notebook.
Default is False.
img_dir (str or None): The directory, in which the molecule images are written. The directory has to exist.
Implies raw=True. If None, then the images are stored in the HTML object. Default is None."""
if self.id_prop is None:
self.id_prop = guess_id_prop(list_fields(self))
if img_dir is not None:
raw = True
if raw:
return mol_table(self, id_prop=self.id_prop, highlight=highlight, interact=self.ia,
order=self.order, img_dir=img_dir, show_hidden=show_hidden)
else:
return table_pager(self, pagesize=pagesize, id_prop=self.id_prop, interact=self.ia, highlight=highlight, order=self.order,
show_hidden=show_hidden)
def nested(self, pagesize=10, props=None, img_dir=None, raw=False):
if self.id_prop is None:
self.id_prop = guess_id_prop(list_fields(self))
if img_dir is not None:
raw = True
if raw:
return nested_table(self, id_prop=self.id_prop, props=props, order=self.order, img_dir=img_dir)
else:
return nested_pager(self, pagesize=pagesize, id_prop=self.id_prop, props=props, order=self.order)
def grid(self, pagesize=12, props=None, highlight=None, mols_per_row=4, size=IMG_GRID_SIZE, img_dir=None, raw=False):
"""Returns:
The Mol_List as HTML grid table. Either as raw HTML (raw==True) or as HTML object for display in IPython notebook.
Parameters:
props: A property or a list of properties to include in the display.
raw (bool): If True, return the HTML mol grid as text.
If False, return a HTML object, that can be displayed in the Jupyter Notebook.
Default is False.
img_dir (str or None): The directory, in which the molecule images are written. The directory has to exist.
Implies raw=True. If None, then the images are stored in the HTML object. Default is None."""
if self.id_prop is None:
self.id_prop = guess_id_prop(list_fields(self))
if img_dir is not None:
raw = True
if raw:
return mol_sheet(self, props=props, id_prop=self.id_prop, interact=self.ia,
highlight=highlight, mols_per_row=mols_per_row, size=size, img_dir=img_dir)
else:
return grid_pager(self, pagesize, props=props, id_prop=self.id_prop, interact=self.ia, highlight=highlight,
mols_per_row=mols_per_row, size=size)
def write_table(self, highlight=None, header=None, summary=None, img_dir=None,
title="Results", fn="mol_table.html"):
html.write(html.page(self.table(highlight=highlight, raw=True, img_dir=img_dir),
header=header, summary=summary, title=title), fn=fn)
return HTML('<a href="{}">{}</a>'.format(fn, fn))
def write_nested(self, header=None, summary=None, img_dir=None, title="Results", fn="nested_table.html"):
html.write(html.page(self.nested(raw=True, img_dir=img_dir),
header=header, summary=summary, title=title), fn=fn)
return HTML('<a href="{}">{}</a>'.format(fn, fn))
def write_grid(self, props=None, highlight=None, mols_per_row=5, size=IMG_GRID_SIZE,
header=None, summary=None, img_dir=None, title="Grid", fn="mol_grid.html"):
html.write(html.page(self.grid(props=props, highlight=highlight,
mols_per_row=mols_per_row, size=size, img_dir=img_dir, raw=True), header=header, summary=summary, title=title), fn=fn)
return HTML('<a href="{}">{}</a>'.format(fn, fn))
def scatter(self, x, y, r=7, tooltip=None, **kwargs):
"""Displays a Highcharts plot in the IPython Notebook.
Uses Bokeh (preferred) or the Highcharts javascript library, either locally under lib/ relative to the Notebook
or the web version at http://code.highcharts.com.
If ``tooltip`` is *None*, then structure tooltips will be shown for Mol_Lists with
less than or equal 150 records, if the Mol_List has more records, no structure tooltips
will be shown. The bevaviour can be forced by either providing ``tooltip="struct"`` for tooltips
or ``tooltip=""`` for no tooltips. Properties in the ``jitter`` list (only when used for x or y)
will be jittered by a magnitude of ``mag``.
callback (str): clicking on a point will link to the given HTML address. `@<IdProperty>` can be used as placeholder for the point id (e.g. Compound_Id). Default is None."""
if tooltip is None:
if len(self) > 150:
tooltip = ""
else:
tooltip = "struct"
if self.plot_tool == "bokeh":
return bkt.cpd_scatter(self.d, x, y, r=r, pid=self.id_prop, tooltip=tooltip, **kwargs)
else:
return hct.cpd_scatter(self.d, x, y, r=r, pid=self.id_prop, tooltip=tooltip, **kwargs)
def hist(self, field, bins=10, title="Distribution", xlabel=None, ylabel="Occurrence", normed=False, show=True, **kwargs):
"""Displays a Bokeh histogram. See bokeh_tools for documentation.
Possible useful additional kwargs include: plot_width, plot_height, y_axis_type="log"."""
if xlabel is None:
xlabel = field
hist = bkt.Hist(title=title, xlabel=xlabel, ylabel=ylabel, **kwargs)
hist.add_data(self.d[field], bins=bins, normed=normed)
if show:
return hist.show()
else:
return hist.plot
def bar(self, x, show=True, **kwargs):
"""Displays a bar chart for the occurrence of the given x-value.
This plot type is especially useful for plotting the occurrence of categorical data,
where only a small number (<= 10) of different values are present.
This function is directly calling the advanced bokeh bar chart type,
therefore no additional class is used.
Useful kwargs include: title, plot_height, plot_width."""
if show:
bkt.bar_chart(self.d, x, show=True, **kwargs)
else:
return bkt.bar_chart(self.d, x, show=False, **kwargs)
    def summary(self, text_only=False):
        """Output a summary of the Mol_List and its properties.
        If ``text_only`` is True only a text version is printed, otherwise an
        HTML table object is returned.
        ``mean`` and ``median`` are calculated with numpy."""
        field_types = self.field_types
        ln = len(self)
        max_max = 0  # largest maximum over all numeric fields (for formatting)
        sum_d = {}
        for prop in field_types:
            value_list = [get_value(mol.GetProp(prop)) for mol in self.mols_with_prop(prop)]
            num_val = len(value_list)
            sum_d[prop] = {"num_values": num_val}
            sum_d[prop]["type"] = field_types[prop]
            if field_types[prop] == "number":
                sum_d[prop]["min"] = min(value_list)
                sum_d[prop]["max"] = max(value_list)
                if sum_d[prop]["max"] > max_max:
                    max_max = sum_d[prop]["max"]
                sum_d[prop]["mean"] = np.mean(value_list)
                sum_d[prop]["median"] = np.median(value_list)
        n_digits = str(np.floor(np.log10(max_max)) + 5.3) + "f"  # digits for formatting
        if text_only:
            print("number of records:", ln)
            for prop in sum_d:
                print("\n{} ({}, {}):".format(prop, sum_d[prop]["type"], sum_d[prop]["num_values"]))
                if field_types[prop] == "number":
                    for sum_item in ["min", "max", "mean", "median"]:
                        print("{:6s}: {:{digits}}".format(sum_item, sum_d[prop][sum_item], digits=n_digits), end=" | ")
                    print()
        else:
            # build an HTML table: header row, then one row per property
            rows = []
            cells = []
            opt1 = {"align": "center", "bgcolor": "#94CAEF"}
            opt2 = {"align": "center", "bgcolor": "#94CAEF", "colspan": 7}
            cell = html.td(html.b("Summary ({} records)".format(ln)), options=opt2)
            rows.extend(html.tr(cell))
            for cell in ["Property", "Type", "Num Values", "Min", "Max", "Mean", "Median"]:
                cells.extend(html.td(html.b(cell), options=opt1))
            rows.extend(html.tr(cells))
            opt1 = {"align": "center"}
            for prop in sum_d:
                cells = []
                cells.extend(html.td(prop, options=opt1))
                cells.extend(html.td(sum_d[prop]["type"], options=opt1))
                cells.extend(html.td(str(sum_d[prop]["num_values"]), options=opt1))
                if field_types[prop] == "number":
                    for sum_item in ["min", "max", "mean", "median"]:
                        cells.extend(html.td("{:.3f}".format(sum_d[prop][sum_item]), options=opt1))
                else:
                    for i in range(4):  # insert empty cells
                        cells.extend(html.td("", options=opt1))
                rows.extend(html.tr(cells))
            table = html.table(rows)
            return HTML("".join(table))
    def correlate(self, min_corr=0.4, text_only=False):
        """Display correlations between the properties in the Mol_List.
        Calculated by np.corrcoef, only abs. values are used, higher value means higer correlation.
        Only correlations greater or to equal to ``min_corr`` are shown (default=0.4).
        If ``text_only`` is True only a text version is printed."""
        number_fields = [f for f in self.field_types if self.field_types[f] == "number"]
        n = len(number_fields)
        # fixed-width format for the longest possible "A vs. B" pair string
        pair_format = str(max(len(i) for i in number_fields) * 2 + 7) + "s"
        corr_d = {}
        ln = len(self)
        # all unique field pairs (left < right)
        for left in range(n):
            left_values = [get_prop_val(mol, number_fields[left]) for mol in self]
            for right in range(left + 1, n):
                right_values = [get_prop_val(mol, number_fields[right]) for mol in self]
                # keep only the records where both values are present
                both_y = []
                both_x = []
                for i in range(ln):
                    if left_values[i] is None or right_values[i] is None:
                        continue
                    both_y.append(left_values[i])
                    both_x.append(right_values[i])
                corr = np.corrcoef(both_y, both_x)
                corr_val = abs(corr[0][1])
                if corr_val >= min_corr:
                    k = "{} vs. {}".format(number_fields[left], number_fields[right])
                    corr_d[k] = corr_val
        if text_only:
            print("Property Correlation Coefficients:")
            for pair in sorted(corr_d, key=corr_d.get, reverse=True):
                print("{pair:{pair_format}}: {corr:.3f}".format(pair=pair,
                      pair_format=pair_format, corr=corr_d[pair]))
        else:
            # build an HTML table sorted by descending correlation
            rows = []
            cells = []
            opt1 = {"align": "center", "bgcolor": "#94CAEF"}
            opt2 = {"align": "center", "bgcolor": "#94CAEF", "colspan": 2}
            opt3 = {"align": "center", "bgcolor": "#94CAEF", "colspan": 3}
            cell = html.td(html.b("Property Correlation Coefficients"), options=opt3)
            rows.extend(html.tr(cell))
            cells.extend(html.td(html.b("A vs. B"), options=opt2))
            cells.extend(html.td(html.b("Correlation"), options=opt1))
            rows.extend(html.tr(cells))
            opt1 = {"align": "center"}
            for pair in sorted(corr_d, key=corr_d.get, reverse=True):
                cells = []
                cells.extend(html.td(pair.split(" vs. ")[0], options=opt1))
                cells.extend(html.td(pair.split(" vs. ")[1], options=opt1))
                cells.extend(html.td("{:.3f}".format(corr_d[pair]), options=opt1))
                rows.extend(html.tr(cells))
            table = html.table(rows)
            return HTML("".join(table))
@property
def fields(self):
"""A List of properties that are present in the Mol_List (property)."""
if self.len != len(self):
self._set_recalc_needed()
if self.recalc_needed["fields"]:
self._fields = list_fields(self)
self.recalc_needed["fields"] = False
return self._fields
@property
def field_types(self):
"""A dictionary of properties and their derived types (property)."""
if self.len != len(self):
self._set_recalc_needed()
if self.recalc_needed["field_types"]:
self._field_types = get_field_types(self)
self.recalc_needed["field_types"] = False
return self._field_types
@property
def d(self):
"""Representation of the Mol_List as a dictionary for plotting (property)."""
if self.len != len(self):
self._set_recalc_needed()
if self.recalc_needed["d"] or self.plot_tool != self.recalc_needed["plot_tool"]:
self._calc_d()
self.recalc_needed["d"] = False
self.recalc_needed["plot_tool"] = self.plot_tool
return self._d
def _key_get_prop(mol, field, reverse=False):
if reverse:
not_found = -1000000.0
else:
not_found = 1000000.0
try:
val = float(mol.GetProp(field))
except ValueError: # GetProp value could not be converted to float
val = 0
except KeyError: # field is not present in the mol properties
val = not_found
return val
def create_dir_if_not_exist(dir_name):
    """Create directory ``dir_name`` (including parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` so a directory created
    concurrently between the existence check and the creation no longer
    raises (fixes the check-then-act race)."""
    if not op.exists(dir_name):
        print(" * target folder does not exist, creating {}...".format(dir_name))
    os.makedirs(dir_name, exist_ok=True)
def autocrop(im, bgcolor="white"):
    """Crop a PIL image to the bounding box of its non-background content.

    Returns the cropped image, or None when the image consists entirely
    of the background color."""
    if im.mode != "RGB":
        im = im.convert("RGB")
    # the difference against a solid background reveals the content bbox
    bg = Image.new("RGB", im.size, bgcolor)
    diff = ImageChops.difference(im, bg)
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)
    return None  # no contents
def copy_mol_props(mol1, mol2):
    """Copy every property from `mol1` onto `mol2` (existing values are overwritten)."""
    for prop_name in mol1.GetPropNames():
        mol2.SetProp(prop_name, mol1.GetProp(prop_name))
def try_remove(lst, item):
    """Remove ``item`` from ``lst`` when present; leave the list untouched otherwise."""
    if item not in lst:
        return
    lst.remove(item)
def save_obj(obj, fn="object.pkl"):
    """Pickle ``obj`` to the file ``fn``."""
    with open(fn, "wb") as fh:
        pickle.dump(obj, fh)
def load_obj(fn="object.pkl"):
    """Unpickle and return the object stored in file ``fn``."""
    with open(fn, "rb") as fh:
        return pickle.load(fh)
def list_fields(sdf_list):
    """Return the union of all property names found on the molecules
    (unordered, without duplicates)."""
    found = set()
    for mol in sdf_list:
        found.update(mol.GetPropNames())
    return list(found)
def load_sdf(file_name_or_obj="testset.sdf", order="default"):
    """Create a Mol_List instance from an SD File.
    Accepts a string filename or a file object as input.
    order: "default" or None.

    A property named "order" on the first molecule (as written by
    ``write_sdf``) is used to restore the column order and is removed
    from the molecule afterwards."""
    if isinstance(file_name_or_obj, str):
        if PY3:
            # RDKit's ForwardSDMolSupplier needs a binary stream on Py3
            file_obj = open(file_name_or_obj, "rb")
        else:
            file_obj = open(file_name_or_obj)
    else:
        file_obj = file_name_or_obj
    reader = Chem.ForwardSDMolSupplier(file_obj)
    sdf_list = Mol_List()
    # try to load the column order
    first_mol = True
    for mol in reader:
        if mol:
            if first_mol:
                first_mol = False
                prop_order = None
                try:
                    prop_order = mol.GetProp("order")
                    remove_props_from_mol(mol, "order")
                except KeyError:  # first mol does not contain an order field
                    pass
                if prop_order is not None:
                    try:
                        sdf_list.order = prop_order.split(";")
                    except AttributeError:  # sdf_list is not a Mol_List
                        pass
            sdf_list.append(mol)
    if sdf_list.id_prop is None:
        sdf_list.id_prop = guess_id_prop(sdf_list.fields)
    if sdf_list.order is None and order == "default":  # only when no order is already present.
        sdf_list.order_props(order=order)
    if isinstance(file_name_or_obj, str):
        print(" > sdf {} loaded with {} records.".format(file_name_or_obj.split(".")[0], len(sdf_list)))
    else:
        print(" > sdf loaded with {} records.".format(len(sdf_list)))
    return sdf_list
def load_csv(fn, smiles_col="Smiles"):
    """Reads a csv file and returns a Mol_List instance. The molecules are generated from the Smiles column, which has to be present.

    Parameters:
        fn (str): name of the tab-separated csv file.
        smiles_col (str): name of the column that holds the Smiles.
    Returns:
        Mol_List with one molecule per parsable row; all remaining non-empty
        columns are attached as molecule properties."""
    ctr = 0
    sdf_list = Mol_List()
    with open(fn) as f:
        reader = csv.DictReader(f, dialect="excel-tab")
        for row_dict in reader:
            if smiles_col not in row_dict: continue
            # Bug fix: pop the configured column, not the hard-coded "Smiles".
            # With a custom smiles_col the original always parsed "" here.
            smi = row_dict.pop(smiles_col, "")
            mol = Chem.MolFromSmiles(smi)
            if not mol: continue
            for prop, val in row_dict.items():
                if val != "":  # skip empty cells
                    mol.SetProp(prop, val)
            sdf_list.append(mol)
            ctr += 1
    print("> {} loaded into Mol_List ({} records).".format(fn, ctr))
    return sdf_list
def order_props(sdf_list, order="default"):
    """Order fields. First Compound_Id, Supplier, Producer;
    then the activity fields, then the physicochemical properties and LCMS.

    Writes the resulting order to `sdf_list.order`. Fields not matched by
    DEFAULT_ORDER are appended alphabetically at the end."""
    if order == "default":
        prop_order = []
        fields = sorted(sdf_list.fields)
        for def_ord in DEFAULT_ORDER:
            # each DEFAULT_ORDER entry is a "|"-separated list of alternatives
            def_ord_items = def_ord.split("|")
            fields_found = []
            for f in fields:
                # Bug fix: `any` records a field only once even when it matches
                # several alternatives; the original appended it per match,
                # which made the second fields.remove(f) below raise ValueError.
                if any(item in f.lower() for item in def_ord_items):
                    prop_order.append(f)
                    fields_found.append(f)
            # Remove the fields that are now already on the prop_order list
            # from the original field list.
            # This way they will not be added multiple times
            for f in fields_found:
                fields.remove(f)
        if len(fields) > 0:  # add the remaining fields in alphabetical order
            prop_order.extend(fields)
        sdf_list.order = prop_order
def write_ids(id_list, fn="id_list.txt"):
    """Write a list of compound ids to a file. The list will be sorted by Id."""
    sorted_ids = sorted(str(cpd_id) for cpd_id in id_list)
    with open(fn, "w") as out:
        out.write("Compound_Id\n" + "\n".join(sorted_ids))
def csv_supplier(fn):
    """Yield each row of a tab-separated file `fn` as a dict.

    Transparently opens gzip-compressed files (".gz" in the name).
    The file is managed by a `with` block so the handle is closed even when
    the consumer abandons the generator early -- the original explicit
    f.close() after the loop was skipped on GeneratorExit, leaking the
    file descriptor."""
    if ".gz" in fn:
        f = gzip.open(fn, mode="rt")
    else:
        f = open(fn)
    with f:
        reader = csv.DictReader(f, dialect="excel-tab")
        for row_dict in reader:
            yield row_dict
def keep_props_in_mol(mol, prop_or_propslist):
    """Clear every property of `mol` that is not in the given name or list of names."""
    keep = prop_or_propslist if isinstance(prop_or_propslist, list) else [prop_or_propslist]
    for name in mol.GetPropNames():
        if name not in keep:
            mol.ClearProp(name)
def remove_props_from_mol(mol, prop_or_propslist):
    """Delete the given property name(s) from `mol`; missing names are ignored."""
    props = prop_or_propslist if isinstance(prop_or_propslist, list) else [prop_or_propslist]
    for name in props:
        # re-query the names each round so already-cleared props are skipped
        if name in mol.GetPropNames():
            mol.ClearProp(name)
def remove_props(mol_or_sdf_list, props):
    """Remove `props` from a single molecule or from every molecule in a list."""
    if not isinstance(mol_or_sdf_list, list):
        remove_props_from_mol(mol_or_sdf_list, props)
        return
    for mol in mol_or_sdf_list:
        if mol:  # skip failed/None entries
            remove_props_from_mol(mol, props)
def remove_empty_props(mol_list):
    """Delete every property whose value is the empty string from each mol.

    The property names are snapshotted with list() before clearing so the
    name container is not mutated while it is being iterated (the original
    cleared props during iteration over the live GetPropNames() result)."""
    for mol in mol_list:
        for prop in list(mol.GetPropNames()):
            if mol.GetProp(prop) == "":
                mol.ClearProp(prop)
def unit_factor(unit):
    """Return the factor corresponding to the unit, e.g. 1E-9 for nM.
    Known units are: mM, uM, nM, pM. Raises ValueError for unknown unit."""
    known = ("mm", "um", "nm", "pm")
    # tuple.index raises ValueError for unknown units, as documented
    exponent = 3 * (known.index(unit.lower()) + 1)
    return 10 ** -exponent
def pic50(ic50, unit=None, digits=3):
    """Calculate pIC50 from IC50. Optionally, a unit for the input IC50 value may be given.
    Known units are: mM, uM, nM, pM"""
    value = ic50 if unit is None else ic50 * unit_factor(unit)
    return np.round(-math.log10(value), decimals=digits)
def ic50(pic50, unit=None, digits=3):
    """Calculate IC50 from pIC50. Optionally, a unit for the returned IC50 value may be given.
    Known units are: mM, uM, nM, pM"""
    result = 10 ** (-pic50)
    if unit is not None:
        result /= unit_factor(unit)
    return np.round(result, digits)
def set_margin(container, margin=10):
    """Recursively set margins on all widgets of a toplevel container (...Box())."""
    if not hasattr(container, "children"):
        # leaf widget -> apply the margin here
        container.margin = margin
        return
    for child in container.children:
        set_margin(child, margin)
def ia_remove_props(mol_list):
    """Interactively remove properties from a Mol_List.
    Uses IPython widgets to display the properties to be selected for removal."""
    available = list_fields(mol_list)
    selector = ipyw.SelectMultiple(description="Properties to remove:", options=available)
    button = ipyw.Button(description="Done !")
    def _done(b):
        # drop everything the user highlighted in the selector
        remove_props(mol_list, props=list(selector.selected_labels))
    button.on_click(_done)
    display(ipyw.HBox(children=[selector, button]))
def ia_keep_props(mol_list):
    """Interactively keep properties from a Mol_List.
    Uses IPython widgets to display the properties to be selected for keeping."""
    all_props = list_fields(mol_list)
    # Bug fix: Button.on_click passes the button instance to the callback;
    # without the `b` parameter the original raised TypeError when clicked.
    def on_btn_clicked(b):
        props_to_remove = list(set(all_props) - set(w_sm.selected_labels))
        remove_props(mol_list, props=props_to_remove)
    w_sm = ipyw.SelectMultiple(description="Properties to keep:", options=all_props)
    w_btn = ipyw.Button(description="Done !")
    w_btn.on_click(on_btn_clicked)
    w_hb = ipyw.HBox(children=[w_sm, w_btn])
    display(w_hb)
def ia_smiles_from_smiles():
    """Sounds silly, but generates RDKit Smiles out of Smiles that were generated by other tools.
    May still be silly..."""
    raw = input("Smiles: ")
    print(Chem.MolToSmiles(Chem.MolFromSmiles(raw)))
def check_2d_coords(mol, force=False):
    """Check if a mol has 2D coordinates and if not, calculate them."""
    if not force:
        try:
            mol.GetConformer()
        except ValueError:  # no conformer present -> coordinates are needed
            force = True
    if not force:
        return
    if USE_AVALON_2D:
        pyAv.Generate2DCoords(mol)
    else:
        mol.Compute2DCoords()
def calc_props(mol, props, force2d=False, calculated_props=None, **kwargs):
    """Calculate the requested properties on `mol` and store them as mol props.

    Parameters:
        mol: RDKit molecule, modified in place.
        props (str or list of str): tags to calculate; known tags are
            "2d", "date", "formula", "smiles", "hba", "hbd", "nha", "logp",
            "mw", "rotb", "sa", "tpsa", "murcko", "sim".
        force2d (bool): recalculate 2d coords even when present.
        calculated_props: can be None or of type set(); every successfully
            calculated tag is added to it.
    Keyword args:
        sim_mol_or_smiles: reference mol or Smiles for the "sim" similarity.
        isomeric (bool): write isomeric Smiles (default True).
        query_fp: precalculated reference fingerprint for "sim"."""
    sim_mol_or_smiles = kwargs.get("sim_mol_or_smiles", None)
    isomeric = kwargs.get("isomeric", True)
    query_fp = kwargs.get("query_fp", None)
    if not isinstance(props, list):
        props = [props]

    def mark(tag):
        # record a successfully calculated tag; tolerate calculated_props=None
        # (the original crashed for "smiles", "murcko" and "sim" in that case)
        if calculated_props is not None:
            calculated_props.add(tag)

    # NOTE: the original wrapped everything below in `for prop in props:`
    # although every branch tests membership in `props` itself, so all
    # calculations ran len(props) times. The redundant loop was removed.
    if "2d" in props:
        check_2d_coords(mol, force2d)
        mark("2d")
    if "date" in props:
        mol.SetProp("Date", time.strftime("%Y%m%d"))
        mark("date")
    if "formula" in props:
        mol.SetProp("Formula", Chem.CalcMolFormula(mol))
        mark("formula")
    if "smiles" in props:
        mol.SetProp("Smiles", Chem.MolToSmiles(mol, isomericSmiles=isomeric))
        mark("smiles")
    if "hba" in props:
        mol.SetProp("HBA", str(Desc.NOCount(mol)))
        mark("hba")
    if "hbd" in props:
        mol.SetProp("HBD", str(Desc.NHOHCount(mol)))
        mark("hbd")
    if "nha" in props:
        mol.SetProp("NHA", str(mol.GetNumAtoms()))
        mark("nha")
    if "logp" in props:
        mol.SetProp("LogP", "{:.3f}".format(Desc.MolLogP(mol)))
        mark("logp")
    if "mw" in props:
        mol.SetProp("MW", "{:.3f}".format(Desc.MolWt(mol)))
        mark("mw")
    if "rotb" in props:
        mol.SetProp("RotB", str(Desc.NumRotatableBonds(mol)))
        mark("rotb")
    if SASCORER and "sa" in props:
        # sascorer yields 1 (easy) .. 10 (hard); normalize to 0..1, higher = easier
        score = sascorer.calculateScore(mol)
        norm_score = 1 - (score / 10)
        mol.SetProp("SA", "{:.3f}".format(norm_score))
        mark("sa")
    if "tpsa" in props:
        mol.SetProp("TPSA", str(int(Desc.TPSA(mol))))
        mark("tpsa")
    if "murcko" in props:
        mol.SetProp("Murcko", MurckoScaffold.MurckoScaffoldSmiles(mol=mol))
        mark("murcko")
    if "sim" in props:
        if query_fp is None and sim_mol_or_smiles is not None:
            # derive the reference fingerprint from the Murcko scaffold
            if isinstance(sim_mol_or_smiles, str):
                sim_mol_or_smiles = Chem.MolFromSmiles(sim_mol_or_smiles)
            murcko_mol = MurckoScaffold.GetScaffoldForMol(sim_mol_or_smiles)
            if USE_FP == "morgan":
                query_fp = Desc.rdMolDescriptors.GetMorganFingerprintAsBitVect(murcko_mol, 2)
            elif USE_FP == "avalon":
                query_fp = pyAv.GetAvalonFP(murcko_mol, 1024)
            else:
                query_fp = FingerprintMols.FingerprintMol(murcko_mol)
        if query_fp is not None:
            if mol.HasProp("FP_b64"):  # reuse the cached base64-pickled fingerprint
                mol_fp = pickle.loads(base64.b64decode(mol.GetProp("FP_b64")))
            else:
                murcko_mol = MurckoScaffold.GetScaffoldForMol(mol)
                if USE_FP == "morgan":
                    mol_fp = Desc.rdMolDescriptors.GetMorganFingerprintAsBitVect(murcko_mol, 2)
                elif USE_FP == "avalon":
                    mol_fp = pyAv.GetAvalonFP(murcko_mol, 1024)
                else:
                    mol_fp = FingerprintMols.FingerprintMol(murcko_mol)
            sim = DataStructs.FingerprintSimilarity(query_fp, mol_fp)
            mol.SetProp("Sim", "{:.3f}".format(sim * 100))
            mark("sim")
def calc_murcko_scaf(mol):
    """Calculate the Murcko scaffold from a molecule and return as Smiles."""
    scaffold_smiles = MurckoScaffold.MurckoScaffoldSmiles(mol=mol)
    return scaffold_smiles
def calc_scaffolds(parent_list):
    """Returns a list of the BRICS fragments as Smiles, sorted by decreasing size."""
    scaffolds = set()
    for parent in parent_list:
        fragmented = Chem.FragmentOnBRICSBonds(parent)
        for frag_smi in Chem.MolToSmiles(fragmented).split("."):
            # replace the BRICS attachment dummies by hydrogens
            frag_mol = Chem.MolFromSmiles(frag_smi.replace("*", "H"))
            if not frag_mol:
                continue
            if Desc.RingCount(frag_mol) > 0:  # only ring-containing fragments
                scaffolds.add(calc_murcko_scaf(frag_mol))
    return sorted(scaffolds, key=len, reverse=True)
def find_mcs(mol_list):
    """Returns the MCS ring molecule object for a set of molecules or None if not found."""
    if len(mol_list) < 2:
        return None
    # largest scaffolds first, so the first full match is the biggest one
    for scaf_smiles in calc_scaffolds(mol_list):
        candidate = Chem.MolFromSmiles(scaf_smiles)
        if all(m.HasSubstructMatch(candidate) for m in mol_list):
            return candidate
    return None
def align(mol_list, mol_or_smiles=None):
    """Align the Mol_list to the common substructure provided as Mol or Smiles.
    Parameters:
        mol_list: A list of RDKit molecules.
        mol_or_smiles (None, str, mol or list thereof): The substructure(s) to which to align.
            If None, then the method uses rdFMCS to determine the MCSS
            of the mol_list."""
    if mol_or_smiles is None:
        mol_or_smiles = find_mcs(mol_list)  # determine the MCSS
        if mol_or_smiles is None:
            return
    templates = mol_or_smiles if isinstance(mol_or_smiles, list) else [mol_or_smiles]
    align_mols = []
    for tmpl in templates:
        # Smiles input is parsed, mol input is copied so the caller's
        # object is not modified
        tmpl_mol = Chem.MolFromSmiles(tmpl) if isinstance(tmpl, str) else deepcopy(tmpl)
        check_2d_coords(tmpl_mol)
        align_mols.append(tmpl_mol)
    for mol in mol_list:
        if not mol:
            continue
        check_2d_coords(mol)
        for align_mol in align_mols:
            # only align when the match is unique
            if len(mol.GetSubstructMatches(align_mol)) == 1:
                Chem.GenerateDepictionMatching2DStructure(mol, align_mol)
                break
def guess_id_prop(prop_list):  # try to guess an id_prop
    """Return the first property name ending in "id" (case-insensitive), or None."""
    return next((p for p in prop_list if p.lower().endswith("id")), None)
def get_field_types(mol_list):
    """Detect all the property field types and return as dict"""
    field_types = {}
    # work on a 20% random sample for large lists to keep detection fast
    if len(mol_list) > 100:
        sample = random.sample(mol_list, len(mol_list) // 5)
    else:
        sample = mol_list
    for mol in sample:
        for prop in mol.GetPropNames():
            value = mol.GetProp(prop)
            try:
                float(value)
                # numeric values whose name ends in "id" are treated as keys
                prop_type = "key" if prop.lower().endswith("id") else "number"
            except ValueError:
                prop_type = "str"
            if prop not in field_types:
                field_types[prop] = prop_type
            elif field_types[prop] in ["number", "key"] and prop_type == "str":
                # "str" overrides everything: if one string is among the values
                # of a property, all values become type "str"
                field_types[prop] = prop_type
    if not field_types:
        raise NoFieldTypes()
    return field_types
def get_value(str_val):
    """Convert a string to int/float where possible; falsy input gives None,
    non-numeric strings are returned unchanged."""
    if not str_val:
        return None
    try:
        # int() stays inside the try: int(float("nan")) raises ValueError,
        # which falls back to returning the original string
        number = float(str_val)
        return int(number) if "." not in str_val else number
    except ValueError:
        return str_val
def isnumber(x):
    """Returns True, if x is a number (i.e. can be converted to float)."""
    if x is None:
        return False
    try:
        float(x)
    except (TypeError, ValueError):
        # TypeError covers non-string, non-numeric inputs (lists, dicts, ...),
        # which the original version let propagate instead of returning False.
        return False
    return True
def get_prop_val(mol, prop, default=None):
    """Returns the value of the molecule's property or the default value, if it is not defined."""
    if not mol.HasProp(prop):
        return default
    return get_value(mol.GetProp(prop))
def b64_img(mol, size=300):
    """Render `mol` to a cropped PNG and return it base64-encoded."""
    buf = IO()
    image = autocrop(Draw.MolToImage(mol, size=(size, size)))
    image.save(buf, format='PNG')
    encoded = base64.b64encode(buf.getvalue())
    if PY3:
        # b64encode returns bytes under Python 3 -> decode to str
        encoded = encoded.decode()
    buf.close()
    return encoded
def mol_table(sdf_list, id_prop=None, interact=False, highlight=None, show_hidden=False, order=None, img_dir=None, size=300):
    """Render a Mol_List as one HTML table row per molecule.

    Parameters:
        sdf_list (Mol_List): List of RDKit molecules
        highlight (dict): Dict of properties (special: *all*) and values to highlight cells,
            e.g. {"activity": "< 50"}
        show_hidden (bool): Whether to show hidden properties (name starts with _) or not.
            Defaults to *False*.
        link (str): column used for linking out
        target (str): column used as link target
        order (list): A list of substrings to match with the field names for ordering in the table header
        img_dir (str): if None, the molecule images are embedded in the HTML doc.
            Otherwise the images will be stored in img_dir and linked in the doc.
    Returns:
        HTML table as TEXT to embed in IPython or a web page."""
    time_stamp = time.strftime("%y%m%d%H%M%S")
    td_opt = {"style": "text-align: center;"}
    header_opt = {"bgcolor": "#94CAEF", "style": "text-align: center;"}
    table_list = []
    prop_list = list_fields(sdf_list)
    if isinstance(order, list):
        # stable sorts applied in reverse give the requested precedence
        for k in reversed(order):
            prop_list.sort(key=lambda x: k.lower() in x.lower(), reverse=True)
    if id_prop is None:
        guessed_id = guess_id_prop(prop_list)
    else:
        guessed_id = id_prop
    if interact and guessed_id is not None:
        # JS snippet enabling click-to-select of compounds
        table_list.append(TBL_JAVASCRIPT.format(ts=time_stamp, bgcolor="transparent"))
    if id_prop is not None:
        if id_prop not in prop_list:
            raise LookupError("Id property {} not found in data set.".format(id_prop))
    if guessed_id:
        # make sure that the id_prop (or the guessed id prop) is first:
        prop_list.pop(prop_list.index(guessed_id))
        tmp_list = [guessed_id]
        tmp_list.extend(prop_list)
        prop_list = tmp_list
    cells = html.td(html.b("#"), header_opt)
    cells.extend(html.td(html.b("Molecule"), header_opt))
    for prop in prop_list:
        cells.extend(html.td(html.b(prop), header_opt))
    rows = html.tr(cells)
    for idx, mol in enumerate(sdf_list):
        cells = []
        mol_props = mol.GetPropNames()
        if guessed_id:
            id_prop_val = mol.GetProp(guessed_id)
            img_id = id_prop_val
            cell_opt = {"id": "{}_{}".format(id_prop_val, time_stamp)}
        else:
            img_id = idx
            cell_opt = {"id": str(idx)}
        cell = html.td(str(idx), cell_opt)
        cells.extend(cell)
        if not mol:
            cells.extend(html.td("no structure"))
        else:
            if img_dir is None:  # embed the images in the doc
                b64 = b64_img(mol, size * 2)
                img_src = "data:image/png;base64,{}".format(b64)
            else:  # write them out to img_dir
                img_file = op.join(img_dir, "img_{}.png".format(img_id))
                img = autocrop(Draw.MolToImage(mol, size=(size * 2, size * 2)))
                img.save(img_file, format='PNG')
                img_src = img_file
            cell_opt = {}
            if interact and guessed_id is not None:
                img_opt = {"title": "Click to select / unselect",
                           "onclick": "toggleCpd('{}')".format(id_prop_val)}
            else:
                img_opt = {"title": str(img_id)}
            # img_opt["width"] = size
            # img_opt["height"] = size
            img_opt["style"] = 'max-width: {}px; max-height: {}px; display: block; margin: auto;'.format(size, size)
            cell = html.img(img_src, img_opt)
            cells.extend(html.td(cell, cell_opt))
        for prop in prop_list:
            td_opt = {"style": "text-align: center;"}
            if prop in mol_props:
                if not show_hidden and prop.startswith("_"): continue
                td_opt["title"] = prop
                prop_val = mol.GetProp(prop)
                if highlight:
                    # NOTE(review): highlight conditions are evaluated with
                    # eval() on property values -- only use with trusted data.
                    eval_str = None
                    if "*all*" in highlight:
                        if not guessed_id or (guessed_id and prop != guessed_id):
                            eval_str = " ".join([prop_val, highlight["*all*"]])
                    else:
                        if prop in highlight:
                            eval_str = " ".join([prop_val, highlight[prop]])
                    if eval_str and eval(eval_str):
                        td_opt["bgcolor"] = "#99ff99"
                cells.extend(html.td(prop_val, td_opt))
            else:
                cells.extend(html.td("", td_opt))
        rows.extend(html.tr(cells))
    table_list.extend(html.table(rows))
    if interact and guessed_id is not None:
        # hidden list element that collects the selected compound ids
        table_list.append(ID_LIST.format(ts=time_stamp))
    # print(table_list)
    return "".join(table_list)
def mol_sheet(sdf_list, props=None, id_prop=None, interact=False, highlight=None, mols_per_row=4, size=IMG_GRID_SIZE, img_dir=None):
    """Creates a HTML grid out of the Mol_List input.
    Parameters:
        sdf_list (Mol_List): list of RDKit molecules
        highlight (dict): dict of properties (a.t.m only one) and values to highlight cells,
            e.g. {"activity": "< 50"}
        order (list): a list of substrings to match with the field names for ordering in the table header
        img_dir (str): if None, the molecule images are embedded in the HTML doc.
            Otherwise the images will be stored in img_dir and linked in the doc.
    Returns:
        HTML table as TEXT with molecules in grid-like layout to embed in IPython or a web page."""
    time_stamp = time.strftime("%y%m%d%H%M%S")
    # td_opt = {"align": "center"}
    td_opt = {"style": "text-align: center;"}
    header_opt = {"bgcolor": BGCOLOR}
    table_list = []
    prop_list = list_fields(sdf_list)
    if props and not isinstance(props, list):
        props = [props]
    if id_prop is None:
        guessed_id = guess_id_prop(prop_list)
    else:
        guessed_id = id_prop
    if interact and guessed_id is not None:
        table_list.append(TBL_JAVASCRIPT.format(ts=time_stamp, bgcolor=BGCOLOR))
    if props is not None:
        td_opt["colspan"] = "2"
        prop_row_cells = {k: [] for k, _ in enumerate(props)}
    rows = []
    id_cells = []
    mol_cells = []
    for idx, mol in enumerate(sdf_list, 1):
        if guessed_id:
            id_prop_val = mol.GetProp(guessed_id)
            img_id = id_prop_val
            cell_opt = {"id": "{}_{}".format(id_prop_val, time_stamp)}
            cell_opt.update(td_opt)
            cell_opt.update(header_opt)
            id_cells.extend(html.td(id_prop_val, cell_opt))
        else:
            img_id = idx
        if not mol:
            cell = ["no structure"]
        else:
            if img_dir is None:  # embed the images in the doc
                b64 = b64_img(mol, size * 2)
                img_src = "data:image/png;base64,{}".format(b64)
            else:
                img_file = op.join(img_dir, "img_{}.png".format(img_id))
                img = autocrop(Draw.MolToImage(mol, size=(size * 2, size * 2)))
                img.save(img_file, format='PNG')
                img_src = img_file
            if interact and guessed_id is not None:
                img_opt = {"title": "Click to select / unselect",
                           "onclick": "toggleCpd('{}')".format(id_prop_val)}
            else:
                img_opt = {"title": str(img_id)}
            # img_opt["width"] = size
            # img_opt["height"] = size
            img_opt["style"] = 'max-width: {}px; max-height: {}px; display: block; margin: auto;'.format(size, size)
            cell = html.img(img_src, img_opt)
        # td_opt = {"align": "center"}
        td_opt = {"style": "text-align: center;", "bgcolor": "#FFFFFF"}
        if props is not None:
            td_opt["colspan"] = "2"
        if highlight:
            eval_str = None
            # Bug fix: dict views are not subscriptable in Python 3 --
            # `highlight.keys()[0]` raised TypeError.
            prop = list(highlight.keys())[0]  # only one highlight key supported a.t.m.
            prop_val = mol.GetProp(prop)
            # NOTE(review): the condition is evaluated with eval() on the
            # property value -- only use with trusted data.
            eval_str = " ".join([prop_val, highlight[prop]])
            if eval_str and eval(eval_str):
                td_opt["bgcolor"] = "#99ff99"
        mol_cells.extend(html.td(cell, td_opt))
        if props:
            for prop_no, prop in enumerate(props):
                prop_opt = {"style": "text-align: left;"}
                val_opt = {"style": "text-align: left;"}
                prop_cells = []
                prop_val = ""
                if mol.HasProp(prop):
                    prop_val = mol.GetProp(prop)
                    if prop == "Hit" and mol.HasProp("ActAss"):
                        val_opt["title"] = mol.GetProp("ActAss")
                    elif prop == "Pure_Flag" and prop_val != "" and prop_val != "n.d." and mol.HasProp("Purity") and mol.HasProp("LCMS_Date"):
                        val_opt["title"] = "{}% ({})".format(mol.GetProp("Purity"), mol.GetProp("LCMS_Date"))
                prop_cells.extend(html.td(prop[:25], prop_opt))
                prop_cells.extend(html.td(prop_val[:8], val_opt))
                prop_row_cells[prop_no].extend(prop_cells)
        if idx % mols_per_row == 0 or idx == len(sdf_list):
            # a grid row is complete (or the list is exhausted) -> flush it
            if guessed_id:
                rows.extend(html.tr(id_cells))
            rows.extend(html.tr(mol_cells))
            if props is not None:
                colspan_factor = 2
                for prop_no in sorted(prop_row_cells):
                    rows.extend(html.tr(prop_row_cells[prop_no]))
                prop_row_cells = {k: [] for k, _ in enumerate(props)}
            else:
                colspan_factor = 1
            empty_row_options = {"colspan": mols_per_row * colspan_factor}
            empty_row_options["style"] = "border: none;"
            empty_row = html.tr(html.td(" ", options=empty_row_options))
            rows.extend(empty_row)
            id_cells = []
            mol_cells = []
    table_list.extend(html.table(rows))
    if interact and guessed_id is not None:
        table_list.append(ID_LIST.format(ts=time_stamp))
    # print(table_list)
    return "".join(table_list)
def nested_table(mol_list, id_prop=None, props=None, order=None, size=300, img_dir=None):
    """Render a Mol_List as an HTML table with one image cell per molecule
    (spanning as many rows as there are properties) and one property
    name/value row pair per shown property. Returns the table as text."""
    prop_list = list_fields(mol_list)
    if props is not None:
        if not isinstance(props, list):
            props = [props]
        # explicit props also define the ordering
        order = props.copy()
    if order is None:
        order = ["Supplier", "Producer", "Hit", "ActAss"]
    if id_prop is None:
        guessed_id = guess_id_prop(prop_list)
    else:
        guessed_id = id_prop
    if guessed_id is not None:
        # make sure, guessed_id is at the beginning
        old_order = order.copy()
        if guessed_id in old_order:
            pos = old_order.index(guessed_id)
            old_order.pop(pos)
        order = [guessed_id]
        order.extend(old_order)
    order_rev = order.copy()
    order_rev.reverse()
    # stable sorts applied in reverse give the requested precedence
    for k in order_rev:
        prop_list.sort(key=lambda x: k.lower() in x.lower(), reverse=True)
    header_opt = {"bgcolor": "#94CAEF", "style": "text-align: center;"}
    table = []
    rows = []
    cells = []
    # first line
    cells.extend(html.td(html.b("Molecule"), options=header_opt))
    header_opt["colspan"] = 2
    cells.extend(html.td(html.b("Properties"), options=header_opt))
    rows.extend(html.tr(cells))
    cells = []
    bgcolor = "#F2F2F2"
    for idx, mol in enumerate(mol_list, 1):
        if not mol:
            continue
        # alternating background colors for easier distinction between records
        if "F2" in bgcolor:
            bgcolor = "#FFFFFF"
        else:
            bgcolor = "#F2F2F2"
        # How many properties have to be displayed for the mol?
        mol_props = mol.GetPropNames()
        if props is None:
            props_to_show = mol_props
            row_span = len(mol_props)
        else:
            props_to_show = list(set(props).intersection(mol_props))
            row_span = len(props_to_show)
        # the image cell spans all property rows of this record
        td_opt = {"align": "center", "rowspan": row_span}
        if guessed_id:
            id_prop_val = mol.GetProp(guessed_id)
            img_id = id_prop_val
        else:
            img_id = idx
        if img_dir is None:  # embed the images in the doc
            b64 = b64_img(mol, size * 2)
            img_src = "data:image/png;base64,{}".format(b64)
        else:
            img_file = op.join(img_dir, "img_{}.png".format(img_id))
            img = autocrop(Draw.MolToImage(mol, size=(size * 2, size * 2)))
            img.save(img_file, format='PNG')
            img_src = img_file
        img_opt = {"title": str(img_id)}
        img_opt["style"] = 'max-width: {}px; max-height: {}px; display: block; margin: auto;'.format(size, size)
        cells.extend(html.td(html.img(img_src, img_opt), td_opt))
        # prop_opt = {}
        td_opt = {"bgcolor": bgcolor}
        for prop in prop_list:
            if prop not in props_to_show: continue
            cells.extend(html.td(prop, td_opt))
            cells.extend(html.td(mol.GetProp(prop), td_opt))
            rows.extend(html.tr(cells))
            cells = []
    table = html.table(rows)
    return "".join(table)
def show_table(sdf_list, id_prop=None, interact=False, highlight=None, order=None):
    """Render mol_table() output as an IPython HTML display object."""
    table_text = mol_table(sdf_list, id_prop, interact=interact, highlight=highlight, order=order)
    return HTML(table_text)
def show_sheet(sdf_list, props=None, id_prop=None, interact=False, highlight=None, mols_per_row=4):
    """Render mol_sheet() output as an IPython HTML display object."""
    sheet_text = mol_sheet(sdf_list, props, id_prop, interact=interact, highlight=highlight, mols_per_row=mols_per_row)
    return HTML(sheet_text)
def table_pager(mol_list, id_prop=None, interact=False, pagesize=25, highlight=None, order=None, show_hidden=False):
    """Browse a Mol_List as a paged HTML table via an IntSlider widget.

    Falls back to a single static table when widgets are unavailable or the
    whole list fits on one page."""
    ln = len(mol_list)
    num_pages = ln // pagesize
    if not WIDGETS or ln <= pagesize:
        return HTML(mol_table(mol_list, id_prop=id_prop, highlight=highlight,
                              order=order, show_hidden=show_hidden))
    def _render(page):
        # Bug fix: forward `highlight` to the paged view as well;
        # the original lambda silently dropped it.
        chunk = mol_list[page * pagesize:(page + 1) * pagesize]
        return HTML(mol_table(chunk, id_prop=id_prop, interact=interact,
                              highlight=highlight, order=order,
                              show_hidden=show_hidden))
    ipyw.interact(_render, page=ipyw.IntSlider(min=0, max=num_pages, step=1, value=0))
def nested_pager(mol_list, pagesize=10, id_prop=None, props=None, order=None):
    """Browse a Mol_List as a paged nested table via an IntSlider widget.

    Falls back to a single static table when widgets are unavailable or the
    whole list fits on one page."""
    total = len(mol_list)
    num_pages = total // pagesize
    if not WIDGETS or total <= pagesize:
        return HTML(nested_table(mol_list, id_prop=id_prop, props=props, order=order))
    def _render(page):
        chunk = mol_list[page * pagesize:(page + 1) * pagesize]
        return HTML(nested_table(chunk, id_prop=id_prop, props=props, order=order))
    ipyw.interact(_render, page=ipyw.IntSlider(min=0, max=num_pages, step=1, value=0))
def grid_pager(mol_list, pagesize=20, id_prop=None, interact=False, highlight=None, props=None, mols_per_row=4, size=IMG_GRID_SIZE):
    """Browse a Mol_List as a paged molecule grid via an IntSlider widget.

    Falls back to a single static grid when widgets are unavailable or the
    whole list fits on one page."""
    ln = len(mol_list)
    num_pages = ln // pagesize
    if not WIDGETS or ln <= pagesize:
        # Bug fix: forward interact/highlight/mols_per_row here as well;
        # the original fallback silently ignored them.
        return HTML(mol_sheet(mol_list, id_prop=id_prop, interact=interact,
                              highlight=highlight, props=props,
                              mols_per_row=mols_per_row, size=size))
    def _render(page):
        # Bug fix: mols_per_row was also dropped in the paged branch.
        chunk = mol_list[page * pagesize:(page + 1) * pagesize]
        return HTML(mol_sheet(chunk, id_prop=id_prop, interact=interact,
                              highlight=highlight, props=props,
                              mols_per_row=mols_per_row, size=size))
    ipyw.interact(_render, page=ipyw.IntSlider(min=0, max=num_pages, step=1, value=0))
def sample_diverse(mol_list, size, fp=None):
    """Returns a diverse selection of the mol_list with length `size`.
    `fp` is the type of fingerprint to use (None (default), Morgan, Avalon)"""
    def distij(i, j):
        # distance callback for the picker: 1 - Dice similarity
        return 1 - DataStructs.DiceSimilarity(fp_list[i], fp_list[j])
    if fp is None:
        fp = USE_FP
    else:
        fp = fp.lower()
    ctr = 0  # counts fingerprints that had to be calculated fresh
    fp_list = []
    for mol in mol_list:
        if mol.HasProp("FP_b64"):
            # reuse the cached, base64-encoded pickled fingerprint
            fp_list.append(pickle.loads(base64.b64decode(mol.GetProp("FP_b64"))))
        else:
            ctr += 1
            if fp == "morgan":
                mol_fp = Desc.rdMolDescriptors.GetMorganFingerprintAsBitVect(mol, 2)
            elif fp == "avalon":
                mol_fp = pyAv.GetAvalonFP(mol, 1024)
            else:
                mol_fp = FingerprintMols.FingerprintMol(mol)
            fp_list.append(mol_fp)
    if ctr > 0:
        print("{} {} fingerprints were calculated.".format(ctr, fp.capitalize()))
    print("{} Fingerprints available.".format(len(fp_list)))
    picker = MaxMinPicker()
    # fixed seed keeps the diverse pick reproducible
    pick_idx_list = picker.LazyPick(distij, len(fp_list), size, seed=0xF00D)
    result_list = Mol_List()
    for idx in pick_idx_list:
        mol = deepcopy(mol_list[idx])  # make an independent copy
        result_list.append(mol)
    return result_list
def jsme(name="mol"):
    """displays a JSME molecule editor widget in the notebook
    and stores the resulting mol in the variable that <name> assigns."""
    ts = time.strftime("%y%m%d%H%M%S")
    return HTML(JSME_FORM.format(jsme_loc=JSME_LOCATION, ts=ts, var_name=name))
def dict_from_sdf_list(sdf_list, id_prop=None, props=None, prop_list=None):
    """Generate a dictionary from the properties of a list of molecules.
    Currently not including the structure.
    If <props> contains a list of property names, then only these properties plus the <id_prop> are returned.
    Returns dict"""
    if not prop_list:
        prop_list = list_fields(sdf_list)
    if id_prop:
        if id_prop not in prop_list:
            raise LookupError("id_prop not found in data set.")
        guessed_id = id_prop
    else:
        guessed_id = guess_id_prop(prop_list)
    if not props:
        props = prop_list
    if guessed_id and guessed_id not in props:
        props.append(guessed_id)
    df_dict = {prop: [] for prop in props}
    for mol in sdf_list:
        mol_props = list(mol.GetPropNames())
        for prop in props:
            if prop in mol_props:
                df_dict[prop].append(get_value(mol.GetProp(prop)))
            else:
                # Fix: np.nan instead of np.NaN -- the NaN alias was
                # removed in NumPy 2.0 (np.nan is the same object before that).
                df_dict[prop].append(np.nan)
    return df_dict
# some convenience functions
def mol_3d(smiles_or_mol):
    """return a 3d optimized molecule from a Smiles or 2d mol input"""
    if isinstance(smiles_or_mol, str):  # input is Smiles
        smiles_or_mol = Chem.MolFromSmiles(smiles_or_mol)
    mol_h = Chem.AddHs(smiles_or_mol)
    Chem.Compute2DCoords(mol_h)
    Chem.EmbedMolecule(mol_h)
    Chem.MMFFOptimizeMolecule(mol_h)
    return mol_h
def mol_grid(sdf_list, props, fn=None, mols_per_row=5, sub_img_size=(200, 200)):
    """Draw a molecule grid from the input <sdf_list>. An inline graphics will be returned
    in addition to writing the image to <fn> (if defined).
    The given sdf <props> (as a list) will be concatenated to the molecules' legends."""
    if not isinstance(props, list):
        props = [props]
    # one "_"-joined legend string per molecule
    legends = ["_".join(mol.GetProp(p) for p in props) for mol in sdf_list]
    img = Draw.MolsToGridImage(sdf_list, molsPerRow=mols_per_row, subImgSize=sub_img_size, legends=legends)
    if fn:
        img.save(fn)
    return img
def o3da(input_list, ref, fn="aligned.sdf"):
    """Takes a list of molecules and align them to ref.
    Writes the result as SD file to fn."""
    ref_props = Chem.MMFFGetMoleculeProperties(ref)
    # work on copies so the caller's molecules stay untouched
    mol_list = deepcopy(input_list)
    writer = Chem.SDWriter(fn)
    print("N\t\tscore\t\trmsd")
    for ctr, mol in enumerate(mol_list, 1):
        probe_props = Chem.MMFFGetMoleculeProperties(mol)
        o3a = Chem.GetO3A(mol, ref, probe_props, ref_props)
        print("{}\t\t{:.3f}\t\t{:.3f}".format(ctr, o3a.Score(), o3a.Align()))
        writer.write(mol)
    writer.close()
| [
"csv.DictWriter",
"rdkit.Chem.AllChem.CalcMolFormula",
"csv.DictReader",
"rdkit.Chem.AllChem.GetMolFrags",
"numpy.log10",
"IPython.core.display.display",
"gzip.open",
"PIL.Image.new",
"rdkit.Chem.AllChem.MolFromSmiles",
"rdkit.Chem.AllChem.Compute2DCoords",
"rdkit.Chem.Descriptors.MolLogP",
"r... | [((2527, 2564), 'os.path.isfile', 'op.isfile', (['"""lib/jsme/jsme.nocache.js"""'], {}), "('lib/jsme/jsme.nocache.js')\n", (2536, 2564), True, 'import os.path as op\n'), ((2236, 2260), 'misc_tools.apl_tools.get_commit', 'apt.get_commit', (['__file__'], {}), '(__file__)\n', (2250, 2260), True, 'from misc_tools import apl_tools as apt\n'), ((52863, 52897), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'im.size', 'bgcolor'], {}), "('RGB', im.size, bgcolor)\n", (52872, 52897), False, 'from PIL import Image, ImageChops\n'), ((52909, 52938), 'PIL.ImageChops.difference', 'ImageChops.difference', (['im', 'bg'], {}), '(im, bg)\n', (52930, 52938), False, 'from PIL import Image, ImageChops\n'), ((54294, 54329), 'rdkit.Chem.AllChem.ForwardSDMolSupplier', 'Chem.ForwardSDMolSupplier', (['file_obj'], {}), '(file_obj)\n', (54319, 54329), True, 'from rdkit.Chem import AllChem as Chem\n'), ((57771, 57809), 'csv.DictReader', 'csv.DictReader', (['f'], {'dialect': '"""excel-tab"""'}), "(f, dialect='excel-tab')\n", (57785, 57809), False, 'import csv\n'), ((59720, 59742), 'numpy.round', 'np.round', (['ic50', 'digits'], {}), '(ic50, digits)\n', (59728, 59742), True, 'import numpy as np\n'), ((60339, 60414), 'ipywidgets.SelectMultiple', 'ipyw.SelectMultiple', ([], {'description': '"""Properties to remove:"""', 'options': 'all_props'}), "(description='Properties to remove:', options=all_props)\n", (60358, 60414), True, 'import ipywidgets as ipyw\n'), ((60427, 60460), 'ipywidgets.Button', 'ipyw.Button', ([], {'description': '"""Done !"""'}), "(description='Done !')\n", (60438, 60460), True, 'import ipywidgets as ipyw\n'), ((60508, 60541), 'ipywidgets.HBox', 'ipyw.HBox', ([], {'children': '[w_sm, w_btn]'}), '(children=[w_sm, w_btn])\n', (60517, 60541), True, 'import ipywidgets as ipyw\n'), ((60547, 60560), 'IPython.core.display.display', 'display', (['w_hb'], {}), '(w_hb)\n', (60554, 60560), False, 'from IPython.core.display import HTML, display\n'), ((60935, 61008), 
'ipywidgets.SelectMultiple', 'ipyw.SelectMultiple', ([], {'description': '"""Properties to keep:"""', 'options': 'all_props'}), "(description='Properties to keep:', options=all_props)\n", (60954, 61008), True, 'import ipywidgets as ipyw\n'), ((61021, 61054), 'ipywidgets.Button', 'ipyw.Button', ([], {'description': '"""Done !"""'}), "(description='Done !')\n", (61032, 61054), True, 'import ipywidgets as ipyw\n'), ((61102, 61135), 'ipywidgets.HBox', 'ipyw.HBox', ([], {'children': '[w_sm, w_btn]'}), '(children=[w_sm, w_btn])\n', (61111, 61135), True, 'import ipywidgets as ipyw\n'), ((61141, 61154), 'IPython.core.display.display', 'display', (['w_hb'], {}), '(w_hb)\n', (61148, 61154), False, 'from IPython.core.display import HTML, display\n'), ((61354, 61380), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (61372, 61380), True, 'from rdkit.Chem import AllChem as Chem\n'), ((66167, 66211), 'rdkit.Chem.Scaffolds.MurckoScaffold.MurckoScaffoldSmiles', 'MurckoScaffold.MurckoScaffoldSmiles', ([], {'mol': 'mol'}), '(mol=mol)\n', (66202, 66211), True, 'import rdkit.Chem.Scaffolds.MurckoScaffold as MurckoScaffold\n'), ((70520, 70524), 'cStringIO.StringIO', 'IO', ([], {}), '()\n', (70522, 70524), True, 'from cStringIO import StringIO as IO\n'), ((71666, 71695), 'time.strftime', 'time.strftime', (['"""%y%m%d%H%M%S"""'], {}), "('%y%m%d%H%M%S')\n", (71679, 71695), False, 'import time\n'), ((76373, 76402), 'time.strftime', 'time.strftime', (['"""%y%m%d%H%M%S"""'], {}), "('%y%m%d%H%M%S')\n", (76386, 76402), False, 'import time\n'), ((87235, 87249), 'rdkit.SimDivFilters.rdSimDivPickers.MaxMinPicker', 'MaxMinPicker', ([], {}), '()\n', (87247, 87249), False, 'from rdkit.SimDivFilters.rdSimDivPickers import MaxMinPicker\n'), ((87686, 87715), 'time.strftime', 'time.strftime', (['"""%y%m%d%H%M%S"""'], {}), "('%y%m%d%H%M%S')\n", (87699, 87715), False, 'import time\n'), ((89103, 89128), 'rdkit.Chem.AllChem.AddHs', 'Chem.AddHs', (['smiles_or_mol'], 
{}), '(smiles_or_mol)\n', (89113, 89128), True, 'from rdkit.Chem import AllChem as Chem\n'), ((89133, 89157), 'rdkit.Chem.AllChem.Compute2DCoords', 'Chem.Compute2DCoords', (['mh'], {}), '(mh)\n', (89153, 89157), True, 'from rdkit.Chem import AllChem as Chem\n'), ((89162, 89184), 'rdkit.Chem.AllChem.EmbedMolecule', 'Chem.EmbedMolecule', (['mh'], {}), '(mh)\n', (89180, 89184), True, 'from rdkit.Chem import AllChem as Chem\n'), ((89189, 89218), 'rdkit.Chem.AllChem.MMFFOptimizeMolecule', 'Chem.MMFFOptimizeMolecule', (['mh'], {}), '(mh)\n', (89214, 89218), True, 'from rdkit.Chem import AllChem as Chem\n'), ((89786, 89888), 'rdkit.Chem.Draw.MolsToGridImage', 'Draw.MolsToGridImage', (['sdf_list'], {'molsPerRow': 'mols_per_row', 'subImgSize': 'sub_img_size', 'legends': 'legends'}), '(sdf_list, molsPerRow=mols_per_row, subImgSize=\n sub_img_size, legends=legends)\n', (89806, 89888), False, 'from rdkit.Chem import Draw\n'), ((90092, 90127), 'rdkit.Chem.AllChem.MMFFGetMoleculeProperties', 'Chem.MMFFGetMoleculeProperties', (['ref'], {}), '(ref)\n', (90122, 90127), True, 'from rdkit.Chem import AllChem as Chem\n'), ((90143, 90163), 'copy.deepcopy', 'deepcopy', (['input_list'], {}), '(input_list)\n', (90151, 90163), False, 'from copy import deepcopy\n'), ((90177, 90194), 'rdkit.Chem.AllChem.SDWriter', 'Chem.SDWriter', (['fn'], {}), '(fn)\n', (90190, 90194), True, 'from rdkit.Chem import AllChem as Chem\n'), ((11547, 11564), 'rdkit.Chem.AllChem.SDWriter', 'Chem.SDWriter', (['fn'], {}), '(fn)\n', (11560, 11564), True, 'from rdkit.Chem import AllChem as Chem\n'), ((52627, 52646), 'os.path.exists', 'op.exists', (['dir_name'], {}), '(dir_name)\n', (52636, 52646), True, 'import os.path as op\n'), ((52739, 52760), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (52750, 52760), False, 'import os\n'), ((53520, 53539), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (53531, 53539), False, 'import pickle\n'), ((53672, 53686), 'pickle.load', 'pickle.load', 
(['f'], {}), '(f)\n', (53683, 53686), False, 'import pickle\n'), ((55796, 55834), 'csv.DictReader', 'csv.DictReader', (['f'], {'dialect': '"""excel-tab"""'}), "(f, dialect='excel-tab')\n", (55810, 55834), False, 'import csv\n'), ((57702, 57726), 'gzip.open', 'gzip.open', (['fn'], {'mode': '"""rt"""'}), "(fn, mode='rt')\n", (57711, 57726), False, 'import gzip\n'), ((61391, 61412), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (61407, 61412), True, 'from rdkit.Chem import AllChem as Chem\n'), ((66397, 66430), 'rdkit.Chem.AllChem.FragmentOnBRICSBonds', 'Chem.FragmentOnBRICSBonds', (['parent'], {}), '(parent)\n', (66422, 66430), True, 'from rdkit.Chem import AllChem as Chem\n'), ((67160, 67184), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['scaf'], {}), '(scaf)\n', (67178, 67184), True, 'from rdkit.Chem import AllChem as Chem\n'), ((70544, 70583), 'rdkit.Chem.Draw.MolToImage', 'Draw.MolToImage', (['mol'], {'size': '(size, size)'}), '(mol, size=(size, size))\n', (70559, 70583), False, 'from rdkit.Chem import Draw\n'), ((87401, 87424), 'copy.deepcopy', 'deepcopy', (['mol_list[idx]'], {}), '(mol_list[idx])\n', (87409, 87424), False, 'from copy import deepcopy\n'), ((89059, 89092), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles_or_mol'], {}), '(smiles_or_mol)\n', (89077, 89092), True, 'from rdkit.Chem import AllChem as Chem\n'), ((90291, 90326), 'rdkit.Chem.AllChem.MMFFGetMoleculeProperties', 'Chem.MMFFGetMoleculeProperties', (['mol'], {}), '(mol)\n', (90321, 90326), True, 'from rdkit.Chem import AllChem as Chem\n'), ((90341, 90382), 'rdkit.Chem.AllChem.GetO3A', 'Chem.GetO3A', (['mol', 'ref', 'mol_pymp', 'ref_pymp'], {}), '(mol, ref, mol_pymp, ref_pymp)\n', (90352, 90382), True, 'from rdkit.Chem import AllChem as Chem\n'), ((13132, 13182), 'csv.DictWriter', 'csv.DictWriter', (['f', 'csv_fields'], {'dialect': '"""excel-tab"""'}), "(f, csv_fields, dialect='excel-tab')\n", (13146, 13182), False, 'import 
csv\n'), ((14107, 14132), 'random.sample', 'random.sample', (['self', 'size'], {}), '(self, size)\n', (14120, 14132), False, 'import random\n'), ((14672, 14685), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (14680, 14685), False, 'from copy import deepcopy\n'), ((30083, 30117), 'rdkit.Chem.AllChem.GetMolFrags', 'Chem.GetMolFrags', (['mol'], {'asMols': '(True)'}), '(mol, asMols=True)\n', (30099, 30117), True, 'from rdkit.Chem import AllChem as Chem\n'), ((33640, 33682), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {'isomericSmiles': '(True)'}), '(mol, isomericSmiles=True)\n', (33656, 33682), True, 'from rdkit.Chem import AllChem as Chem\n'), ((35117, 35171), 'rdkit.Chem.AllChem.FindMolChiralCenters', 'Chem.FindMolChiralCenters', (['mol'], {'includeUnassigned': '(True)'}), '(mol, includeUnassigned=True)\n', (35142, 35171), True, 'from rdkit.Chem import AllChem as Chem\n'), ((55982, 56005), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (56000, 56005), True, 'from rdkit.Chem import AllChem as Chem\n'), ((59415, 59431), 'math.log10', 'math.log10', (['ic50'], {}), '(ic50)\n', (59425, 59431), False, 'import math\n'), ((61728, 61754), 'rdkit.Avalon.pyAvalonTools.Generate2DCoords', 'pyAv.Generate2DCoords', (['mol'], {}), '(mol)\n', (61749, 61754), True, 'from rdkit.Avalon import pyAvalonTools as pyAv\n'), ((63977, 64005), 'Contrib.SA_Score.sascorer.calculateScore', 'sascorer.calculateScore', (['mol'], {}), '(mol)\n', (64000, 64005), False, 'from Contrib.SA_Score import sascorer\n'), ((64425, 64469), 'rdkit.Chem.Scaffolds.MurckoScaffold.MurckoScaffoldSmiles', 'MurckoScaffold.MurckoScaffoldSmiles', ([], {'mol': 'mol'}), '(mol=mol)\n', (64460, 64469), True, 'import rdkit.Chem.Scaffolds.MurckoScaffold as MurckoScaffold\n'), ((66596, 66620), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['frag'], {}), '(frag)\n', (66614, 66620), True, 'from rdkit.Chem import AllChem as Chem\n'), ((68083, 68105), 
'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['el'], {}), '(el)\n', (68101, 68105), True, 'from rdkit.Chem import AllChem as Chem\n'), ((68138, 68150), 'copy.deepcopy', 'deepcopy', (['el'], {}), '(el)\n', (68146, 68150), False, 'from copy import deepcopy\n'), ((84982, 85035), 'ipywidgets.IntSlider', 'ipyw.IntSlider', ([], {'min': '(0)', 'max': 'num_pages', 'step': '(1)', 'value': '(0)'}), '(min=0, max=num_pages, step=1, value=0)\n', (84996, 85035), True, 'import ipywidgets as ipyw\n'), ((85494, 85547), 'ipywidgets.IntSlider', 'ipyw.IntSlider', ([], {'min': '(0)', 'max': 'num_pages', 'step': '(1)', 'value': '(0)'}), '(min=0, max=num_pages, step=1, value=0)\n', (85508, 85547), True, 'import ipywidgets as ipyw\n'), ((86116, 86169), 'ipywidgets.IntSlider', 'ipyw.IntSlider', ([], {'min': '(0)', 'max': 'num_pages', 'step': '(1)', 'value': '(0)'}), '(min=0, max=num_pages, step=1, value=0)\n', (86130, 86169), True, 'import ipywidgets as ipyw\n'), ((86415, 86465), 'rdkit.DataStructs.DiceSimilarity', 'DataStructs.DiceSimilarity', (['fp_list[i]', 'fp_list[j]'], {}), '(fp_list[i], fp_list[j])\n', (86441, 86465), False, 'from rdkit import DataStructs\n'), ((14701, 14716), 'random.random', 'random.random', ([], {}), '()\n', (14714, 14716), False, 'import random\n'), ((18193, 18218), 'rdkit.Chem.AllChem.MolFromSmarts', 'Chem.MolFromSmarts', (['query'], {}), '(query)\n', (18211, 18218), True, 'from rdkit.Chem import AllChem as Chem\n'), ((18388, 18413), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['query'], {}), '(query)\n', (18406, 18413), True, 'from rdkit.Chem import AllChem as Chem\n'), ((19134, 19149), 'rdkit.Chem.AllChem.AddHs', 'Chem.AddHs', (['mol'], {}), '(mol)\n', (19144, 19149), True, 'from rdkit.Chem import AllChem as Chem\n'), ((23481, 23502), 'copy.deepcopy', 'deepcopy', (['self[id_no]'], {}), '(self[id_no])\n', (23489, 23502), False, 'from copy import deepcopy\n'), ((27683, 27720), 'rdkit.Chem.AllChem.MolFromSmiles', 
'Chem.MolFromSmiles', (['sim_mol_or_smiles'], {}), '(sim_mol_or_smiles)\n', (27701, 27720), True, 'from rdkit.Chem import AllChem as Chem\n'), ((27980, 28031), 'rdkit.Chem.Scaffolds.MurckoScaffold.GetScaffoldForMol', 'MurckoScaffold.GetScaffoldForMol', (['sim_mol_or_smiles'], {}), '(sim_mol_or_smiles)\n', (28012, 28031), True, 'import rdkit.Chem.Scaffolds.MurckoScaffold as MurckoScaffold\n'), ((30380, 30393), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (30388, 30393), False, 'from copy import deepcopy\n'), ((32000, 32013), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (32008, 32013), False, 'from copy import deepcopy\n'), ((33042, 33055), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (33050, 33055), False, 'from copy import deepcopy\n'), ((33864, 33877), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (33872, 33877), False, 'from copy import deepcopy\n'), ((46271, 46290), 'numpy.mean', 'np.mean', (['value_list'], {}), '(value_list)\n', (46278, 46290), True, 'import numpy as np\n'), ((46331, 46352), 'numpy.median', 'np.median', (['value_list'], {}), '(value_list)\n', (46340, 46352), True, 'import numpy as np\n'), ((49427, 49454), 'numpy.corrcoef', 'np.corrcoef', (['both_y', 'both_x'], {}), '(both_y, both_x)\n', (49438, 49454), True, 'import numpy as np\n'), ((62391, 62414), 'time.strftime', 'time.strftime', (['"""%Y%m%d"""'], {}), "('%Y%m%d')\n", (62404, 62414), False, 'import time\n'), ((62573, 62597), 'rdkit.Chem.AllChem.CalcMolFormula', 'Chem.CalcMolFormula', (['mol'], {}), '(mol)\n', (62592, 62597), True, 'from rdkit.Chem import AllChem as Chem\n'), ((62757, 62803), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {'isomericSmiles': 'isomeric'}), '(mol, isomericSmiles=isomeric)\n', (62773, 62803), True, 'from rdkit.Chem import AllChem as Chem\n'), ((65894, 65945), 'rdkit.DataStructs.FingerprintSimilarity', 'DataStructs.FingerprintSimilarity', (['query_fp', 'mol_fp'], {}), '(query_fp, mol_fp)\n', (65927, 65945), 
False, 'from rdkit import DataStructs\n'), ((66451, 66474), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['frags'], {}), '(frags)\n', (66467, 66474), True, 'from rdkit.Chem import AllChem as Chem\n'), ((66669, 66688), 'rdkit.Chem.Descriptors.RingCount', 'Desc.RingCount', (['mol'], {}), '(mol)\n', (66683, 66688), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((68451, 68508), 'rdkit.Chem.AllChem.GenerateDepictionMatching2DStructure', 'Chem.GenerateDepictionMatching2DStructure', (['mol', 'align_mol'], {}), '(mol, align_mol)\n', (68492, 68508), True, 'from rdkit.Chem import AllChem as Chem\n'), ((83249, 83296), 'rdkit.Chem.Draw.MolToImage', 'Draw.MolToImage', (['mol'], {'size': '(size * 2, size * 2)'}), '(mol, size=(size * 2, size * 2))\n', (83264, 83296), False, 'from rdkit.Chem import Draw\n'), ((86802, 86861), 'rdkit.Chem.Descriptors.rdMolDescriptors.GetMorganFingerprintAsBitVect', 'Desc.rdMolDescriptors.GetMorganFingerprintAsBitVect', (['mol', '(2)'], {}), '(mol, 2)\n', (86853, 86861), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((2497, 2518), 'os.path.getmtime', 'op.getmtime', (['__file__'], {}), '(__file__)\n', (2508, 2518), True, 'import os.path as op\n'), ((13331, 13377), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {'isomericSmiles': 'isomeric'}), '(mol, isomericSmiles=isomeric)\n', (13347, 13377), True, 'from rdkit.Chem import AllChem as Chem\n'), ((19544, 19557), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (19552, 19557), False, 'from copy import deepcopy\n'), ((20661, 20674), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (20669, 20674), False, 'from copy import deepcopy\n'), ((24453, 24482), 'rdkit.Chem.AllChem.MolToSmiles', 'Chem.MolToSmiles', (['new_list[0]'], {}), '(new_list[0])\n', (24469, 24482), True, 'from rdkit.Chem import AllChem as Chem\n'), ((28102, 28168), 'rdkit.Chem.Descriptors.rdMolDescriptors.GetMorganFingerprintAsBitVect', 'Desc.rdMolDescriptors.GetMorganFingerprintAsBitVect', 
(['murcko_mol', '(2)'], {}), '(murcko_mol, 2)\n', (28153, 28168), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((35596, 35635), 'itertools.product', 'product', (['"""RS"""'], {'repeat': 'num_stereocenters'}), "('RS', repeat=num_stereocenters)\n", (35603, 35635), False, 'from itertools import product\n'), ((62911, 62928), 'rdkit.Chem.Descriptors.NOCount', 'Desc.NOCount', (['mol'], {}), '(mol)\n', (62923, 62928), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((63083, 63102), 'rdkit.Chem.Descriptors.NHOHCount', 'Desc.NHOHCount', (['mol'], {}), '(mol)\n', (63097, 63102), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((63443, 63460), 'rdkit.Chem.Descriptors.MolLogP', 'Desc.MolLogP', (['mol'], {}), '(mol)\n', (63455, 63460), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((63626, 63641), 'rdkit.Chem.Descriptors.MolWt', 'Desc.MolWt', (['mol'], {}), '(mol)\n', (63636, 63641), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((63797, 63824), 'rdkit.Chem.Descriptors.NumRotatableBonds', 'Desc.NumRotatableBonds', (['mol'], {}), '(mol)\n', (63819, 63824), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((64841, 64892), 'rdkit.Chem.Scaffolds.MurckoScaffold.GetScaffoldForMol', 'MurckoScaffold.GetScaffoldForMol', (['sim_mol_or_smiles'], {}), '(sim_mol_or_smiles)\n', (64873, 64892), True, 'import rdkit.Chem.Scaffolds.MurckoScaffold as MurckoScaffold\n'), ((65475, 65512), 'rdkit.Chem.Scaffolds.MurckoScaffold.GetScaffoldForMol', 'MurckoScaffold.GetScaffoldForMol', (['mol'], {}), '(mol)\n', (65507, 65512), True, 'import rdkit.Chem.Scaffolds.MurckoScaffold as MurckoScaffold\n'), ((73705, 73752), 'rdkit.Chem.Draw.MolToImage', 'Draw.MolToImage', (['mol'], {'size': '(size * 2, size * 2)'}), '(mol, size=(size * 2, size * 2))\n', (73720, 73752), False, 'from rdkit.Chem import Draw\n'), ((77824, 77871), 'rdkit.Chem.Draw.MolToImage', 'Draw.MolToImage', (['mol'], {'size': '(size * 2, size * 2)'}), '(mol, size=(size * 2, size * 2))\n', (77839, 77871), False, 'from rdkit.Chem 
import Draw\n'), ((86920, 86947), 'rdkit.Avalon.pyAvalonTools.GetAvalonFP', 'pyAv.GetAvalonFP', (['mol', '(1024)'], {}), '(mol, 1024)\n', (86936, 86947), True, 'from rdkit.Avalon import pyAvalonTools as pyAv\n'), ((86991, 87026), 'rdkit.Chem.Fingerprints.FingerprintMols.FingerprintMol', 'FingerprintMols.FingerprintMol', (['mol'], {}), '(mol)\n', (87021, 87026), False, 'from rdkit.Chem.Fingerprints import FingerprintMols\n'), ((17053, 17066), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (17061, 17066), False, 'from copy import deepcopy\n'), ((28241, 28275), 'rdkit.Avalon.pyAvalonTools.GetAvalonFP', 'pyAv.GetAvalonFP', (['murcko_mol', '(1024)'], {}), '(murcko_mol, 1024)\n', (28257, 28275), True, 'from rdkit.Avalon import pyAvalonTools as pyAv\n'), ((28329, 28371), 'rdkit.Chem.Fingerprints.FingerprintMols.FingerprintMol', 'FingerprintMols.FingerprintMol', (['murcko_mol'], {}), '(murcko_mol)\n', (28359, 28371), False, 'from rdkit.Chem.Fingerprints import FingerprintMols\n'), ((35719, 35732), 'rdkit.Chem.AllChem.Mol', 'Chem.Mol', (['mol'], {}), '(mol)\n', (35727, 35732), True, 'from rdkit.Chem import AllChem as Chem\n'), ((46386, 46403), 'numpy.log10', 'np.log10', (['max_max'], {}), '(max_max)\n', (46394, 46403), True, 'import numpy as np\n'), ((64264, 64278), 'rdkit.Chem.Descriptors.TPSA', 'Desc.TPSA', (['mol'], {}), '(mol)\n', (64273, 64278), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((64770, 64807), 'rdkit.Chem.AllChem.MolFromSmiles', 'Chem.MolFromSmiles', (['sim_mol_or_smiles'], {}), '(sim_mol_or_smiles)\n', (64788, 64807), True, 'from rdkit.Chem import AllChem as Chem\n'), ((64971, 65037), 'rdkit.Chem.Descriptors.rdMolDescriptors.GetMorganFingerprintAsBitVect', 'Desc.rdMolDescriptors.GetMorganFingerprintAsBitVect', (['murcko_mol', '(2)'], {}), '(murcko_mol, 2)\n', (65022, 65037), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((65589, 65655), 'rdkit.Chem.Descriptors.rdMolDescriptors.GetMorganFingerprintAsBitVect', 
'Desc.rdMolDescriptors.GetMorganFingerprintAsBitVect', (['murcko_mol', '(2)'], {}), '(murcko_mol, 2)\n', (65640, 65655), True, 'import rdkit.Chem.Descriptors as Desc\n'), ((22835, 22848), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (22843, 22848), False, 'from copy import deepcopy\n'), ((65118, 65152), 'rdkit.Avalon.pyAvalonTools.GetAvalonFP', 'pyAv.GetAvalonFP', (['murcko_mol', '(1024)'], {}), '(murcko_mol, 1024)\n', (65134, 65152), True, 'from rdkit.Avalon import pyAvalonTools as pyAv\n'), ((65214, 65256), 'rdkit.Chem.Fingerprints.FingerprintMols.FingerprintMol', 'FingerprintMols.FingerprintMol', (['murcko_mol'], {}), '(murcko_mol)\n', (65244, 65256), False, 'from rdkit.Chem.Fingerprints import FingerprintMols\n'), ((65734, 65768), 'rdkit.Avalon.pyAvalonTools.GetAvalonFP', 'pyAv.GetAvalonFP', (['murcko_mol', '(1024)'], {}), '(murcko_mol, 1024)\n', (65750, 65768), True, 'from rdkit.Avalon import pyAvalonTools as pyAv\n'), ((65828, 65870), 'rdkit.Chem.Fingerprints.FingerprintMols.FingerprintMol', 'FingerprintMols.FingerprintMol', (['murcko_mol'], {}), '(murcko_mol)\n', (65858, 65870), False, 'from rdkit.Chem.Fingerprints import FingerprintMols\n'), ((24160, 24173), 'copy.deepcopy', 'deepcopy', (['mol'], {}), '(mol)\n', (24168, 24173), False, 'from copy import deepcopy\n')] |
from torchtext import data
from torch.utils.data import DataLoader
from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset
from modules import make_translation_model
from optim import get_wrapper
from loss import LabelSmoothing
import numpy as np
import torch as th
import torch.optim as optim
import argparse
import yaml
import os
def run(proc_id, n_gpus, devices, config, checkpoint):
    """Train and evaluate a graph-based NMT model on one device.

    Entry point for each worker process (spawned once per GPU by
    ``th.multiprocessing.spawn`` when ``n_gpus > 1``).

    Args:
        proc_id: rank of this worker in ``[0, n_gpus)``.
        n_gpus: total number of worker processes / GPUs.
        devices: list of CUDA device ordinals, indexed by ``proc_id``.
        config: parsed YAML configuration dict.
        checkpoint: epoch index of the checkpoint to resume from, or -1
            to start from scratch.
    """
    th.manual_seed(config['seed'])
    np.random.seed(config['seed'])
    th.cuda.manual_seed_all(config['seed'])
    dev_id = devices[proc_id]
    if n_gpus > 1:
        dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
            master_ip='127.0.0.1', master_port='12345')
        # Bug fix: the process-group rank must lie in [0, world_size).
        # The previous code passed the CUDA device ordinal (dev_id),
        # which is wrong whenever --gpu does not start at 0.
        th.distributed.init_process_group(backend="nccl",
                                          init_method=dist_init_method,
                                          world_size=n_gpus,
                                          rank=proc_id)
    grad_accum = config['grad_accum']
    fields, splits, vocab_sizes = _build_dataset(config, proc_id, n_gpus)
    train_loader, dev_loader, test_loader = _make_loaders(splits, fields,
                                                          config, n_gpus)

    model = make_translation_model(
        vocab_sizes, config['dim_model'], config['dim_ff'],
        config['num_heads'], config['n_layers'], config['m_layers'],
        dropouti=config['dropouti'], dropouth=config['dropouth'],
        dropouta=config['dropouta'], dropoutc=config['dropoutc'],
        rel_pos=config['rel_pos'])
    if checkpoint != -1:
        with open('checkpoints/{}-{}.pkl'.format(checkpoint, config['save_name']), 'rb') as f:
            model.load_state_dict(th.load(f, map_location=lambda storage, loc: storage))
    if config.get('share_weight', False):
        # Tie the last embedding table with the output projection weights.
        model.embed[-1].lut.weight = model.generator.proj.weight

    criterion = LabelSmoothing(vocab_sizes[-1], smoothing=0.1)
    device = th.device(dev_id)
    th.cuda.set_device(device)
    model, criterion = model.to(device), criterion.to(device)

    optimizer = get_wrapper('noam')(
        config['dim_model'], config['factor'], config.get('warmup', 4000),
        optim.Adam(model.parameters(), lr=config['lr'], betas=(0.9, 0.98),
                   eps=1e-9, weight_decay=config.get('weight_decay', 0)))
    # Fast-forward the Noam learning-rate schedule over epochs that were
    # already trained before resuming from the checkpoint.
    for _ in range((checkpoint + 1) * len(train_loader)):
        optimizer.step()

    log_interval = config['log_interval']
    for epoch in range(checkpoint + 1, config['n_epochs']):
        if proc_id == 0:
            print("epoch {}".format(epoch))
            print("training...")
        _train_one_epoch(model, train_loader, criterion, optimizer, device,
                         grad_accum, n_gpus, proc_id, log_interval)
        _evaluate(model, dev_loader, criterion, device, n_gpus, proc_id,
                  'evaluate...')
        _evaluate(model, test_loader, criterion, device, n_gpus, proc_id,
                  'testing...')
        # Bug fix: only rank 0 writes the checkpoint; previously every
        # worker wrote the same file concurrently (corruption risk).
        if proc_id == 0:
            if not os.path.exists('checkpoints'):
                os.mkdir('checkpoints')
            with open('checkpoints/{}-{}.pkl'.format(epoch, config['save_name']), 'wb') as f:
                th.save(model.state_dict(), f)


def _build_dataset(config, proc_id, n_gpus):
    """Build train/dev/test splits and vocabularies for config['dataset'].

    Returns ``(fields, (train, dev, test), vocab_sizes)`` where ``fields``
    is whatever MTBatcher expects for this corpus: a single shared Field
    for WMT (joint vocabulary), a [src, tgt] Field pair otherwise.
    """
    name = config['dataset']
    if name == 'iwslt':
        fields = [data.Field(batch_first=True) for _ in range(2)]
        dataset = get_mt_dataset('iwslt')
        train, dev, test = dataset.splits(exts=('.tc.zh', '.tc.en'),
                                          fields=fields, root='./data')
        ctx = config['context_len']
        # Document-level corpus: keep surrounding sentences as context.
        train = DocumentMTDataset(train, context_length=ctx,
                                  part=(proc_id, n_gpus))
        dev = DocumentMTDataset(dev, context_length=ctx)
        test = DocumentMTDataset(test, context_length=ctx)
        vocabs = list(dataset.load_vocab(root='./data'))  # (zh, en)
        fields[0].vocab, fields[1].vocab = vocabs
    elif name == 'wmt':
        field = data.Field(batch_first=True)
        dataset = get_mt_dataset('wmt14')
        train, dev, test = dataset.splits(exts=['.en', '.de'],
                                          fields=[field, field], root='./data')
        train = MTDataset(train, part=(proc_id, n_gpus))
        dev = MTDataset(dev)
        test = MTDataset(test)
        # Single joint vocabulary shared by source and target.
        vocabs = [dataset.load_vocab(root='./data')[0]]
        field.vocab = vocabs[0]
        fields = field
    elif name == 'multi':
        fields = [data.Field(batch_first=True) for _ in range(2)]
        dataset = get_mt_dataset('multi30k')
        train, dev, test = dataset.splits(exts=['.en.atok', '.de.atok'],
                                          fields=fields, root='./data')
        train = MTDataset(train, part=(proc_id, n_gpus))
        dev = MTDataset(dev)
        test = MTDataset(test)
        vocabs = list(dataset.load_vocab(root='./data'))  # (en, de)
        fields[0].vocab, fields[1].vocab = vocabs
    else:
        # Previously an unknown name fell through to a NameError later on.
        raise ValueError('unknown dataset: {}'.format(name))
    print('vocab size: ', *[len(v) for v in vocabs])
    return fields, (train, dev, test), [len(v) for v in vocabs]


def _make_loaders(splits, fields, config, n_gpus):
    """Wrap the three splits in DataLoaders sharing one graph batcher."""
    train, dev, test = splits
    batcher = MTBatcher(fields, graph_type=config['graph_type'],
                        **config.get('graph_attrs', {}))
    train_loader = DataLoader(dataset=train,
                              batch_size=config['batch_size'] // n_gpus,
                              collate_fn=batcher,
                              shuffle=True,
                              num_workers=6)
    dev_loader = DataLoader(dataset=dev,
                            batch_size=config['dev_batch_size'],
                            collate_fn=batcher,
                            shuffle=False)
    test_loader = DataLoader(dataset=test,
                             batch_size=config['dev_batch_size'],
                             collate_fn=batcher,
                             shuffle=False)
    return train_loader, dev_loader, test_loader


def _move_batch(batch, device):
    """Move labels and both graphs (encoder/decoder) onto ``device``."""
    batch.y = batch.y.to(device)
    for g in (batch.g_enc, batch.g_dec):
        g.edata['etype'] = g.edata['etype'].to(device)
        g.ndata['x'] = g.ndata['x'].to(device)
        g.ndata['pos'] = g.ndata['pos'].to(device)
    return batch


def _all_reduce_and_step(model, optimizer, n_gpus, grad_accum):
    """Average gradients over workers and accumulation steps, then update."""
    for param in model.parameters():
        if param.requires_grad and param.grad is not None:
            if n_gpus > 1:
                th.distributed.all_reduce(param.grad.data,
                                          op=th.distributed.ReduceOp.SUM)
            param.grad.data /= (n_gpus * grad_accum)
    optimizer.step()
    optimizer.zero_grad()


def _train_one_epoch(model, loader, criterion, optimizer, device,
                     grad_accum, n_gpus, proc_id, log_interval):
    """Run one optimization epoch with gradient accumulation."""
    model.train()
    tot = hit = 0
    loss_accum = 0.0
    for i, batch in enumerate(loader):
        batch = _move_batch(batch, device)
        out = model(batch)
        loss = criterion(out, batch.y) / len(batch.y)
        loss_accum += loss.item() * len(batch.y)
        tot += len(batch.y)
        hit += (out.max(dim=-1)[1] == batch.y).sum().item()
        if proc_id == 0 and (i + 1) % log_interval == 0:
            print('step {}, loss : {}, acc : {}'.format(i, loss_accum / tot, hit / tot))
            tot = hit = 0
            loss_accum = 0.0
        loss.backward()
        if (i + 1) % grad_accum == 0:
            _all_reduce_and_step(model, optimizer, n_gpus, grad_accum)


def _evaluate(model, loader, criterion, device, n_gpus, proc_id, tag):
    """Report summed label-smoothing loss and token accuracy over ``loader``."""
    model.eval()
    tot = hit = 0
    loss_accum = 0.0
    for batch in loader:
        with th.no_grad():
            batch = _move_batch(batch, device)
            out = model(batch)
            # .item() keeps a python float instead of a 0-dim tensor.
            loss_accum += criterion(out, batch.y).item()
            tot += len(batch.y)
            hit += (out.max(dim=-1)[1] == batch.y).sum().item()
    if n_gpus > 1:
        th.distributed.barrier()
    if proc_id == 0:
        print(tag)
        print('loss : {}, acc : {}'.format(loss_accum / tot, hit / tot))
if __name__ == '__main__':
    # Command-line entry point: parse arguments, load the YAML config and
    # launch one worker per requested GPU.
    argparser = argparse.ArgumentParser("machine translation")
    argparser.add_argument('--config', type=str)
    argparser.add_argument('--gpu', type=str, default='0')
    argparser.add_argument('--checkpoint', type=int, default=-1)
    args = argparser.parse_args()
    with open(args.config, 'r') as f:
        # Bug fix: yaml.load() without an explicit Loader is deprecated in
        # PyYAML 5.x and raises TypeError in PyYAML >= 6; safe_load also
        # refuses arbitrary-object construction from untrusted files.
        config = yaml.safe_load(f)
    # --gpu takes a comma-separated list of CUDA device ordinals.
    devices = list(map(int, args.gpu.split(',')))
    n_gpus = len(devices)
    if n_gpus == 1:
        run(0, n_gpus, devices, config, args.checkpoint)
    else:
        # spawn() prepends the process index as the first argument to run().
        th.multiprocessing.spawn(
            run, args=(n_gpus, devices, config, args.checkpoint),
            nprocs=n_gpus)
| [
"graph.get_mt_dataset",
"loss.LabelSmoothing",
"yaml.load",
"optim.get_wrapper",
"torch.distributed.barrier",
"os.path.exists",
"argparse.ArgumentParser",
"modules.make_translation_model",
"graph.DocumentMTDataset",
"numpy.random.seed",
"os.mkdir",
"torchtext.data.Field",
"torch.distributed.... | [((412, 442), 'torch.manual_seed', 'th.manual_seed', (["config['seed']"], {}), "(config['seed'])\n", (426, 442), True, 'import torch as th\n'), ((447, 477), 'numpy.random.seed', 'np.random.seed', (["config['seed']"], {}), "(config['seed'])\n", (461, 477), True, 'import numpy as np\n'), ((482, 521), 'torch.cuda.manual_seed_all', 'th.cuda.manual_seed_all', (["config['seed']"], {}), "(config['seed'])\n", (505, 521), True, 'import torch as th\n'), ((5641, 5827), 'modules.make_translation_model', 'make_translation_model', (['vocab_sizes', 'dim_model', 'dim_ff', 'num_heads', 'n_layers', 'm_layers'], {'dropouti': 'dropouti', 'dropouth': 'dropouth', 'dropouta': 'dropouta', 'dropoutc': 'dropoutc', 'rel_pos': 'rel_pos'}), '(vocab_sizes, dim_model, dim_ff, num_heads, n_layers,\n m_layers, dropouti=dropouti, dropouth=dropouth, dropouta=dropouta,\n dropoutc=dropoutc, rel_pos=rel_pos)\n', (5663, 5827), False, 'from modules import make_translation_model\n'), ((6344, 6390), 'loss.LabelSmoothing', 'LabelSmoothing', (['vocab_sizes[-1]'], {'smoothing': '(0.1)'}), '(vocab_sizes[-1], smoothing=0.1)\n', (6358, 6390), False, 'from loss import LabelSmoothing\n'), ((6405, 6422), 'torch.device', 'th.device', (['dev_id'], {}), '(dev_id)\n', (6414, 6422), True, 'import torch as th\n'), ((6427, 6453), 'torch.cuda.set_device', 'th.cuda.set_device', (['device'], {}), '(device)\n', (6445, 6453), True, 'import torch as th\n'), ((11111, 11157), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""machine translation"""'], {}), "('machine translation')\n", (11134, 11157), False, 'import argparse\n'), ((734, 854), 'torch.distributed.init_process_group', 'th.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': 'dist_init_method', 'world_size': 'world_size', 'rank': 'dev_id'}), "(backend='nccl', init_method=\n dist_init_method, world_size=world_size, rank=dev_id)\n", (767, 854), True, 'import torch as th\n'), ((1159, 1182), 
'graph.get_mt_dataset', 'get_mt_dataset', (['"""iwslt"""'], {}), "('iwslt')\n", (1173, 1182), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((1296, 1387), 'graph.DocumentMTDataset', 'DocumentMTDataset', (['train'], {'context_length': "config['context_len']", 'part': '(proc_id, n_gpus)'}), "(train, context_length=config['context_len'], part=(\n proc_id, n_gpus))\n", (1313, 1387), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((1397, 1457), 'graph.DocumentMTDataset', 'DocumentMTDataset', (['dev'], {'context_length': "config['context_len']"}), "(dev, context_length=config['context_len'])\n", (1414, 1457), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((1473, 1534), 'graph.DocumentMTDataset', 'DocumentMTDataset', (['test'], {'context_length': "config['context_len']"}), "(test, context_length=config['context_len'])\n", (1490, 1534), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((1900, 2021), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train', 'batch_size': "(config['batch_size'] // n_gpus)", 'collate_fn': 'batcher', 'shuffle': '(True)', 'num_workers': '(6)'}), "(dataset=train, batch_size=config['batch_size'] // n_gpus,\n collate_fn=batcher, shuffle=True, num_workers=6)\n", (1910, 2021), False, 'from torch.utils.data import DataLoader\n'), ((2175, 2275), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dev', 'batch_size': "config['dev_batch_size']", 'collate_fn': 'batcher', 'shuffle': '(False)'}), "(dataset=dev, batch_size=config['dev_batch_size'], collate_fn=\n batcher, shuffle=False)\n", (2185, 2275), False, 'from torch.utils.data import DataLoader\n'), ((2389, 2490), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test', 'batch_size': "config['dev_batch_size']", 'collate_fn': 'batcher', 'shuffle': '(False)'}), "(dataset=test, 
batch_size=config['dev_batch_size'], collate_fn=\n batcher, shuffle=False)\n", (2399, 2490), False, 'from torch.utils.data import DataLoader\n'), ((6567, 6586), 'optim.get_wrapper', 'get_wrapper', (['"""noam"""'], {}), "('noam')\n", (6578, 6586), False, 'from optim import get_wrapper\n'), ((11421, 11433), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (11430, 11433), False, 'import yaml\n'), ((1093, 1121), 'torchtext.data.Field', 'data.Field', ([], {'batch_first': '(True)'}), '(batch_first=True)\n', (1103, 1121), False, 'from torchtext import data\n'), ((2629, 2657), 'torchtext.data.Field', 'data.Field', ([], {'batch_first': '(True)'}), '(batch_first=True)\n', (2639, 2657), False, 'from torchtext import data\n'), ((2676, 2699), 'graph.get_mt_dataset', 'get_mt_dataset', (['"""wmt14"""'], {}), "('wmt14')\n", (2690, 2699), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((2815, 2855), 'graph.MTDataset', 'MTDataset', (['train'], {'part': '(proc_id, n_gpus)'}), '(train, part=(proc_id, n_gpus))\n', (2824, 2855), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((2870, 2884), 'graph.MTDataset', 'MTDataset', (['dev'], {}), '(dev)\n', (2879, 2884), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((2900, 2915), 'graph.MTDataset', 'MTDataset', (['test'], {}), '(test)\n', (2909, 2915), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((3196, 3317), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train', 'batch_size': "(config['batch_size'] // n_gpus)", 'collate_fn': 'batcher', 'shuffle': '(True)', 'num_workers': '(6)'}), "(dataset=train, batch_size=config['batch_size'] // n_gpus,\n collate_fn=batcher, shuffle=True, num_workers=6)\n", (3206, 3317), False, 'from torch.utils.data import DataLoader\n'), ((3471, 3571), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dev', 'batch_size': 
"config['dev_batch_size']", 'collate_fn': 'batcher', 'shuffle': '(False)'}), "(dataset=dev, batch_size=config['dev_batch_size'], collate_fn=\n batcher, shuffle=False)\n", (3481, 3571), False, 'from torch.utils.data import DataLoader\n'), ((3685, 3786), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test', 'batch_size': "config['dev_batch_size']", 'collate_fn': 'batcher', 'shuffle': '(False)'}), "(dataset=test, batch_size=config['dev_batch_size'], collate_fn=\n batcher, shuffle=False)\n", (3695, 3786), False, 'from torch.utils.data import DataLoader\n'), ((6106, 6159), 'torch.load', 'th.load', (['f'], {'map_location': '(lambda storage, loc: storage)'}), '(f, map_location=lambda storage, loc: storage)\n', (6113, 6159), True, 'import torch as th\n'), ((9644, 9668), 'torch.distributed.barrier', 'th.distributed.barrier', ([], {}), '()\n', (9666, 9668), True, 'import torch as th\n'), ((10673, 10697), 'torch.distributed.barrier', 'th.distributed.barrier', ([], {}), '()\n', (10695, 10697), True, 'import torch as th\n'), ((3993, 4019), 'graph.get_mt_dataset', 'get_mt_dataset', (['"""multi30k"""'], {}), "('multi30k')\n", (4007, 4019), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((4137, 4177), 'graph.MTDataset', 'MTDataset', (['train'], {'part': '(proc_id, n_gpus)'}), '(train, part=(proc_id, n_gpus))\n', (4146, 4177), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((4192, 4206), 'graph.MTDataset', 'MTDataset', (['dev'], {}), '(dev)\n', (4201, 4206), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((4222, 4237), 'graph.MTDataset', 'MTDataset', (['test'], {}), '(test)\n', (4231, 4237), False, 'from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset\n'), ((4603, 4724), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train', 'batch_size': "(config['batch_size'] // n_gpus)", 'collate_fn': 'batcher', 'shuffle': 
'(True)', 'num_workers': '(6)'}), "(dataset=train, batch_size=config['batch_size'] // n_gpus,\n collate_fn=batcher, shuffle=True, num_workers=6)\n", (4613, 4724), False, 'from torch.utils.data import DataLoader\n'), ((4878, 4978), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'dev', 'batch_size': "config['dev_batch_size']", 'collate_fn': 'batcher', 'shuffle': '(False)'}), "(dataset=dev, batch_size=config['dev_batch_size'], collate_fn=\n batcher, shuffle=False)\n", (4888, 4978), False, 'from torch.utils.data import DataLoader\n'), ((5092, 5193), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test', 'batch_size': "config['dev_batch_size']", 'collate_fn': 'batcher', 'shuffle': '(False)'}), "(dataset=test, batch_size=config['dev_batch_size'], collate_fn=\n batcher, shuffle=False)\n", (5102, 5193), False, 'from torch.utils.data import DataLoader\n'), ((8882, 8894), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (8892, 8894), True, 'import torch as th\n'), ((9911, 9923), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (9921, 9923), True, 'import torch as th\n'), ((10852, 10881), 'os.path.exists', 'os.path.exists', (['"""checkpoints"""'], {}), "('checkpoints')\n", (10866, 10881), False, 'import os\n'), ((10899, 10922), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (10907, 10922), False, 'import os\n'), ((3927, 3955), 'torchtext.data.Field', 'data.Field', ([], {'batch_first': '(True)'}), '(batch_first=True)\n', (3937, 3955), False, 'from torchtext import data\n'), ((8486, 8560), 'torch.distributed.all_reduce', 'th.distributed.all_reduce', (['param.grad.data'], {'op': 'th.distributed.ReduceOp.SUM'}), '(param.grad.data, op=th.distributed.ReduceOp.SUM)\n', (8511, 8560), True, 'import torch as th\n')] |
import yaml
import inspect
from pcl2depth import velo_points_2_pano
import scipy.io
import numpy as np
import os
from os.path import join
import sys
from tqdm import tqdm
import matplotlib.pyplot as plt
import cv2
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
sys.path.insert(1, parentdir)
# get config
with open(join(parentdir, 'config.yaml'), 'r') as f:
cfg = yaml.safe_load(f)
# Select Platform
# platform = 'dataset_creation_handheld'
platform = 'dataset_creation_drone' # UAV
exp_names = cfg[platform]['all_exp_files']
pendrive_dir = cfg[platform]['dataroot']
v_fov = tuple(map(int, cfg[platform]['pcl2depth']['v_fov'][1:-1].split(',')))
h_fov = tuple(map(int, cfg[platform]['pcl2depth']['h_fov'][1:-1].split(',')))
nb_overlay_frames = cfg[platform]['pcl2depth']['nb_overlay_frames']
save_dir = pendrive_dir
for BAG_DATE in exp_names:
print('********* Processing {} *********'.format(BAG_DATE))
ROS_SAVE_DIR = join(save_dir, BAG_DATE)
MMWAVE_SAVE_PATH = os.path.join(*[ROS_SAVE_DIR, 'mmwave_middle'])
print(" Creating folder for mmWave depth images {}".format(MMWAVE_SAVE_PATH))
if not os.path.exists(MMWAVE_SAVE_PATH):
os.makedirs(MMWAVE_SAVE_PATH)
MMWAVE_READ_PATH = os.path.join(*[ROS_SAVE_DIR, 'mmwave_middle_pcl'])
mmwave_file_list = os.listdir(MMWAVE_READ_PATH)
mmwave_file_list.sort()
mmwave_ts_list = [int(i[:-4]) for i in mmwave_file_list]
###################
# Read all frames #
###################
frames = []
for file in mmwave_file_list:
mat = scipy.io.loadmat(os.path.join(MMWAVE_READ_PATH, file))
pc = np.array(mat['frame'])
upper_row_filter = (pc[:, 0] ** 2 + pc[:, 1] ** 2 + pc[:, 2] ** 2) ** 0.5 < cfg[platform]['pcl2depth']['mmwave_dist_max']
lower_row_filter = (pc[:, 0] ** 2 + pc[:, 1] ** 2 + pc[:, 2] ** 2) ** 0.5 > cfg[platform]['pcl2depth']['mmwave_dist_min']
row_filter = np.bitwise_and(upper_row_filter, lower_row_filter)
frames.append(pc[row_filter, :])
###################
# Overlay frames #
###################
frames = np.array(frames)
# overlay frames accounting for sparse pcl
overlay_frames = list()
# frames_array = np.array(frames)
for i in range(frames.shape[0]):
if i < nb_overlay_frames:
tmp = frames[i: i + nb_overlay_frames]
else:
tmp = frames[i - nb_overlay_frames:i]
try:
overlay_frames.append(np.concatenate(tmp))
except:
print('error')
###################
# Save Images #
###################
for timestamp, frame in tqdm(zip(mmwave_ts_list, overlay_frames), total=len(mmwave_ts_list)):
pano_img = velo_points_2_pano(frame,
cfg[platform]['pcl2depth']['v_res'],
cfg[platform]['pcl2depth']['h_res'],
v_fov,
h_fov,
cfg[platform]['pcl2depth']['max_v'],
depth=True)
try:
pano_img = cv2.resize(pano_img, (pano_img.shape[1] * 4, pano_img.shape[0] * 4))
pc_path = os.path.join(MMWAVE_SAVE_PATH, str(timestamp) + '.png')
cv2.imwrite(pc_path, pano_img)
except:
width = (h_fov[1] - h_fov[0] + 2) * 2
height = (v_fov[1] - v_fov[0] + 2) * 2
blank_image = np.zeros((height, width), np.uint8)
pc_path = os.path.join(MMWAVE_SAVE_PATH, str(timestamp) + '.png')
print('No point in the frame, empty image at: ' + pc_path)
cv2.imwrite(pc_path, blank_image)
| [
"os.path.exists",
"cv2.imwrite",
"sys.path.insert",
"os.listdir",
"os.makedirs",
"inspect.currentframe",
"os.path.join",
"numpy.bitwise_and",
"os.path.dirname",
"yaml.safe_load",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"cv2.resize",
"sys.path.append",
"pcl2depth.velo_points_... | [((314, 341), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (329, 341), False, 'import os\n'), ((342, 368), 'sys.path.append', 'sys.path.append', (['parentdir'], {}), '(parentdir)\n', (357, 368), False, 'import sys\n'), ((369, 398), 'sys.path.insert', 'sys.path.insert', (['(1)', 'parentdir'], {}), '(1, parentdir)\n', (384, 398), False, 'import sys\n'), ((476, 493), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (490, 493), False, 'import yaml\n'), ((1041, 1065), 'os.path.join', 'join', (['save_dir', 'BAG_DATE'], {}), '(save_dir, BAG_DATE)\n', (1045, 1065), False, 'from os.path import join\n'), ((1089, 1135), 'os.path.join', 'os.path.join', (["*[ROS_SAVE_DIR, 'mmwave_middle']"], {}), "(*[ROS_SAVE_DIR, 'mmwave_middle'])\n", (1101, 1135), False, 'import os\n'), ((1326, 1376), 'os.path.join', 'os.path.join', (["*[ROS_SAVE_DIR, 'mmwave_middle_pcl']"], {}), "(*[ROS_SAVE_DIR, 'mmwave_middle_pcl'])\n", (1338, 1376), False, 'import os\n'), ((1401, 1429), 'os.listdir', 'os.listdir', (['MMWAVE_READ_PATH'], {}), '(MMWAVE_READ_PATH)\n', (1411, 1429), False, 'import os\n'), ((2206, 2222), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (2214, 2222), True, 'import numpy as np\n'), ((423, 453), 'os.path.join', 'join', (['parentdir', '"""config.yaml"""'], {}), "(parentdir, 'config.yaml')\n", (427, 453), False, 'from os.path import join\n'), ((1230, 1262), 'os.path.exists', 'os.path.exists', (['MMWAVE_SAVE_PATH'], {}), '(MMWAVE_SAVE_PATH)\n', (1244, 1262), False, 'import os\n'), ((1272, 1301), 'os.makedirs', 'os.makedirs', (['MMWAVE_SAVE_PATH'], {}), '(MMWAVE_SAVE_PATH)\n', (1283, 1301), False, 'import os\n'), ((1724, 1746), 'numpy.array', 'np.array', (["mat['frame']"], {}), "(mat['frame'])\n", (1732, 1746), True, 'import numpy as np\n'), ((2028, 2078), 'numpy.bitwise_and', 'np.bitwise_and', (['upper_row_filter', 'lower_row_filter'], {}), '(upper_row_filter, lower_row_filter)\n', (2042, 2078), True, 
'import numpy as np\n'), ((2823, 2995), 'pcl2depth.velo_points_2_pano', 'velo_points_2_pano', (['frame', "cfg[platform]['pcl2depth']['v_res']", "cfg[platform]['pcl2depth']['h_res']", 'v_fov', 'h_fov', "cfg[platform]['pcl2depth']['max_v']"], {'depth': '(True)'}), "(frame, cfg[platform]['pcl2depth']['v_res'], cfg[platform\n ]['pcl2depth']['h_res'], v_fov, h_fov, cfg[platform]['pcl2depth'][\n 'max_v'], depth=True)\n", (2841, 2995), False, 'from pcl2depth import velo_points_2_pano\n'), ((276, 298), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (296, 298), False, 'import inspect\n'), ((1673, 1709), 'os.path.join', 'os.path.join', (['MMWAVE_READ_PATH', 'file'], {}), '(MMWAVE_READ_PATH, file)\n', (1685, 1709), False, 'import os\n'), ((3250, 3318), 'cv2.resize', 'cv2.resize', (['pano_img', '(pano_img.shape[1] * 4, pano_img.shape[0] * 4)'], {}), '(pano_img, (pano_img.shape[1] * 4, pano_img.shape[0] * 4))\n', (3260, 3318), False, 'import cv2\n'), ((3409, 3439), 'cv2.imwrite', 'cv2.imwrite', (['pc_path', 'pano_img'], {}), '(pc_path, pano_img)\n', (3420, 3439), False, 'import cv2\n'), ((2569, 2588), 'numpy.concatenate', 'np.concatenate', (['tmp'], {}), '(tmp)\n', (2583, 2588), True, 'import numpy as np\n'), ((3583, 3618), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint8'], {}), '((height, width), np.uint8)\n', (3591, 3618), True, 'import numpy as np\n'), ((3780, 3813), 'cv2.imwrite', 'cv2.imwrite', (['pc_path', 'blank_image'], {}), '(pc_path, blank_image)\n', (3791, 3813), False, 'import cv2\n')] |
"""
License
-------
The MIT License (MIT)
Copyright (c) 2018 Snappy2 Project
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
Created on Sat Dec 16 22:46:28 2017
@author: <NAME>
@Source: Github.com/Barqawiz
"""
import cv2
from keras.models import load_model
import numpy as np
from utils.Utility import Utility
import os
class Detector:
"""
Common class to detect areas of interest from images
Developed by: github.com/barqawiz
"""
def __init__(self):
base_folder = os.path.dirname(__file__)
self.face_cascade = cv2.CascadeClassifier(
os.path.join(base_folder,'../resource/haarcascades/haarcascade_frontalface_default.xml'))
self.model = load_model(os.path.join(base_folder, '../resource/models/keras_cv_base_model_1_avg.h5'))
def detect_faces(self, gray_human_image):
"""
Detect faces in image and return list of faces with related properties
:param gray_human_image: numpy gray image
:return: face_properties a list of dictionary with following information (face, loc, size)
"""
face_properties = []
faces = self.face_cascade.detectMultiScale(gray_human_image, 1.2, 7, minSize=(30, 30))
if len(faces) == 0:
faces = self.face_cascade.detectMultiScale(gray_human_image, 1.1, 8, minSize=(30, 30))
for (x, y, w, h) in faces:
# 1- detect face area
roi_image = gray_human_image[y:y + h, x:x + w]
face_properties.append({'face': roi_image, 'loc': (x, y), 'size': (w, h)})
return face_properties
def detect_key_points(self, face_properties, network_in_size=96):
"""
Detect key points areas in the face using neural network.
- key points for eyes, noise and mouth
:param face_properties:
:param network_in_size:
:return: key_properties a list of dictionary with following information (keys_x, keys_y, face_index)
"""
key_properties = []
index = 0
for face in face_properties:
# 0- get face information
roi_image = face['face']
x,y = face['loc']
w, h = face['size']
# 1- pre process
# prepare face image
roi_image = cv2.resize(roi_image, (network_in_size, network_in_size))
# scale pixel values to [0, 1]
roi_image = roi_image / 255
# reshape for netowork format
roi_image = roi_image.reshape(roi_image.shape[0], roi_image.shape[1], 1)
roi_image = np.array([roi_image])
# 2- predict face key points
y_predict = self.model.predict(roi_image)[0]
# map to original coordinates values
y_predict = Utility.reverse_nn_normalization(y_predict)
# 3- extract information
# get value within the original image
x_axis = y_predict[0::2]
y_axis = y_predict[1::2]
x_scale_factor = (w / 96)
y_scale_factor = (h / 96)
x_axis = x_axis * x_scale_factor + x
y_axis = y_axis * y_scale_factor + y
key_properties.append({'keys_x': x_axis, 'keys_y': y_axis, 'face_x': x, 'face_y': y,
'face_w': w, 'face_h': h, 'face_index': index})
index += 1
return key_properties
| [
"utils.Utility.Utility.reverse_nn_normalization",
"os.path.join",
"numpy.array",
"os.path.dirname",
"cv2.resize"
] | [((1054, 1079), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1069, 1079), False, 'import os\n'), ((1143, 1236), 'os.path.join', 'os.path.join', (['base_folder', '"""../resource/haarcascades/haarcascade_frontalface_default.xml"""'], {}), "(base_folder,\n '../resource/haarcascades/haarcascade_frontalface_default.xml')\n", (1155, 1236), False, 'import os\n'), ((1265, 1341), 'os.path.join', 'os.path.join', (['base_folder', '"""../resource/models/keras_cv_base_model_1_avg.h5"""'], {}), "(base_folder, '../resource/models/keras_cv_base_model_1_avg.h5')\n", (1277, 1341), False, 'import os\n'), ((2836, 2893), 'cv2.resize', 'cv2.resize', (['roi_image', '(network_in_size, network_in_size)'], {}), '(roi_image, (network_in_size, network_in_size))\n', (2846, 2893), False, 'import cv2\n'), ((3128, 3149), 'numpy.array', 'np.array', (['[roi_image]'], {}), '([roi_image])\n', (3136, 3149), True, 'import numpy as np\n'), ((3322, 3365), 'utils.Utility.Utility.reverse_nn_normalization', 'Utility.reverse_nn_normalization', (['y_predict'], {}), '(y_predict)\n', (3354, 3365), False, 'from utils.Utility import Utility\n')] |
import pandas as pd
import numpy as np
def preprocess_data(df):
df = df.drop(columns = ['FEINumberRecall','RecallingFirmName','RecallEventID','RecallEventClassification','RefusalFEINumber',
'RefusedDate','AnalysisDone','OutbreakLevel','Id','ImportingCountry'])
a = df[df['OriginCountry'].isin(['-'])]['OriginCountry']
df['OriginContinent'] = df['OriginContinent'].fillna(a)
df['PrimaryProcessingOriginContinent'] = df['PrimaryProcessingOriginContinent'].fillna(a)
b = df[df['OriginContinent'].isna()]
b['OriginContinent'][b['OriginCountry'].isin(['Faroe Islands', 'Gibraltar'])] = 'Europe'
b['OriginContinent'][b['OriginCountry'].isin(['American Samoa', 'French Polynesia', 'New Caledonia', 'Tokelau'])] = 'Oceania'
b['OriginContinent'][b['OriginCountry'].isin(['Hong Kong'])] = 'Asia'
b['OriginContinent'][b['OriginCountry'].isin(['Greenland', 'British Virgin Islands', 'Turks and Caicos Islands', 'U.S. Virgin Islands'])] = 'North America'
b['OriginContinent'][b['OriginCountry'].isin(['Reunion', 'Cape Verde', 'Saint Helena'])] = 'Africa'
b['PrimaryProcessingOriginContinent'][b['OriginCountry'].isin(['Faroe Islands', 'Gibraltar'])] = 'Europe'
b['PrimaryProcessingOriginContinent'][b['OriginCountry'].isin(['American Samoa', 'French Polynesia', 'New Caledonia', 'Tokelau'])] = 'Oceania'
b['PrimaryProcessingOriginContinent'][b['OriginCountry'].isin(['Hong Kong'])] = 'Asia'
b['PrimaryProcessingOriginContinent'][b['OriginCountry'].isin(['Greenland', 'British Virgin Islands', 'Turks and Caicos Islands', 'U.S. Virgin Islands'])] = 'North America'
b['PrimaryProcessingOriginContinent'][b['OriginCountry'].isin(['Reunion', 'Cape Verde', 'Saint Helena'])] = 'Africa'
df['OriginContinent'] = df['OriginContinent'].fillna(b['OriginContinent'])
df['PrimaryProcessingOriginContinent'] = df['PrimaryProcessingOriginContinent'].fillna(b['PrimaryProcessingOriginContinent'])
sec_na = df[df['SecondaryProcessingOriginCity'].isna()]
c = sec_na[(sec_na['PrimaryProcessingOriginCity'] == 'NA') & (sec_na['SecondaryProcessingOriginCity'].isna())]['SecondaryProcessingOriginCity'].fillna('NA')
sec_na['SecondaryProcessingOriginCity'] = sec_na['SecondaryProcessingOriginCity'].fillna(value = c)
d = sec_na[sec_na['ProductType'].isin(['Frozen','Raw'])]['OriginCity']
sec_na['SecondaryProcessingOriginCity'] = sec_na['SecondaryProcessingOriginCity'].fillna(d)
mask = sec_na['SecondaryProcessingOriginCity'].isna()
ind = sec_na['SecondaryProcessingOriginCity'].loc[mask][sec_na['SecondaryProcessingOriginCountry'] == 'Canada'].sample(frac=0.3).index
sec_na.loc[ind, 'SecondaryProcessingOriginCity']=sec_na.loc[ind, 'SecondaryProcessingOriginCity'].fillna('Vancouver')
sec_na['SecondaryProcessingOriginCity'] = sec_na[sec_na['SecondaryProcessingOriginCountry'] == 'Canada']['SecondaryProcessingOriginCity'].fillna("Brampton")
mask = sec_na['SecondaryProcessingOriginCity'].isna()
ind = sec_na['SecondaryProcessingOriginCity'].loc[mask][sec_na['SecondaryProcessingOriginCountry'] == 'Japan'].sample(frac=0.5).index
sec_na.loc[ind, 'SecondaryProcessingOriginCity']=sec_na.loc[ind, 'SecondaryProcessingOriginCity'].fillna('Kagoshima')
sec_na['SecondaryProcessingOriginCity'] = sec_na[sec_na['SecondaryProcessingOriginCountry'] == 'Japan']['SecondaryProcessingOriginCity'].fillna("Ojima")
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Mexico'])] = "Ensenada"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Chile'])] = "SANTIAGO"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Ecuador'])] = "Puerto Lopez"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Indonesia'])] = "Medan"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['India'])] = "Mumbai"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Thailand'])] = "Amphur Muang"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['United States'])] = "Shanghai"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['New Zealand'])] = "Mosgiel"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Honduras'])] = "Choluteca"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Spain'])] = "Madrid"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Ireland'])] = "Madrid"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Hong Kong'])] = "Kwai Chung"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Vietnam'])] = "Ho Chi Minh"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Peru'])] = "Piura"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Nicaragua'])] = "Managua"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Sri Lanka'])] = "Colombo"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['South Korea'])] = "Busan"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Denmark'])] = "Vinderup"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Philippines'])] = "Las Pinas"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Norway'])] = "Oslo"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Germany'])] = "Wallersdorf"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['France'])] = "Lyon"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Grenada'])] = "St. George"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Guatemala'])] = "Flores"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Italy'])] = "Genova"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Uruguay'])] = "Montevideo"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Guyana'])] = "Georgetown"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Venezuela'])] = "Cumana"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Latvia'])] = "Riga"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Morocco'])] = "Agadir"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Taiwan'])] = "Kaohsiung"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['China'])] = "Zhanjiang"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Iceland'])] = "Reykjavík"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Portugal'])] = "Matosinhos"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Greece'])] = "Keramoti"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Brazil'])] = "Itaoca"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['-'])] = "-"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Russia'])] = "Murmansk"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Australia'])] = "Mackay"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Turkey'])] = "Mugla"
sec_na['SecondaryProcessingOriginCity'][sec_na['SecondaryProcessingOriginCountry'].isin(['Panama'])] = "BRISAS DE AMADOR"
sec_na['SecondaryProcessingOriginCity'] = sec_na['SecondaryProcessingOriginCity'].fillna('Other')
df['SecondaryProcessingOriginCity'] = df['SecondaryProcessingOriginCity'].fillna(sec_na['SecondaryProcessingOriginCity'])
# Filling OriginContinent with OriginCountry's continent
# Europe = Faroe Islands, Gibraltar
# Oceania = American Samao, FrenchPolynesia, New Caledonia, Tokelau
# Asia = Hong Kong
# north america = Greenland, British Virgin Islands, Turks and Caicos Islands, U.S. Virgin Islands
# Africa = Reunion, Cape Verde, Saint Helena
df['SecondaryProcessingOriginContinent'][df['SecondaryProcessingOriginCountry'].isin(['Faroe Islands', 'Gibraltar'])] = 'Europe'
df['SecondaryProcessingOriginContinent'][df['SecondaryProcessingOriginCountry'].isin(['American Samoa', 'French Polynesia', 'New Caledonia', 'Tokelau'])] = 'Oceania'
df['SecondaryProcessingOriginContinent'][df['SecondaryProcessingOriginCountry'].isin(['Hong Kong'])] = 'Asia'
df['SecondaryProcessingOriginContinent'][df['SecondaryProcessingOriginCountry'].isin(['Greenland', 'British Virgin Islands', 'Turks and Caicos Islands', 'U.S. Virgin Islands'])] = 'North America'
df['SecondaryProcessingOriginContinent'][df['SecondaryProcessingOriginCountry'].isin(['Reunion', 'Cape Verde', 'Saint Helena'])] = 'Africa'
df['SecondaryProcessingOriginContinent'][df['SecondaryProcessingOriginCountry'].isin(['-'])] = '-'
df['OriginCity'] = df['OriginCity'].fillna('Others')
df['PrimaryProcessingOriginCity'] = df['PrimaryProcessingOriginCity'].fillna('Others')
df['StorageCondition'] = df['StorageCondition'].fillna('others')
df['PackagingType'] = df['PackagingType'].fillna('others')
corr_matrix = df.apply(lambda x : pd.factorize(x)[0]).corr(method='pearson', min_periods=1)
high_corr_var=np.where(corr_matrix>0.8)
high_corr_var=[(corr_matrix.columns[x],corr_matrix.columns[y]) for x,y in zip(*high_corr_var) if x!=y and x<y]
final_df = df.drop(['ShipmentID','ArrivalDate', 'SubmissionDate','ManufacturerFEINumber','FilerFEINumber','PrimaryProcessedDateTime',
'SecondaryProcessedDTTM','CatchDTTM','OriginCountry','PrimaryProcessingOriginCountry','SecondaryProcessingOriginCountry',
'FishCommonName','SourceSubCategory','PrimaryProcessingOriginCity','SecondaryProcessingOriginCountry',
'SecondaryProcessingOriginContinent','LastInspectionStatus','InspectActive','PackagingType',
'FinalDispositionDate','IsSafe', 'PrimaryProcessingOriginContinent'], axis = 1)
return final_df
data=pd.read_csv("data/raw_data/seafood_imports.csv")
data=preprocess_data(data)
data.to_csv("data/preprocessed_data/seafood_imports.csv")
| [
"numpy.where",
"pandas.factorize",
"pandas.read_csv"
] | [((10855, 10903), 'pandas.read_csv', 'pd.read_csv', (['"""data/raw_data/seafood_imports.csv"""'], {}), "('data/raw_data/seafood_imports.csv')\n", (10866, 10903), True, 'import pandas as pd\n'), ((10075, 10102), 'numpy.where', 'np.where', (['(corr_matrix > 0.8)'], {}), '(corr_matrix > 0.8)\n', (10083, 10102), True, 'import numpy as np\n'), ((9999, 10014), 'pandas.factorize', 'pd.factorize', (['x'], {}), '(x)\n', (10011, 10014), True, 'import pandas as pd\n')] |
import pandas as pd
import numpy
import matplotlib
import sklearn_crfsuite
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn_crfsuite import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import RandomizedSearchCV
from sklearn_crfsuite import scorers
from sklearn.externals import joblib
from glob import glob
import scipy.stats
import operator
import sys
import os
# FBcols = ["FB%d" % d for d in range(4096)]
# GGcols = ["GG%d" % d for d in range(512)]
# elmocols = ["ELMO%d" % d for d in range(1024)]
features = ["GS%d" % d for d in range(4096)] + ['wordCount','chartStart','charEnd']
# labelNames = ['semanticType','Symptom','PMH','MEDS','ALLG','FAMHx','SOCHx','pysch','lifestyle','substanceUse','PE','FORM','supportProvision','transition']
labelNames = ['supportProvision']
files = glob("/Users/karanjani/Desktop/csvWithVecs/TrainCSV_Updated/*.csv")
#MAYBE CREATE A LIST FOR featurelabels so you can add what you wish to the FB vectors?
for name in labelNames:
featureMaster = []
labelMaster = []
for file in files[:10]:
df = pd.read_csv(file)
df = df.dropna(axis=0, how='any')
df = df[df.speakerID == 'doctor']
#DROP ALL LABELS + ANY FEATURES YOU DON'T WANT TO INCLUDE
dfX = df[features]
# dfX = df.drop(['labelType','stringList','transition'], axis=1)
#CREATE LIST OF LIST OF DICTS OF FEATURES
list_of_FeatureDicts = dfX.to_dict(orient='records')
featureMaster += [list_of_FeatureDicts]
#CREATE LIST OF LIST OF STRINGS OF LABELS
labels = df[name].values.tolist()
labelMaster += [labels]
X_train, X_valid, Y_train, Y_valid = train_test_split(featureMaster, labelMaster, test_size = 0.2)
crf = sklearn_crfsuite.CRF(
algorithm='lbfgs',
max_iterations=100,
all_possible_transitions=True)
params_space = {'c1': scipy.stats.expon(scale=0.5),'c2': scipy.stats.expon(scale=0.05)}
f1_scorer = make_scorer(metrics.flat_f1_score,average='weighted', labels=numpy.unique(name))
rs = RandomizedSearchCV(crf, params_space,
cv=2,
verbose=1,
n_jobs=-1,
n_iter=10,
scoring=f1_scorer)
rs.fit(X_train, Y_train)
print('best params:', rs.best_params_)
print('best CV score:', rs.best_score_)
print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000))
| [
"sklearn.grid_search.RandomizedSearchCV",
"numpy.unique",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn_crfsuite.CRF",
"glob.glob"
] | [((972, 1039), 'glob.glob', 'glob', (['"""/Users/karanjani/Desktop/csvWithVecs/TrainCSV_Updated/*.csv"""'], {}), "('/Users/karanjani/Desktop/csvWithVecs/TrainCSV_Updated/*.csv')\n", (976, 1039), False, 'from glob import glob\n'), ((1750, 1809), 'sklearn.model_selection.train_test_split', 'train_test_split', (['featureMaster', 'labelMaster'], {'test_size': '(0.2)'}), '(featureMaster, labelMaster, test_size=0.2)\n', (1766, 1809), False, 'from sklearn.model_selection import train_test_split\n'), ((1820, 1914), 'sklearn_crfsuite.CRF', 'sklearn_crfsuite.CRF', ([], {'algorithm': '"""lbfgs"""', 'max_iterations': '(100)', 'all_possible_transitions': '(True)'}), "(algorithm='lbfgs', max_iterations=100,\n all_possible_transitions=True)\n", (1840, 1914), False, 'import sklearn_crfsuite\n'), ((2116, 2215), 'sklearn.grid_search.RandomizedSearchCV', 'RandomizedSearchCV', (['crf', 'params_space'], {'cv': '(2)', 'verbose': '(1)', 'n_jobs': '(-1)', 'n_iter': '(10)', 'scoring': 'f1_scorer'}), '(crf, params_space, cv=2, verbose=1, n_jobs=-1, n_iter=10,\n scoring=f1_scorer)\n', (2134, 2215), False, 'from sklearn.grid_search import RandomizedSearchCV\n'), ((1226, 1243), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (1237, 1243), True, 'import pandas as pd\n'), ((2088, 2106), 'numpy.unique', 'numpy.unique', (['name'], {}), '(name)\n', (2100, 2106), False, 'import numpy\n')] |
import os
import glob
import numpy as np
import tabulate
def pprint_dict(x):
"""
:param x: a dict
:return: a string of pretty representation of the dict
"""
def helper(d):
ret = {}
for k, v in d.items():
if isinstance(v, dict):
ret[k] = helper(v)
else:
ret[k] = v
return tabulate.tabulate(ret.items())
return helper(x)
def str_to_dict(s, delim=',', kv_delim='='):
ss = s.split(delim)
d = {}
for s in ss:
if s == '':
continue
field, value = s.split(kv_delim)
try:
value = eval(value, {'__builtins__': None})
except:
# Cannot convert the value. Treat it as it is.
pass
d[field] = value
return d
def module_grad_stats(module):
headers = ['layer', 'max', 'min']
def maybe_max(x):
return x.max() if x is not None else 'None'
def maybe_min(x):
return x.min() if x is not None else 'None'
data = [
(name, maybe_max(param.grad), maybe_min(param.grad))
for name, param in module.named_parameters()
]
return tabulate.tabulate(data, headers, tablefmt='psql')
def save_model(state, step, dir, filename):
import torch
path = os.path.join(dir, '%s.%d' % (filename, step))
torch.save(state, path)
def load_model(dir, filename, step=None, load_to_cpu=False):
'''
:param model:
:param dir:
:param filename:
:param step: if None. Load the latest.
:return: the saved state dict
'''
import torch
import parse
if not step:
files = glob.glob(os.path.join(dir, '%s.*' % filename))
parsed = []
for fn in files:
r = parse.parse('{}.{:d}', fn)
if r:
parsed.append((r, fn))
if not parsed:
return None
step, path = max(parsed, key=lambda x: x[0][1])
else:
path = os.path.join(dir, '%s.%d' % (filename, step))
if os.path.isfile(path):
if load_to_cpu:
return torch.load(path, map_location=lambda storage, location: storage)
else:
return torch.load(path)
raise Exception('Failed to load model')
def get_project_root():
return os.path.normpath(os.path.dirname(__file__) + '/../../')
def get_gibson_asset_dir():
return os.path.join(get_project_root(), 'rmp_nav', 'gibson', 'assets', 'dataset')
def get_data_dir():
return os.path.join(get_project_root(), 'data')
def get_config_dir():
return os.path.join(get_project_root(), 'configs')
def get_model_dir():
return os.path.join(get_project_root(), 'models')
def cairo_argb_to_opencv_rgb(arr):
argb = arr.view(dtype=np.dtype((np.uint32, {'a': (np.uint8, 3),
'r': (np.uint8, 2),
'g': (np.uint8, 1),
'b': (np.uint8, 0)})))
return np.stack([argb['r'], argb['g'], argb['b']], axis=2)
def cairo_argb_to_opencv_bgr(arr):
argb = arr.view(dtype=np.dtype((np.uint32, {'a': (np.uint8, 3),
'r': (np.uint8, 2),
'g': (np.uint8, 1),
'b': (np.uint8, 0)})))
return np.stack([argb['b'], argb['g'], argb['r']], axis=2)
| [
"tabulate.tabulate",
"parse.parse",
"torch.load",
"os.path.join",
"os.path.isfile",
"numpy.stack",
"os.path.dirname",
"torch.save",
"numpy.dtype"
] | [((1171, 1220), 'tabulate.tabulate', 'tabulate.tabulate', (['data', 'headers'], {'tablefmt': '"""psql"""'}), "(data, headers, tablefmt='psql')\n", (1188, 1220), False, 'import tabulate\n'), ((1295, 1340), 'os.path.join', 'os.path.join', (['dir', "('%s.%d' % (filename, step))"], {}), "(dir, '%s.%d' % (filename, step))\n", (1307, 1340), False, 'import os\n'), ((1345, 1368), 'torch.save', 'torch.save', (['state', 'path'], {}), '(state, path)\n', (1355, 1368), False, 'import torch\n'), ((2023, 2043), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2037, 2043), False, 'import os\n'), ((3010, 3061), 'numpy.stack', 'np.stack', (["[argb['r'], argb['g'], argb['b']]"], {'axis': '(2)'}), "([argb['r'], argb['g'], argb['b']], axis=2)\n", (3018, 3061), True, 'import numpy as np\n'), ((3385, 3436), 'numpy.stack', 'np.stack', (["[argb['b'], argb['g'], argb['r']]"], {'axis': '(2)'}), "([argb['b'], argb['g'], argb['r']], axis=2)\n", (3393, 3436), True, 'import numpy as np\n'), ((1969, 2014), 'os.path.join', 'os.path.join', (['dir', "('%s.%d' % (filename, step))"], {}), "(dir, '%s.%d' % (filename, step))\n", (1981, 2014), False, 'import os\n'), ((1657, 1693), 'os.path.join', 'os.path.join', (['dir', "('%s.*' % filename)"], {}), "(dir, '%s.*' % filename)\n", (1669, 1693), False, 'import os\n'), ((1756, 1782), 'parse.parse', 'parse.parse', (['"""{}.{:d}"""', 'fn'], {}), "('{}.{:d}', fn)\n", (1767, 1782), False, 'import parse\n'), ((2088, 2152), 'torch.load', 'torch.load', (['path'], {'map_location': '(lambda storage, location: storage)'}), '(path, map_location=lambda storage, location: storage)\n', (2098, 2152), False, 'import torch\n'), ((2186, 2202), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (2196, 2202), False, 'import torch\n'), ((2302, 2327), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2317, 2327), False, 'import os\n'), ((2750, 2858), 'numpy.dtype', 'np.dtype', (["(np.uint32, {'a': (np.uint8, 3), 'r': (np.uint8, 
2), 'g': (np.uint8, 1),\n 'b': (np.uint8, 0)})"], {}), "((np.uint32, {'a': (np.uint8, 3), 'r': (np.uint8, 2), 'g': (np.\n uint8, 1), 'b': (np.uint8, 0)}))\n", (2758, 2858), True, 'import numpy as np\n'), ((3125, 3233), 'numpy.dtype', 'np.dtype', (["(np.uint32, {'a': (np.uint8, 3), 'r': (np.uint8, 2), 'g': (np.uint8, 1),\n 'b': (np.uint8, 0)})"], {}), "((np.uint32, {'a': (np.uint8, 3), 'r': (np.uint8, 2), 'g': (np.\n uint8, 1), 'b': (np.uint8, 0)}))\n", (3133, 3233), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""OpenBabel toolkit for DeCAF"""
from decaf import PHARS, Pharmacophore
import pybel
import openbabel as ob
import numpy as np
from collections import deque
import math
# Compile each pharmacophore SMARTS definition once, at import time, so the
# matchers can be reused for every ligand processed by this module.
PATTERNS = {phar: pybel.Smarts(smarts) for (phar, smarts) in PHARS.items()}
def __count_bonds(a1, a2, exclude):
    """Count number of bonds between two pharmacophore points, if the shortest
    path does not contain any other pharmacophore point.

    Args:
       a1, a2 (OBAtom): source and target atoms
       exclude (list): atoms (ids) that cannot be in the shortest path

    Returns:
       int: number of bonds in path or -1 if there is no path between a1 and a2
    """
    # Breadth-first search from a1: the first time a2 is dequeued, `depth` is
    # the length of the shortest path that avoids every atom id in `exclude`.
    excluded = set(exclude)          # O(1) membership instead of O(n) list scans
    visited = {a1.GetIdx()}          # mark on enqueue so atoms are queued at most once
    queue = deque([(a1, 0)])
    while queue:
        atom, depth = queue.popleft()
        if atom == a2:
            return depth
        for neighbor in ob.OBAtomAtomIter(atom):
            n_idx = neighbor.GetIdx()
            if n_idx not in visited and n_idx not in excluded:
                visited.add(n_idx)
                queue.append((neighbor, depth + 1))
    return -1
def phar_from_mol(ligand):
    """Create Pharmacophore from given pybel.Molecule object.

    Raises:
        TypeError: if `ligand` is not a pybel.Molecule.
    """
    if not isinstance(ligand, pybel.Molecule):
        raise TypeError("Invalid ligand! Expected pybel.Molecule object, got "
                        "%s instead" % type(ligand).__name__)

    # Match every pharmacophoric SMARTS pattern; keep first atom of each hit.
    matches = {}
    for phar, pattern in PATTERNS.items():
        hits = list(zip(*pattern.findall(ligand)))
        matches[phar] = list(hits[0]) if hits else []

    points = {}     # atom number -> node index in `nodes`
    nodes = []
    idx = 0
    for phar, atoms in matches.items():
        for atom in atoms:
            if atom not in points:
                points[atom] = idx
                idx += 1
                nodes.append({"label": atom, "type": {phar: 1.0},
                              "freq": 1.0})
            else:
                # Atom already registered: just flag the extra pharmacophore type.
                nodes[points[atom]]["type"][phar] = 1.0

    # Edge weight = number of bonds on the shortest path between two points,
    # provided that path avoids every other pharmacophore point.
    edges = np.zeros((idx, idx))
    keys = sorted(points.keys())
    num = len(keys)
    for i in range(num):
        for j in range(i):
            others = [keys[k] for k in range(num) if k != i and k != j]
            dist = float(__count_bonds(ligand.atoms[keys[i]-1].OBAtom,
                                        ligand.atoms[keys[j]-1].OBAtom,
                                        others))
            if dist > -1:
                edges[points[keys[i]], points[keys[j]]] = dist
                edges[points[keys[j]], points[keys[i]]] = dist

    if ligand.title == "":
        return Pharmacophore(nodes, edges, molecules=1.0)
    return Pharmacophore(nodes, edges, molecules=1.0, title=ligand.title)
def layout(p):
    """Calculate points positions for depiction of Pharmacophore p using OpenBabel.

    Builds a dummy OBMol with one atom per pharmacophore node, encodes each
    edge weight as a chain of invisible 1-hybridized atoms, lets OpenBabel
    compute 2D coordinates, and returns the (numnodes, 2) coordinate array.
    """
    if not isinstance(p, Pharmacophore):
        raise TypeError("Expected Pharmacophore object, got %s instead" %
                        type(p).__name__)

    positions = np.zeros((p.numnodes, 2))

    # Dummy molecule whose first `numnodes` atoms stand in for the nodes.
    m = pybel.Molecule(ob.OBMol())
    for i in range(p.numnodes):
        m.OBMol.NewAtom()
    # OpenBabel atom ids are 1-based; ids above numnodes are helper atoms.
    idx = p.numnodes + 1
    for i in range(p.numnodes):
        for j in range(i):
            if p.edges[i, j] > 0:
                # ceil(weight) - 1 helper atoms give the edge its length.
                tmp = int(math.ceil(p.edges[i, j])) - 1
                prev = i + 1
                # add invisible atoms to get right distance
                for k in range(tmp):
                    atom = m.OBMol.NewAtom(idx)
                    atom.SetHyb(1)
                    m.OBMol.AddBond(prev, idx, 1)
                    prev = idx
                    idx += 1
                m.OBMol.AddBond(prev, j + 1, 1)
    # Trigger OpenBabel's 2D coordinate generation without showing a window.
    m.draw(show=False, update=True)
    for i in range(p.numnodes):
        positions[i][0] = m.atoms[i].coords[0]
        positions[i][1] = m.atoms[i].coords[1]
    return positions
| [
"pybel.Smarts",
"collections.deque",
"math.ceil",
"decaf.PHARS.items",
"numpy.zeros",
"openbabel.OBAtomAtomIter",
"decaf.Pharmacophore",
"openbabel.OBMol"
] | [((215, 235), 'pybel.Smarts', 'pybel.Smarts', (['smarts'], {}), '(smarts)\n', (227, 235), False, 'import pybel\n'), ((726, 742), 'collections.deque', 'deque', (['[(a1, 0)]'], {}), '([(a1, 0)])\n', (731, 742), False, 'from collections import deque\n'), ((2082, 2102), 'numpy.zeros', 'np.zeros', (['(idx, idx)'], {}), '((idx, idx))\n', (2090, 2102), True, 'import numpy as np\n'), ((3036, 3061), 'numpy.zeros', 'np.zeros', (['(p.numnodes, 2)'], {}), '((p.numnodes, 2))\n', (3044, 3061), True, 'import numpy as np\n'), ((258, 271), 'decaf.PHARS.items', 'PHARS.items', ([], {}), '()\n', (269, 271), False, 'from decaf import PHARS, Pharmacophore\n'), ((2626, 2668), 'decaf.Pharmacophore', 'Pharmacophore', (['nodes', 'edges'], {'molecules': '(1.0)'}), '(nodes, edges, molecules=1.0)\n', (2639, 2668), False, 'from decaf import PHARS, Pharmacophore\n'), ((2695, 2757), 'decaf.Pharmacophore', 'Pharmacophore', (['nodes', 'edges'], {'molecules': '(1.0)', 'title': 'ligand.title'}), '(nodes, edges, molecules=1.0, title=ligand.title)\n', (2708, 2757), False, 'from decaf import PHARS, Pharmacophore\n'), ((3085, 3095), 'openbabel.OBMol', 'ob.OBMol', ([], {}), '()\n', (3093, 3095), True, 'import openbabel as ob\n'), ((962, 985), 'openbabel.OBAtomAtomIter', 'ob.OBAtomAtomIter', (['atom'], {}), '(atom)\n', (979, 985), True, 'import openbabel as ob\n'), ((3299, 3323), 'math.ceil', 'math.ceil', (['p.edges[i, j]'], {}), '(p.edges[i, j])\n', (3308, 3323), False, 'import math\n')] |
#!/usr/bin/env python3
################################################################################
# parse arguments first
import argparse
parser = argparse.ArgumentParser()
# Grid sizes tested are 2**p + 1 for p in [min_power, max_power].
parser.add_argument('--min_2d_power', type=int, default=3)
parser.add_argument('--max_2d_power', type=int, default=15)
parser.add_argument('--min_3d_power', type=int, default=3)
parser.add_argument('--max_3d_power', type=int, default=10)
parser.add_argument('--build_type', type=str, default='Release')
args = parser.parse_args()
################################################################################
# preliminaries
import sys;
# Make the compiled pyolim extension and helper modules importable.
sys.path.insert(0, '../build/%s' % args.build_type)
sys.path.insert(0, '../misc/py')
import common
import common3d
import matplotlib.pyplot as plt
import numpy as np
import pyolim as olim
from matplotlib.colors import LogNorm
from numpy.linalg import norm
plt.rc('text', usetex=True)
plt.rc('font', **{
    'family': 'serif',
    'serif': ['Computer Modern'],
    'size': 8
})
plt.style.use('bmh')
################################################################################
# parameters
R_fac = 0.1  # radius of the factored region around each point source
N = 2**np.arange(args.min_2d_power, args.max_2d_power + 1) + 1    # 2D grid sizes
N3D = 2**np.arange(args.min_3d_power, args.max_3d_power + 1) + 1  # 3D grid sizes
vx, vy, vz = 5, 13, 20  # linear speed-gradient components
# Two point-source locations (the second offset in x).
x_fac_1, y_fac_1, z_fac_1 = 0.0, 0.0, 0.0
x_fac_2, y_fac_2, z_fac_2 = 0.8, 0.0, 0.0
# Marcher variants under comparison.
marchers_2d = [olim.Olim8Mid0, olim.Olim8Mid1, olim.Olim8Rect]
marchers_3d = [olim.Olim26Mid0, olim.Olim26Mid1, olim.Olim26Rect,
               olim.Olim3dHuMid0, olim.Olim3dHuMid1, olim.Olim3dHuRect]
################################################################################
# 2D
# Slowness field with a linear speed gradient; s_1/s_2 are its values at the
# two source points.
s = lambda x, y: 1/(2 + vx*x + vy*y)
s_1, s_2 = s(x_fac_1, y_fac_1), s(x_fac_2, y_fac_2)
def make_u(x_fac, y_fac, vx, vy, s):
    """Return the analytic eikonal solution u(x, y) for a point source at
    (x_fac, y_fac) and slowness field `s` with linear speed gradient (vx, vy)."""
    vnorm_sq = vx**2 + vy**2
    def u(x, y):
        r_sq = (x - x_fac)**2 + (y - y_fac)**2
        arg = 1 + s(x_fac, y_fac)*s(x, y)*vnorm_sq*r_sq/2
        return (1/np.sqrt(vnorm_sq)) * np.arccosh(arg)
    return u
# Exact solution: pointwise minimum of the two single-source solutions.
u_1 = make_u(x_fac_1, y_fac_1, vx, vy, s)
u_2 = make_u(x_fac_2, y_fac_2, vx, vy, s)
u = lambda x, y: np.minimum(u_1(x, y), u_2(x, y))
E2 = dict()       # NOTE(review): allocated but never filled below; only E2_fac is used
E2_fac = dict()   # relative L-inf error of the factored solver per marcher
for Olim in marchers_2d:
    print(common.get_marcher_name(Olim))
    E2[Olim] = np.zeros(len(N))
    E2_fac[Olim] = np.zeros(len(N))
    for k, n in enumerate(N):
        print('- n = %d (%d/%d)' % (n, k + 1, len(N)))
        L = np.linspace(0, 1, n)
        X, Y = np.meshgrid(L, L)
        u_ = u(X, Y)   # exact solution on the grid
        S = s(X, Y)
        h = 1/(n - 1)
        # Grid (row, col) indices of the two sources.
        i_1, j_1 = y_fac_1/h, x_fac_1/h
        i_2, j_2 = y_fac_2/h, x_fac_2/h
        m_fac = Olim(S, h)
        # Apply local factoring to every node within R_fac of source 1.
        R_1 = np.sqrt((x_fac_1 - X)**2 + (y_fac_1 - Y)**2)
        fc_1 = olim.FacCenter(i_1, j_1, s_1)
        for i, j in zip(*np.where(R_1 <= R_fac)):
            m_fac.set_node_fac_center(i, j, fc_1)
        m_fac.add_boundary_node(x_fac_1, y_fac_1, s_1)
        # Same for source 2.
        R_2 = np.sqrt((x_fac_2 - X)**2 + (y_fac_2 - Y)**2)
        fc_2 = olim.FacCenter(i_2, j_2, s_2)
        for i, j in zip(*np.where(R_2 <= R_fac)):
            m_fac.set_node_fac_center(i, j, fc_2)
        m_fac.add_boundary_node(x_fac_2, y_fac_2, s_2)
        m_fac.run()
        U_fac = np.array(
            [[m_fac.get_value(i, j) for j in range(n)] for i in range(n)])
        # Relative max-norm error against the analytic solution.
        E2_fac[Olim][k] = \
            norm((U_fac - u_).flatten(), np.inf)/norm(u_.flatten(), np.inf)
################################################################################
# 3D
# 3D slowness field; note s_1/s_2 are rebound here to the 3D source values.
s3d = lambda x, y, z: 1/(2 + vx*x + vy*y + vz*z)
s_1, s_2 = s3d(x_fac_1, y_fac_1, z_fac_1), s3d(x_fac_2, y_fac_2, z_fac_2)
def make_u3d(x_fac, y_fac, z_fac, vx, vy, vz, s):
    """Return the analytic 3D eikonal solution for a point source at
    (x_fac, y_fac, z_fac) with linear speed gradient (vx, vy, vz).

    NOTE(review): the parameter `s` is never used — the body references the
    module-level `s3d` instead. The callers below pass the 2-D `s`, which only
    works because the argument is ignored; confirm intent before changing
    either side (fixing the body alone would break the visible callers).
    """
    return lambda x, y, z: \
        (1/np.sqrt(vx**2 + vy**2 + vz**2)) * \
        np.arccosh(
            1 +
            s3d(x_fac, y_fac, z_fac)*s3d(x, y, z)*(vx**2 + vy**2 + vz**2)*
            ((x - x_fac)**2 + (y - y_fac)**2 + (z - z_fac)**2)/2)
# Exact 3D solution: minimum over the two sources.
u3d_1 = make_u3d(x_fac_1, y_fac_1, z_fac_1, vx, vy, vz, s)
u3d_2 = make_u3d(x_fac_2, y_fac_2, z_fac_2, vx, vy, vz, s)
u3d = lambda x, y, z: np.minimum(u3d_1(x, y, z), u3d_2(x, y, z))
E3 = dict()       # NOTE(review): allocated but never filled; only E3_fac is used
E3_fac = dict()
for Olim in marchers_3d:
    print(common3d.get_marcher_name(Olim))
    E3[Olim] = np.zeros(len(N3D))
    E3_fac[Olim] = np.zeros(len(N3D))
    for a, n in enumerate(N3D):
        print('- n = %d (%d/%d)' % (n, a + 1, len(N3D)))
        L = np.linspace(0, 1, n)
        X, Y, Z = np.meshgrid(L, L, L)
        u_ = u3d(X, Y, Z)
        S = s3d(X, Y, Z)
        h = 1/(n - 1)
        i_1, j_1, k_1 = y_fac_1/h, x_fac_1/h, z_fac_1/h
        i_2, j_2, k_2 = y_fac_2/h, x_fac_2/h, z_fac_2/h
        m_fac = Olim(S, h)
        # Factor nodes within R_fac of each source, as in the 2D case.
        R_1 = np.sqrt((x_fac_1 - X)**2 + (y_fac_1 - Y)**2 + (z_fac_1 - Z)**2)
        fc_1 = olim.FacCenter3d(i_1, j_1, k_1, s_1)
        for i, j, k in zip(*np.where(R_1 <= R_fac)):
            m_fac.set_node_fac_center(i, j, k, fc_1)
        m_fac.add_boundary_node(x_fac_1, y_fac_1, z_fac_1, s_1)
        R_2 = np.sqrt((x_fac_2 - X)**2 + (y_fac_2 - Y)**2 + (z_fac_2 - Z)**2)
        fc_2 = olim.FacCenter3d(i_2, j_2, k_2, s_2)
        for i, j, k in zip(*np.where(R_2 <= R_fac)):
            m_fac.set_node_fac_center(i, j, k, fc_2)
        m_fac.add_boundary_node(x_fac_2, y_fac_2, z_fac_2, s_2)
        m_fac.run()
        U_fac = np.array([[[m_fac.get_value(i, j, k) for k in range(n)]
                   for j in range(n)]
                  for i in range(n)])
        E3_fac[Olim][a] = \
            norm((u_ - U_fac).flatten(), np.inf)/norm(u_.flatten(), np.inf)
################################################################################
# Plotting: left panel 2D errors, right panel 3D errors, shared y axis.
fig, axes = plt.subplots(1, 2, sharex='col', sharey='all', figsize=(6.5, 2.5))
axes[0].set_ylabel(r'$\|u - U\|_\infty/\|u\|_\infty$')
ax = axes[0]
for Olim in marchers_2d:
    ax.loglog(N, E2_fac[Olim], label=common.get_marcher_plot_name(Olim),
              linewidth=1, marker='|', markersize=3.5)
ax.minorticks_off()
# Tick every third tested power of two.
N_pow_2d = np.arange(args.min_2d_power, args.max_2d_power + 1, 3)
ax.set_xticks(2**N_pow_2d + 1)
ax.set_xticklabels(['$2^{%d} + 1$' % p for p in N_pow_2d])
ax.set_xlabel('$N$')
ax.legend(loc='lower left', prop={'size': 8})
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
# Group the six 3D marchers into color by family and linestyle by variant.
cmap = [0, 1, 4, 3]
linestyles = ['-', '--', ':']
ax = axes[1]
it = 0
for Olim in marchers_3d:
    ax.loglog(N3D, E3_fac[Olim], label=common3d.get_marcher_plot_name(Olim),
              color=colors[cmap[it//3]], linestyle=linestyles[it % 3],
              linewidth=1, marker='|', markersize=3.5)
    it += 1
ax.minorticks_off()
N_pow_3d = np.arange(args.min_3d_power, args.max_3d_power + 1, 3)
ax.set_xticks(2**N_pow_3d + 1)
ax.set_xticklabels(['$2^{%d} + 1$' % p for p in N_pow_3d])
ax.set_xlabel('$N$')
ax.legend(loc='lower left', ncol=1, prop={'size': 8})
fig.tight_layout()
fig.show()
fig.savefig('qv_plots.eps')
| [
"common.get_marcher_plot_name",
"sys.path.insert",
"numpy.sqrt",
"common3d.get_marcher_plot_name",
"argparse.ArgumentParser",
"numpy.arange",
"pyolim.FacCenter3d",
"numpy.where",
"matplotlib.pyplot.style.use",
"numpy.linspace",
"common.get_marcher_name",
"pyolim.FacCenter",
"common3d.get_mar... | [((156, 181), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (179, 181), False, 'import argparse\n'), ((623, 674), 'sys.path.insert', 'sys.path.insert', (['(0)', "('../build/%s' % args.build_type)"], {}), "(0, '../build/%s' % args.build_type)\n", (638, 674), False, 'import sys\n'), ((675, 707), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../misc/py"""'], {}), "(0, '../misc/py')\n", (690, 707), False, 'import sys\n'), ((882, 909), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (888, 909), True, 'import matplotlib.pyplot as plt\n'), ((910, 988), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 8})\n", (916, 988), True, 'import matplotlib.pyplot as plt\n'), ((1004, 1024), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""bmh"""'], {}), "('bmh')\n", (1017, 1024), True, 'import matplotlib.pyplot as plt\n'), ((5622, 5688), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '"""col"""', 'sharey': '"""all"""', 'figsize': '(6.5, 2.5)'}), "(1, 2, sharex='col', sharey='all', figsize=(6.5, 2.5))\n", (5634, 5688), True, 'import matplotlib.pyplot as plt\n'), ((5943, 5997), 'numpy.arange', 'np.arange', (['args.min_2d_power', '(args.max_2d_power + 1)', '(3)'], {}), '(args.min_2d_power, args.max_2d_power + 1, 3)\n', (5952, 5997), True, 'import numpy as np\n'), ((6558, 6612), 'numpy.arange', 'np.arange', (['args.min_3d_power', '(args.max_3d_power + 1)', '(3)'], {}), '(args.min_3d_power, args.max_3d_power + 1, 3)\n', (6567, 6612), True, 'import numpy as np\n'), ((1140, 1191), 'numpy.arange', 'np.arange', (['args.min_2d_power', '(args.max_2d_power + 1)'], {}), '(args.min_2d_power, args.max_2d_power + 1)\n', (1149, 1191), True, 'import numpy as np\n'), ((1205, 1256), 'numpy.arange', 'np.arange', (['args.min_3d_power', '(args.max_3d_power + 1)'], {}), '(args.min_3d_power, 
args.max_3d_power + 1)\n', (1214, 1256), True, 'import numpy as np\n'), ((2204, 2233), 'common.get_marcher_name', 'common.get_marcher_name', (['Olim'], {}), '(Olim)\n', (2227, 2233), False, 'import common\n'), ((2403, 2423), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (2414, 2423), True, 'import numpy as np\n'), ((2439, 2456), 'numpy.meshgrid', 'np.meshgrid', (['L', 'L'], {}), '(L, L)\n', (2450, 2456), True, 'import numpy as np\n'), ((2644, 2692), 'numpy.sqrt', 'np.sqrt', (['((x_fac_1 - X) ** 2 + (y_fac_1 - Y) ** 2)'], {}), '((x_fac_1 - X) ** 2 + (y_fac_1 - Y) ** 2)\n', (2651, 2692), True, 'import numpy as np\n'), ((2704, 2733), 'pyolim.FacCenter', 'olim.FacCenter', (['i_1', 'j_1', 's_1'], {}), '(i_1, j_1, s_1)\n', (2718, 2733), True, 'import pyolim as olim\n'), ((2904, 2952), 'numpy.sqrt', 'np.sqrt', (['((x_fac_2 - X) ** 2 + (y_fac_2 - Y) ** 2)'], {}), '((x_fac_2 - X) ** 2 + (y_fac_2 - Y) ** 2)\n', (2911, 2952), True, 'import numpy as np\n'), ((2964, 2993), 'pyolim.FacCenter', 'olim.FacCenter', (['i_2', 'j_2', 's_2'], {}), '(i_2, j_2, s_2)\n', (2978, 2993), True, 'import pyolim as olim\n'), ((4141, 4172), 'common3d.get_marcher_name', 'common3d.get_marcher_name', (['Olim'], {}), '(Olim)\n', (4166, 4172), False, 'import common3d\n'), ((4350, 4370), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (4361, 4370), True, 'import numpy as np\n'), ((4389, 4409), 'numpy.meshgrid', 'np.meshgrid', (['L', 'L', 'L'], {}), '(L, L, L)\n', (4400, 4409), True, 'import numpy as np\n'), ((4639, 4708), 'numpy.sqrt', 'np.sqrt', (['((x_fac_1 - X) ** 2 + (y_fac_1 - Y) ** 2 + (z_fac_1 - Z) ** 2)'], {}), '((x_fac_1 - X) ** 2 + (y_fac_1 - Y) ** 2 + (z_fac_1 - Z) ** 2)\n', (4646, 4708), True, 'import numpy as np\n'), ((4718, 4754), 'pyolim.FacCenter3d', 'olim.FacCenter3d', (['i_1', 'j_1', 'k_1', 's_1'], {}), '(i_1, j_1, k_1, s_1)\n', (4734, 4754), True, 'import pyolim as olim\n'), ((4940, 5009), 'numpy.sqrt', 'np.sqrt', (['((x_fac_2 - 
X) ** 2 + (y_fac_2 - Y) ** 2 + (z_fac_2 - Z) ** 2)'], {}), '((x_fac_2 - X) ** 2 + (y_fac_2 - Y) ** 2 + (z_fac_2 - Z) ** 2)\n', (4947, 5009), True, 'import numpy as np\n'), ((5019, 5055), 'pyolim.FacCenter3d', 'olim.FacCenter3d', (['i_2', 'j_2', 'k_2', 's_2'], {}), '(i_2, j_2, k_2, s_2)\n', (5035, 5055), True, 'import pyolim as olim\n'), ((5821, 5855), 'common.get_marcher_plot_name', 'common.get_marcher_plot_name', (['Olim'], {}), '(Olim)\n', (5849, 5855), False, 'import common\n'), ((6351, 6387), 'common3d.get_marcher_plot_name', 'common3d.get_marcher_plot_name', (['Olim'], {}), '(Olim)\n', (6381, 6387), False, 'import common3d\n'), ((1833, 1859), 'numpy.sqrt', 'np.sqrt', (['(vx ** 2 + vy ** 2)'], {}), '(vx ** 2 + vy ** 2)\n', (1840, 1859), True, 'import numpy as np\n'), ((2759, 2781), 'numpy.where', 'np.where', (['(R_1 <= R_fac)'], {}), '(R_1 <= R_fac)\n', (2767, 2781), True, 'import numpy as np\n'), ((3019, 3041), 'numpy.where', 'np.where', (['(R_2 <= R_fac)'], {}), '(R_2 <= R_fac)\n', (3027, 3041), True, 'import numpy as np\n'), ((3678, 3714), 'numpy.sqrt', 'np.sqrt', (['(vx ** 2 + vy ** 2 + vz ** 2)'], {}), '(vx ** 2 + vy ** 2 + vz ** 2)\n', (3685, 3714), True, 'import numpy as np\n'), ((4783, 4805), 'numpy.where', 'np.where', (['(R_1 <= R_fac)'], {}), '(R_1 <= R_fac)\n', (4791, 4805), True, 'import numpy as np\n'), ((5084, 5106), 'numpy.where', 'np.where', (['(R_2 <= R_fac)'], {}), '(R_2 <= R_fac)\n', (5092, 5106), True, 'import numpy as np\n')] |
# Copyright 2020 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Code to reconstruct the purity via Importance Sampling
import numpy as np
import math
import cmath
from qutip import *
import random
from scipy import linalg
from src.ObtainMeasurements import *
from src.AnalyzeMeasurements import *
from src.PreprocessingImportanceSampling import *
### This script estimates the purity of a noisy GHZ realized in the experiment using uniform sampling and importance sampling from an ideal pure GHZ state
### Capable of simulating noisy GHZ state till N = 25 qubits !!!
### Importance sampling provides best performances for Nu ~ O(N) and NM ~O(2^N) !!
## Parameters
N = 16 # Number of qubits to analyze
d = 2**N
Nu = 50 # Number of random unitaries to be used
NM = d*4 # Number of projective measurements (shots) per random unitary
mode = 'CUE'
burn_in = 1 # determines the number of samples to be rejected during metropolis: (nu*burn_in)
### Step 1:: Create a quantum state
# The quantum state qstate is stored as numpy.array of type numpy.complex_
# qstate can be
# - a pure state |psi> represented by a numpy array of shape (2**N,)
# - a mixed state rho represented by a numpy array of shape (2**N, 2**N)
# An additional parameter p can be specified to admix the identity
# - |psi><psi| -> (1-p)*|psi><psi| + p*1/2**N or
# - rho -> (1-p)*rho + p*1/2**N
## An ideal GHZ state: (|0...0> + |1...1>)/sqrt(2)
qstate = np.zeros(2**N,dtype=np.complex_)
qstate[0] = 1./np.sqrt(2)
qstate[-1] = 1./np.sqrt(2)
### A random mixed state
#import qutip
#qstate = qutip.rand_dm(2**N).full()
#p_depo = 0.1
# Consider realizing a noisy version of the GHZ state experimentally. Noise given by depolarization noise strength p_depo
p_depo = 0.2
## Theoretical estimations:
p2_exp = (1-p_depo)**2 + (1-(1-p_depo)**2)/d ## purity of the realized noisy GHZ state
p2_theory = 1 ## Purity of the ideal pure GHZ state
fid = (1-p_depo) + p_depo/d ## Fidelity between the ideal and the experimental GHZ state
### Initiate Random Generator
a = random.SystemRandom().randrange(2 ** 32 - 1) #Init Random Generator
random_gen = np.random.RandomState(a)
### Perform Randomized measurements
print('Randomized measurements using uniform sampling with Nu = '+str(Nu)+' and NM = '+str(NM))
### Generate Random Unitaries
# One independent single-qubit rotation per qubit and per unitary.
unitaries=np.zeros((Nu,N,2,2),dtype=np.complex_)
for iu in range(Nu):
    for i in range(N):
        unitaries[iu,i]=SingleQubitRotation(random_gen,mode)
print('Random unitaries generated using uniform sampling')
### Simulate the randomized measurements
Meas_Data_uni = np.zeros((Nu,NM),dtype='int64') ## array to store the measurement results as integers representing the measured bitstrings
for iu in range(Nu):
    print('Data acquisition {:d} % \r'.format(int(100*iu/(Nu))),end = "",flush=True)
    prob = ObtainOutcomeProbabilities(N, qstate, unitaries[iu] , p_depo)
    Meas_Data_uni[iu,:] = Sampling_Meas(prob,N,NM)
print('Measurement data generated for uniform sampling \n')
## Estimate the uniform sampled purity
# X_uni[iu] is the per-unitary purity estimator; the final estimate is the
# bias-corrected mean over all unitaries.
X_uni = np.zeros(Nu)
for iu in range(Nu):
    print('Postprocessing {:d} % \r'.format(int(100*iu/(Nu))),end = "",flush=True)
    probe = get_prob(Meas_Data_uni[iu,:], N)
    X_uni[iu] = get_X(probe,N)
p2_uni = 0 # purity given by uniform sampling
p2_uni = unbias(np.mean(X_uni),N,NM)
print('Randomized measurements using importance sampling with Nu = '+str(Nu)+' and NM = '+str(NM))
### Step 1: Preprocessing step for importance sampling. Sample Y and Z rotation angles (2N angles for each unitary u)
# Importance sampling of the angles (theta_is) and (phi_is) using metropolis algorithm from an ideal GHZ state
theta_is, phi_is, n_r, N_s, p_IS = MetropolisSampling_pure(N, qstate,Nu, burn_in)
### Step: Randomized measurements
## Step 2a: Perform the actual experiment on your quantum machine
# Store angles theta_is, phi_is on the hard drive
# np.savetxt('theta_is.txt',theta_is) ## text file with Nu rows and N columns containing angles
# np.savetxt('phi_is.txt',phi_is) ## text file with Nu rows and N columns containing angles
# >>>> Run your quantum machine <<<<
# Load measurement results from hard drive as an array of shape (Nu,NM) containing integers
#Meas_Data_IS = np.load('MeasurementResults.npy',dtype='int64')
## Step 2b: Simulate randomized measurements with the generated importance sampled unitaries
### Generate the local importance sampled Random Unitaries
unitaries=np.zeros((Nu,N,2,2),dtype=np.complex_)
for iu in range(Nu):
    for i in range(N):
        unitaries[iu,i]=SingleQubitRotationIS(theta_is[i,iu],phi_is[i,iu])
print('Importance sampled random unitaries generated')
### Simulate the randomized measurements
Meas_Data_IS = np.zeros((Nu,NM),dtype='int64') ## array to store the measurement results as integers representing the measured bitstrings
for iu in range(Nu):
    print('Data acquisition {:d} % \r'.format(int(100*iu/(Nu))),end = "",flush=True)
    prob = ObtainOutcomeProbabilities(N, qstate, unitaries[iu] , p_depo)
    Meas_Data_IS[iu,:] = Sampling_Meas(prob,N,NM)
print('Measurement data generated for importance sampling')
## Step 3: Estimation of the purity given by importance sampling
X_imp = np.zeros(Nu)
for iu in range(Nu):
    print('Postprocessing {:d} % \r'.format(int(100*iu/(Nu))),end = "",flush=True)
    probe = get_prob(Meas_Data_IS[iu,:], N)
    X_imp[iu] = unbias(get_X(probe,N),N,NM)
p2_IS = 0 # purity given by importance sampling
# Reweight each estimator by its occurrence count n_r over the importance
# sampling probability p_IS (standard importance-sampling estimator).
for iu in range(Nu):
    p2_IS += X_imp[iu]*n_r[iu]/p_IS[iu,0]/N_s
### some performance illustrations
print('Fidelity of the importance sampler: ', np.round(100*fid,2), '%')
print('p2 (True value) = ', p2_exp)
print('p2 (uniform sampling) = ', p2_uni)
print('p2 (Importance sampling) = ', p2_IS)
print ('Error uniform: ', np.round(100*(np.abs(p2_uni-p2_exp)/p2_exp),2), '%')
print ('Error IS: ', np.round(100*(np.abs(p2_IS-p2_exp)/p2_exp),2), '%')
| [
"numpy.mean",
"numpy.abs",
"numpy.sqrt",
"numpy.zeros",
"numpy.random.RandomState",
"random.SystemRandom",
"numpy.round"
] | [((1946, 1981), 'numpy.zeros', 'np.zeros', (['(2 ** N)'], {'dtype': 'np.complex_'}), '(2 ** N, dtype=np.complex_)\n', (1954, 1981), True, 'import numpy as np\n'), ((2634, 2658), 'numpy.random.RandomState', 'np.random.RandomState', (['a'], {}), '(a)\n', (2655, 2658), True, 'import numpy as np\n'), ((2834, 2876), 'numpy.zeros', 'np.zeros', (['(Nu, N, 2, 2)'], {'dtype': 'np.complex_'}), '((Nu, N, 2, 2), dtype=np.complex_)\n', (2842, 2876), True, 'import numpy as np\n'), ((3095, 3128), 'numpy.zeros', 'np.zeros', (['(Nu, NM)'], {'dtype': '"""int64"""'}), "((Nu, NM), dtype='int64')\n", (3103, 3128), True, 'import numpy as np\n'), ((3557, 3569), 'numpy.zeros', 'np.zeros', (['Nu'], {}), '(Nu)\n', (3565, 3569), True, 'import numpy as np\n'), ((4948, 4990), 'numpy.zeros', 'np.zeros', (['(Nu, N, 2, 2)'], {'dtype': 'np.complex_'}), '((Nu, N, 2, 2), dtype=np.complex_)\n', (4956, 4990), True, 'import numpy as np\n'), ((5218, 5251), 'numpy.zeros', 'np.zeros', (['(Nu, NM)'], {'dtype': '"""int64"""'}), "((Nu, NM), dtype='int64')\n", (5226, 5251), True, 'import numpy as np\n'), ((5705, 5717), 'numpy.zeros', 'np.zeros', (['Nu'], {}), '(Nu)\n', (5713, 5717), True, 'import numpy as np\n'), ((1994, 2004), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2001, 2004), True, 'import numpy as np\n'), ((2021, 2031), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2028, 2031), True, 'import numpy as np\n'), ((3813, 3827), 'numpy.mean', 'np.mean', (['X_uni'], {}), '(X_uni)\n', (3820, 3827), True, 'import numpy as np\n'), ((6113, 6135), 'numpy.round', 'np.round', (['(100 * fid)', '(2)'], {}), '(100 * fid, 2)\n', (6121, 6135), True, 'import numpy as np\n'), ((2553, 2574), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (2572, 2574), False, 'import random\n'), ((6301, 6324), 'numpy.abs', 'np.abs', (['(p2_uni - p2_exp)'], {}), '(p2_uni - p2_exp)\n', (6307, 6324), True, 'import numpy as np\n'), ((6375, 6397), 'numpy.abs', 'np.abs', (['(p2_IS - p2_exp)'], {}), '(p2_IS - 
p2_exp)\n', (6381, 6397), True, 'import numpy as np\n')] |
"""Machine Learning 2 Section 10 @ GWU
Quiz 4 - Solution for Q4
Author: Xiaochi (George) Li"""
import torch
import numpy as np
from torch.autograd import Variable
import matplotlib.pyplot as plt
torch.manual_seed(42)
# Target function: t(p) = exp(-|p|) * sin(pi*p) sampled on [-3, 3].
size = 100
p = np.linspace(-3, 3, size)
t = np.exp(-np.abs(p)) * np.sin(np.pi * p)
# Reshape to (size, 1) column tensors and move to GPU.
p = Variable(torch.from_numpy(p)).float().view(size, -1).cuda()
t = Variable(torch.from_numpy(t)).float().view(size, -1).cuda()
R = 1 # Input size
S = 2000 # Number of neurons
a_size = 1 # Network output size
# Single-hidden-layer ReLU network: 1 -> S -> 1.
model = torch.nn.Sequential(
    torch.nn.Linear(R, S),
    torch.nn.ReLU(),
    torch.nn.Linear(S, a_size)
)
model.cuda()
performance_index = torch.nn.MSELoss()
learning_rate = 0.1
max_epoch = 5000
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for epoch in range(max_epoch):
    a = model(p)
    loss = performance_index(a, t)
    print(epoch, loss.item())
    # Early stopping once the fit is good enough.
    if loss.item() < 1e-4:
        break
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# visualize
prediction = model(p).detach().cpu().numpy()
real = t.cpu().numpy()
x = p.cpu().numpy()
plt.plot(x, real, label="Actual")
plt.scatter(x, prediction, label="NN Prediction")
plt.legend()
plt.title("title")  # NOTE(review): placeholder title left as-is
plt.show()
| [
"torch.manual_seed",
"torch.nn.ReLU",
"numpy.abs",
"matplotlib.pyplot.plot",
"torch.from_numpy",
"torch.nn.MSELoss",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"torch.nn.Linear",
"numpy.sin",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((198, 219), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (215, 219), False, 'import torch\n'), ((235, 259), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', 'size'], {}), '(-3, 3, size)\n', (246, 259), True, 'import numpy as np\n'), ((691, 709), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (707, 709), False, 'import torch\n'), ((1140, 1173), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'real'], {'label': '"""Actual"""'}), "(x, real, label='Actual')\n", (1148, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1223), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'prediction'], {'label': '"""NN Prediction"""'}), "(x, prediction, label='NN Prediction')\n", (1185, 1223), True, 'import matplotlib.pyplot as plt\n'), ((1224, 1236), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1234, 1236), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1255), 'matplotlib.pyplot.title', 'plt.title', (['"""title"""'], {}), "('title')\n", (1246, 1255), True, 'import matplotlib.pyplot as plt\n'), ((1256, 1266), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1264, 1266), True, 'import matplotlib.pyplot as plt\n'), ((285, 302), 'numpy.sin', 'np.sin', (['(np.pi * p)'], {}), '(np.pi * p)\n', (291, 302), True, 'import numpy as np\n'), ((580, 601), 'torch.nn.Linear', 'torch.nn.Linear', (['R', 'S'], {}), '(R, S)\n', (595, 601), False, 'import torch\n'), ((607, 622), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (620, 622), False, 'import torch\n'), ((628, 654), 'torch.nn.Linear', 'torch.nn.Linear', (['S', 'a_size'], {}), '(S, a_size)\n', (643, 654), False, 'import torch\n'), ((272, 281), 'numpy.abs', 'np.abs', (['p'], {}), '(p)\n', (278, 281), True, 'import numpy as np\n'), ((317, 336), 'torch.from_numpy', 'torch.from_numpy', (['p'], {}), '(p)\n', (333, 336), False, 'import torch\n'), ((381, 400), 'torch.from_numpy', 'torch.from_numpy', (['t'], {}), '(t)\n', (397, 400), False, 'import torch\n')] |
'''This is the Channel module
It can simulate a river channel basing on the inputs it is provided.
It consists of a centerline, an inner channel, and arbitrary number of
outer banks.
All functions apply to it should be continuous.
The offsets from banks to centerline are in sn coordinate system, and
transform into xy coordinate system later on.
'''
import numpy as np
from . import functions
import random
import math
from math import pi, sqrt, log, ceil, floor
import csv
from .cPipe import Pipe
import matplotlib.pyplot as plt
import sys
class Channel(Pipe):
    def __init__(self, x_len=100, wbf_min=0, valley_slope=0.01, dx=1, zd=1000):
        '''Channel class initiator

        x_len -- int; valley length in x direction
        wbf_min -- float; minimum bankfull width
        valley_slope -- float; slope of valley
        dx -- int; resolution in x direction
        zd -- datum elevation, forwarded to the Pipe base class

        class private variables:
        hbf -- float; average bankfull height
        thalweg -- array; z values of thalweg
        curvature -- array; curvature of centerline
        xshapePoints -- int; number of points in each Xshape
        xshape_x -- array; x values of Xshape
        xshape_y -- array; y values of Xshape
        xshape_z -- array; z values of Xshape
        z_center -- array; z values of centerline
        dynamicCurv -- array; values of curvature of center line
        tz -- int; trapezoid xshape bottom points. -1 means asymetric
        '''
        super().__init__(int(x_len), valley_slope, dx, zd)
        # Geometry is stored in grid units, hence the division by dx.
        self.wbf_min = wbf_min/dx
        self.turns_center = []
        # Lazily computed quantities start as None and are filled by the
        # corresponding set*/get* methods.
        self.hbf = None
        self.curvature = None
        self.xshapePoints = 21
        self.xshape_x = None
        self.xshape_y = None
        self.xshape_z = None
        self.z_center = None
        self.dynamicCurv = None
        self.channelUndulation = None
        self.tz = -1
    def shapeCenterline(self, fun):
        '''Shape the centerline. Basically recalculate the centerline.

        fun -- callable mapping an array of valley x values to a new
               (x, y) pair describing the reshaped valley centerline.

        The existing s-n offsets of the centerline (n_v) are re-projected
        onto the new valley curve segment by segment via
        functions.sn_to_xy, and self.x_v / self.y_center are replaced.
        '''
        x_v = self.x_v
        n_v = self.getCenterline_y()
        # Deduplicate and sort the valley x coordinates before reshaping.
        x_v_valley = list(set(x_v.tolist()))
        x_v_valley.sort()
        x_v_valley = np.array(x_v_valley)
        x_v_valley, y_v = fun(x_v_valley)
        y_v = y_v/self.dx  # back to grid units
        x_max = np.amax(x_v_valley)
        out_x, out_y = [], []
        for i in range(len(x_v)):
            x = x_v[i]
            # Index of the valley segment [x1, x2] this point belongs to.
            ind = np.where(x_v_valley == x)[0][0]
            # NOTE(review): points at the last valley x are dropped (no
            # following segment), so the output arrays may be shorter.
            if x == x_max:
                continue
            x1, x2 = x_v_valley[ind], x_v_valley[ind+1]
            y1, y2 = y_v[ind], y_v[ind+1]
            # Convert the stored normal offset n_v[i] back to xy coordinates
            # relative to the new segment.
            x_new, y_new = functions.sn_to_xy(x1, y1, x2, y2, n_v[i])
            out_x.append(x_new)
            out_y.append(y_new)
        self.x_v = np.array(out_x)
        self.y_center = np.array(out_y)
def getRiverSlope(self):
'''Return river slope'''
return self.getPipeSlope()
def setXShapePoints(self, n):
'''Set how many points in one x-section shape.'''
self.xshapePoints = n
def setHbfManual(self, hbf):
'''Mannually set self.hbf'''
self.hbf = hbf/self.dx
def setHbf(self, d50=0.01, css=0.047, g_s=1922, g_w=1000):
'''Automatically calculate Hbf'''
self.hbf = functions.shields(d50, css, self.getRiverSlope(), g_s, g_w)/self.dx
def getHbf(self):
'''Return self.hbf'''
if self.hbf is None:
self.setHbf()
return self.hbf
def setTZ(self, n):
self.tz = n
def setCurvature(self, fun):
'''Mannually set centerline curvature
fun -- function to calculate curvature
'''
x = self.x_v
dummy, self.dynamicCurv = fun(x)
    def getDynamicCurv(self):
        '''Return self.dynamicCurv, computing it first if unset.

        NOTE(review): setDynamicCurv is not defined in this class --
        presumably it is inherited from Pipe; if not, this lazy branch
        raises AttributeError. Confirm against the Pipe base class.
        '''
        if self.dynamicCurv is None:
            self.setDynamicCurv()
        return self.dynamicCurv
    def createInnerChannel(self, leftFun=None, rightFun=None, thalwegFun=None):
        '''Create most inner channel of river
        leftFun -- function to calculate left inner bank
        rightFun -- function to calculate right inner bank
        thalwegFun -- function to calculate thalweg
        Value will be modified:
        self.levels_x
        self.levels_y
        self.levels_z
        self.levels_n
        '''
        self.setThalweg(thalwegFun)
        thalweg = self.getThalweg()
        # Bank tops are referenced to the highest point of the undulating
        # thalweg (setThalweg is expected to have set channelUndulation).
        orig_thalweg = thalweg+self.channelUndulation
        thalweg_max = np.amax(orig_thalweg)
        z_start = thalweg_max - self.channelUndulation
        # Convert bankfull height/half-width back to real-world units for
        # setLevel (inherited from Pipe).
        hbf = self.getHbf()*self.dx
        wbf = self.wbf_min/2*self.dx
        self.setLevel(hbf, z_start, wbf, 'left', leftFun, True)
        self.setLevel(hbf, z_start, wbf, 'right', rightFun, True)
def getAveWbf(self):
'''Return average bankfull width.'''
if self.levels_y['left'] == []:
self.createInnerChannel()
bf = self.levels_n['left'][0] + np.absolute(self.levels_n['right'][0])
return np.average(bf)*self.dx
def getAveHbf(self):
'''Return average bankfull height.'''
if self.levels_y['left'] == []:
self.createInnerChannel()
thalweg = self.getThalweg()
flat_thalweg = thalweg + self.channelUndulation
thalweg_max = np.amax(flat_thalweg)
diff = thalweg_max - flat_thalweg
return (np.average(diff) + self.getHbf())*self.dx
def getCoWbf(self):
'''Return coefficient of variation of bankfull width.'''
ave = self.getAveWbf()
std = np.std(self.levels_n['left'][0]*self.dx + (np.absolute(self.levels_n['right'][0])*self.dx))
return std/ave
def getCoHbf(self):
'''Return coefficient of variation of bankfull width.'''
thalweg = self.getThalweg()
flat_thalweg = thalweg + self.channelUndulation
thalweg_max = np.amax(flat_thalweg)
diff = thalweg_max - flat_thalweg
ave = (np.average(diff) + self.getHbf())*self.dx
std = np.std(diff*self.dx)
return std/ave
def getXShape(self):
'''Return x, y, z values for Xshape of the whole channel'''
if self.xshape_x is None:
self.setXShape()
return self.xshape_x, self.xshape_y, self.xshape_z
def getCenterlineElevation(self):
'''Return z values for centerline.'''
if self.xshape_x is None:
self.setXShape()
return self.z_center
def getXShapePlot(self):
'''return matplotlib plot object that contains X-Shape plots of the river Channel.'''
cur_v = self.getDynamicCurv()
maxCur = np.amax(cur_v)
minCur = np.amin(cur_v)
# If no curvature at all, plot at middle point
if maxCur == minCur or self.tz != -1:
fig, ax = plt.subplots(1, 1)
fig.suptitle('X-Shape for Channel')
midInd = floor(len(self.x_v)/2)
wbf = abs(self.levels_n["left"][0][midInd]) + abs(self.levels_n["right"][0][midInd])
if self.tz == -1:
y, z = self.pointXShape(midInd, maxCur, wbf, self.xshapePoints)
else:
y, z = self.suXShape(midInd, wbf, self.tz, self.xshapePoints)
z = z + midInd*self.x_slope
y, z = self.addBankPoints(y, z, midInd)
y = y*self.dx
z = z*self.dx
ax.plot(y, z, 'k-', marker='o', label='x = '+str(midInd))
plt.xlabel('Y (related to center of channel)')
plt.ylabel('Z')
plt.legend()
return fig
else:
abs_cur_v = np.absolute(cur_v)
fig, ax = plt.subplots(2, 1, sharex=True)
fig.suptitle('Max Curvature X-Shape vs. Zero Curvature X-Shape')
plt.subplot(212)
indMax = np.argmax(abs_cur_v)
maxCur = cur_v[indMax]
wbf = abs(self.levels_n["left"][0][indMax]) + abs(self.levels_n["right"][0][indMax])
y, z = self.pointXShape(indMax, maxCur, wbf, self.xshapePoints)
si = self.getCenterline_sn()[indMax]
z = z + si*self.getPipeSlope()
y, z = self.addBankPoints(y, z, indMax)
plt.plot(y, z, 'k-', marker='o', label='Max Curvature:\nx = '+str(indMax))
plt.xlabel('Y (related to center of channel)')
plt.ylabel('Z')
plt.legend()
plt.subplot(211)
indMin = np.argmin(abs_cur_v)
minCur = cur_v[indMin]
wbf = abs(self.levels_n["left"][0][indMin]) + abs(self.levels_n["right"][0][indMin])
y, z = self.pointXShape(indMin, maxCur, wbf, self.xshapePoints)
si = self.getCenterline_sn()[indMin]
z = z + si*self.getPipeSlope()
y, z = self.addBankPoints(y, z, indMin)
y = y*self.dx
z = z*self.dx
plt.plot(y, z, 'k-', marker='o', label='Min Curvature:\nx = '+str(indMin))
plt.ylabel('Z')
plt.legend()
return fig
    def setXShape(self, n=-1):
        '''Calculate x, y, z values for Xshape of the whole channel.
        Also calculate the z values of centerline.
        n -- flat-base point count for the symmetric shape; -1 (default)
             selects the asymmetric, curvature-driven shape.
        xshapePointsDict: {(x, y): [z, (x_center, y_center)]}
        '''
        out_x, out_y, out_z = [], [], []
        xshapePointsList = []  # NOTE(review): never used below
        center_z = []
        y_center = self.getCenterline_y()
        s_center = self.getCenterline_sn()
        pipe_slope = self.getPipeSlope()
        cur_v = self.getDynamicCurv()
        maxCur = np.amax(np.absolute(cur_v))
        asFlag = True # asymmetric flag
        if n != -1:
            asFlag = False # innerPipePoints dict will be empty
        # One polyline per cross-section sample point, collected along x.
        xshape_lines = [[] for i in range(self.xshapePoints)]
        for ind in range(len(y_center)-1):
            wbf = abs(self.levels_n['left'][0][ind]) + abs(self.levels_n['right'][0][ind])
            centerOffset = (self.levels_n['left'][0][ind] + self.levels_n['right'][0][ind])/2
            x1 = self.x_v[ind]
            y1 = y_center[ind]
            x2 = self.x_v[ind+1]
            y2 = y_center[ind+1]
            s = s_center[ind]
            # This if statement will determine whether it is AU or SU
            if asFlag:
                y_temp, z = self.pointXShape(ind, maxCur, wbf, self.xshapePoints)
            else:
                y_temp, z = self.suXShape(ind, wbf, n, self.xshapePoints)
            y_temp = y_temp + centerOffset
            # Convert section-local (s, n) samples into map (x, y) coordinates.
            real_x, real_y = functions.sn_to_xy(x1, y1, x2, y2, y_temp)
            # the following line may need to be commented out
            z = z - pipe_slope*s # use s instead of x
            ###############################################
            # if asFlag:
            for i in range(len(xshape_lines)):
                xshape_lines[i].append((real_x[i], real_y[i], z[i]))
            # else:
            #     out_x += real_x.tolist()
            #     out_y += real_y.tolist()
            #     out_z += z.tolist()
            #find z for center line
            center_z.append(self.calCenter_z(real_x, real_y, z, x1, y1))
        # Duplicate the last centerline z so lengths match the centerline.
        center_z.append(center_z[-1])
        x_min = floor(min(self.innerPipePoints.keys()))
        x_max = ceil(max(self.innerPipePoints.keys())) + 1
        # Bucket of known (y, z) points per integer x column.
        markPoints = [[] for i in range(ceil(x_max) - min(floor(x_min), 0))]
        for i in range(len(self.levels_x['left'][0])):
            x = self.levels_x['left'][0][i]
            y = self.levels_y['left'][0][i]
            z = self.levels_z['left'][0][i]
            markPoints[int(x)].append((y, z))
        for i in range(len(self.levels_x['right'][0])):
            x = self.levels_x['right'][0][i]
            y = self.levels_y['right'][0][i]
            z = self.levels_z['right'][0][i]
            markPoints[int(x)].append((y, z))
        for line in xshape_lines:
            line = functions.deleteCycles(line)
            for (x, y, z) in line:
                markPoints[x].append((y, z))
        # Interpolate a z for every inner-pipe point from the bucketed
        # (y, z) samples of its x column.
        for x in self.innerPipePoints.keys():
            innerPoint_y = self.innerPipePoints[x]
            xshape_yz = markPoints[x]
            if len(xshape_yz) == 0:
                continue
            xshape_yz.sort()
            xshape_y = [y for (y, z) in xshape_yz]
            xshape_z = [z for (y, z) in xshape_yz]
            for y in innerPoint_y:
                ind1, ind2 = functions.indexBound(y, xshape_y)
                if ind1 == ind2:
                    z = xshape_z[ind1]
                else:
                    z1 = xshape_z[ind1]
                    z2 = xshape_z[ind2]
                    y1 = xshape_y[ind1]
                    y2 = xshape_y[ind2]
                    alpha = (y-y1)/(y2-y1)
                    z = z1*(1-alpha) + z2*alpha
                out_x.append(x)
                out_y.append(y)
                out_z.append(z)
        self.xshape_x = np.array(out_x)
        self.xshape_y = np.array(out_y)
        self.xshape_z = np.array(out_z)
        self.z_center = np.array(center_z)
    def addBoulders(self, num, size_mean, size_std, height):
        '''
        Add boulders
        num -- number of boulders to place
        size_mean, size_std -- normal distribution of the footprint size
                               (physical units)
        height -- boulder height (physical units)
        avail_pts - nested list;
                    elem: [set(available y values), (y1, z1), ...]
        '''
        # Convert to grid units.
        size_mean = size_mean/self.dx
        size_std = size_std/self.dx
        height = height/self.dx
        x_min = np.amin(self.xshape_x)
        x_min = int(min(x_min, 0))
        x_max = int(np.amax(self.xshape_x) + 1)
        # One entry per x column; the set holds y values still free.
        avail_pts = [[set()] for i in range(x_min, x_max)]
        for i in range(len(self.xshape_x)):
            x = int(self.xshape_x[i])
            avail_pts[x][0].add(self.xshape_y[i])
        area = []
        check_x = set(list(range(x_min, x_max)))
        while num > 0:
            # cutArea reserves a random footprint; [] means no space left.
            area, check_x = self.cutArea(avail_pts, size_mean, size_std, check_x, x_min, x_max)
            if area == []:
                break
            boulder = self.createBoulder(area, height)
            self.updateBoulder(boulder)
            num -= 1
def addCheckDam(self, loc, height, thick):
'''
Add check dam
loc - location along meandering stream.
height - height from the centerline point.
thick - how thick is the dam.
'''
height = height/self.dx
thick = thick/self.dx
loc_ind = np.where(self.s_center > loc)[0]
loc_ind = np.amin(loc_ind)
s = self.getSlope()[loc_ind]
x_cp = self.x_v[loc_ind]
y_cp = self.y_center[loc_ind]
z_cp = self.z_center[loc_ind]
lf_range = np.amax(self.levels_n['left'][0])
rt_range = np.amin(self.levels_n['right'][0])
# x_len_inc, y_len_inc, x_wid_inc, y_wid_inc = 0, 0, 0, 0
if abs(s) == math.inf:
x_len_inc = 1
y_len_inc = 0
x_wid_inc = 0
y_wid_inc = 1
elif s == 0:
x_len_inc = 0
y_len_inc = 1
x_wid_inc = 1
y_wid_inc = 0
elif abs(s) > 1:
x_len_inc = 1
y_len_inc = -1/s
x_wid_inc = 1/s
y_wid_inc = 1
else:
x_len_inc = s
y_len_inc = -1
x_wid_inc = 1
y_wid_inc = 1/s
pt_crt_x, pt_crt_y = round(x_cp), round(y_cp)
ck_dam_pts = []
for dummy in range(int(lf_range)):
ck_dam_pts.append((pt_crt_x, pt_crt_y))
pt_crt_x = round(pt_crt_x - x_len_inc)
pt_crt_y = round(pt_crt_y - y_len_inc)
for i in range(thick):
pt_wid_x = round(pt_crt_x + i*x_wid_inc)
pt_wid_y = round(pt_crt_y + i*y_wid_inc)
ck_dam_pts.append((pt_wid_x, pt_wid_y))
pt_crt_x, pt_crt_y = round(x_cp), round(y_cp)
for dummy in range(abs(int(rt_range))):
ck_dam_pts.append((pt_crt_x, pt_crt_y))
pt_crt_x = round(pt_crt_x + x_len_inc)
pt_crt_y = round(pt_crt_y + y_len_inc)
for i in range(thick):
pt_wid_x = round(pt_crt_x + i*x_wid_inc)
pt_wid_y = round(pt_crt_y + i*y_wid_inc)
ck_dam_pts.append((pt_wid_x, pt_wid_y))
for (x, y) in ck_dam_pts:
ind_x = np.where(self.xshape_x == x)[0]
ind_y = np.where(self.xshape_y == y)[0]
# if len(np.intersect1d(ind_x, ind_y)) == 0:
# print('ind_x', ind_x)
# print('ind_y', ind_y)
# print('x, y', x, y)
inter = np.intersect1d(ind_x, ind_y)
if len(inter) > 0:
ind = np.intersect1d(ind_x, ind_y)[0]
self.xshape_z[ind] = z_cp + height
def tolist(self):
'''Return x, y, z values for all levels in a secondary list'''
x = []
y = []
z = []
x += self.x_v.tolist()
y += self.getCenterline_y().tolist()
z += self.getThalweg().tolist()
self.helpAppend(x, self.levels_x)
self.helpAppend(y, self.levels_y)
self.helpAppend(z, self.levels_z)
return [x, y, z]
def tocsv(self, outfile):
'''Outwrite xy values and xz values of all banks to output file.'''
header = ["x", "y", 'z']
out = [header]
xyz = self.tolist()
xyz_out = [[round(xyz[0][i]*self.dx, 3), round(xyz[1][i]*self.dx, 3), round(xyz[2][i]*self.dx, 3)] for i in range(len(xyz[0]))]
out += xyz_out
with open(outfile+".csv", 'w') as cf:
cw = csv.writer(cf)
cw.writerows(out)
    def __str__(self):
        '''Return a human-readable summary of the river's key statistics.'''
        sl = self.getSL()
        aveWbf = self.getAveWbf()
        aveHbf = self.getAveHbf()
        slope = self.getRiverSlope()
        coWbf = self.getCoWbf()
        coHbf = self.getCoHbf()
        s = 'Sinuosity:'+str(round(sl, 3))+'\n'
        s += 'Channel Slope:'+str(slope)+'\n'
        s += 'Average Width of Inner Channel:'+str(round(aveWbf, 3))+'\n'
        s += 'Average Height of Inner Channel:'+str(round(aveHbf, 3))+'\n'
        s += 'Coefficient of Variation (W_ic):'+str(round(coWbf, 3))+'\n'
        s += 'Coefficient of Variation (H_ic):'+str(round(coHbf, 3))+'\n'
        # Outer-bank offsets are reported only when extra levels exist
        # (index 0 is the inner channel itself).
        if len(self.levels_n['left']) == 1:
            return s
        for i in range(1, len(self.levels_n['left'])):
            s += 'Average Width Offset of '+'L'+str(i)+' Outer Bank is: '+str(round(np.average(self.levels_n['left'][i])*self.dx, 3)) + '\n'
        if len(self.levels_n['right']) == 1:
            return s
        for i in range(1, len(self.levels_n['right'])):
            s += 'Average Width Offset of '+'R'+str(i)+' Outer Bank is: '+str(abs(round(np.average(self.levels_n['right'][i])*self.dx, 3))) + '\n'
        return s
    def constructChannel(self):
        ''' Construct channel based on the information stored in Channel.
        The construction follows following steps:
        1. Build up centerline.
        2. Build up thalweg.
        3. Build up inner channels.
        4. Build up xshape points.
        5. Build up outer banks.
        '''
        # Build up centerline.
        self.setCenterline()
        self.setCenterline_sn()
        # NOTE(review): `goal` is not defined in this method or as a
        # parameter; unless it is a module-level name elsewhere in the file,
        # these calls raise NameError -- confirm against the full module.
        self.loopCenterline(goal)
        # Build up thalweg.
        self.setThalweg()
        # Build up inner channels.
        self.createInnerChannel()
        self.loopHbf(goal)
        ##############################################################
def helpAppend(self, li, dic):
for array in dic["left"]:
li += array.tolist()
for array in dic["right"]:
li += array.tolist()
def pointXShape(self, ind, maxCur, wbf, n):
'''Return y values and z values of XSection of given x
n -- number of points to calculate XSection
'''
cur = self.getDynamicCurv()[ind]
pipe_slope = self.getPipeSlope()
si = self.getCenterline_sn()[ind]
xVal = np.round(self.x_v[ind])
if maxCur == 0:
B = 1/2
else:
B = 1/2 * (1 - abs(cur/maxCur))
if B < 0.1:
B = 0.1
if B == 1:
L = 1
else:
L = -1*log(2)/log(B)
lbx = self.levels_x['left'][0]
lb = self.levels_z['left'][0]
rbx = self.levels_x['right'][0]
rb = self.levels_z['right'][0]
lb_ind = np.where(lbx == xVal)[0]
if len(lb_ind) == 0:
bankH = rb[np.where(rbx == xVal)[0][0]]
else:
bankH = lb[lb_ind[0]]
hbf = bankH - self.thalweg[ind]
n_y = np.array([-wbf*x/(n+1) + (1/2)*wbf for x in range(1, n+1)])
Y = (wbf/2-n_y) / wbf
if cur <= 0:
n_z = 4 * hbf * (Y**L) * (1-Y**L)
else:
n_z = 4 * hbf * ((1-Y)**L) * (1-(1-Y)**L)
n_z = self.thalweg[ind] + hbf - n_z
return n_y, n_z
def suXShape(self, ind, wbf, tzn, n):
'''Return y values and z values of Symmetric XSection of given x.
ind - index of x value on centerline
tzn - number of points on base
n - number of points on XS
'''
xVal = np.round(self.x_v[ind])
lbx = self.levels_x['left'][0]
lb = self.levels_z['left'][0]
rbx = self.levels_x['right'][0]
rb = self.levels_z['right'][0]
lb_ind = np.where(lbx == xVal)[0]
if len(lb_ind) == 0:
bankH = rb[np.where(rbx == xVal)[0][0]]
else:
bankH = lb[lb_ind[0]]
hbf = bankH - self.thalweg[ind]
n_y = np.array([-wbf*x/(n+1) + (1/2)*wbf for x in range(1, n+1)])
n_z = []
sidePoints = floor(((n-tzn)/2) + 1)
if (n-tzn) % 2 != 0:
tzn += 1
for i in range(1, sidePoints):
n_z.append((i/sidePoints)*hbf)
zEnd = n_z[::-1]
n_z += [hbf] * tzn
n_z += zEnd
n_z = self.thalweg[ind] + hbf - n_z
return n_y, np.array(n_z)
def addBankPoints(self, y, z, ind):
'''Add bank points to xshape points.
y - y values for xshape points
z - z values for xshape points
ind - where the xshape points are calculated
Return:
y, z with bank points added
'''
leftEdge = y[0]-(y[1]-y[0])
y = np.append(leftEdge, y)
rightEdge = y[-1]+(y[1]-y[0])
y = np.append(y, rightEdge)
z = np.append(self.levels_z['left'][0][0], z)
z = np.append(z, self.levels_z['left'][0][0])
for i in range(1, len(self.levels_n['left'])):
y = np.append(self.levels_n['left'][i][ind] - self.levels_n['left'][0][ind] + leftEdge, y)
z = np.append(self.levels_z['left'][i][0], z)
for i in range(1, len(self.levels_n['right'])):
y = np.append(y, self.levels_n['right'][i][ind] - self.levels_n['right'][0][ind] + rightEdge)
z = np.append(z, self.levels_z['right'][i][0])
return y, z
def setDynamicCurv(self):
''' Calculate the dynamic curve of the centerline.
'''
x_v = self.getCenterline_x()
y_v = self.getCenterline_y()
slopeVectorList = []
for i in range(len(x_v)-1):
v = (x_v[i+1]-x_v[i], y_v[i+1]-y_v[i])
slopeVectorList.append(v)
cur = []
piCheck = pi/2
for i in range(len(slopeVectorList)-1):
v1 = slopeVectorList[i]
v2 = slopeVectorList[i+1]
angle = functions.angle_between(v1, v2)
if np.cross(v1, v2) >= 0:
cur.append(functions.angle_between(v1, v2))
else:
cur.append(functions.angle_between(v1, v2) * -1)
cur.append(cur[-1])
cur.append(cur[-1])
self.dynamicCurv = np.array(cur)
    def calCenter_z(self, real_x, real_y, z, x1, y1):
        '''Calculate the z value for the centerline point.

        real_x, real_y, z -- cross-section sample coordinates/elevations.
        x1, y1 -- the centerline point whose elevation is wanted.
        Returns the nearest sample's z, or an x-wise linear interpolation
        between the two nearest samples.
        '''
        xpoints = [(real_x[i], real_y[i]) for i in range(len(real_x))]
        minInd = 0
        minDist = functions.pointDist(xpoints[0], (x1, y1))
        diff = [minDist]
        # Track the closest sample and keep all distances for the
        # second-closest lookup below.
        for i in range(1, len(xpoints)):
            dist = functions.pointDist(xpoints[i], (x1, y1))
            diff.append(dist)
            if dist < minDist:
                minInd = i
                minDist = dist
        # Exact hit, or only one sample -- no interpolation possible.
        if minDist == 0 or (minInd-1 < 0 and minInd+1 >= len(diff)) :
            return z[minInd]
        elif minInd+1 < len(diff) and (minInd-1 < 0 or diff[minInd-1] >= diff[minInd+1]):
            minInd2 = minInd+1
        else:
            minInd2 = minInd-1
        z1 = z[minInd]
        z2 = z[minInd2]
        minX1 = real_x[minInd]
        minX2 = real_x[minInd2]
        # Interpolate in x between the two nearest samples; average when
        # they share the same x.
        if minX1 == minX2:
            return (z1+z2)/2
        elif minX1 < minX2:
            alpha = (x1-minX1)/(minX2-minX1)
            return alpha*z1 + (1-alpha)*z2
        else:
            alpha = (x1-minX2)/(minX1-minX2)
            return alpha*z1 + (1-alpha)*z2
    def updateXShapePointsList(self, pointsList, x_v, y_v, z_v, x_center, y_center):
        '''Update the XShape points in XShape points Dictionary.
        pointsList -- [(x, y, z)]; extended in place and also returned.
        x_v, y_v, z_v -- coordinates of one cross-section line.
        x_center, y_center -- the section's centerline point (unused in the
                              active code path below).
        '''
        ##################################
        # checkRange = max(int(np.amax(y_v) - np.amin(y_v)), int(np.amax(x_v) - np.amin(x_v)))
        # x_check = self.getCenterline_x()
        # y_check = self.getCenterline_y()
        # inCount = outCount = 0
        # for i in range(len(x_v)):
        #     x = int(x_v[i])
        #     y = int(y_v[i])
        #
        #     left = max(int(x_center - checkRange), 0)
        #     right = min(int(x_center + checkRange), len(x_check))
        #
        #     dist = np.square(x_check[left:right] - x) + np.square(y_check[left:right] - y)
        #     minIndex = np.argmin(dist)
        #     minDistX = int(x_check[left:right][minIndex])
        #
        #     if minDistX == int(x_center):
        #         pointsList.append((x, y, z_v[i]))
        ##################################
        lbx = self.levels_x['left'][0]
        lby = self.levels_y['left'][0]
        rbx = self.levels_x['right'][0]
        rby = self.levels_y['right'][0]
        head = (x_v[0], y_v[0])
        tail = (x_v[-1], y_v[-1])
        # Squared distances from each section end to both inner banks.
        head_dist_lb = np.square(lbx - head[0]) + np.square(lby - head[1])
        head_dist_rb = np.square(rbx - head[0]) + np.square(rby - head[1])
        tail_dist_lb = np.square(lbx - tail[0]) + np.square(lby - tail[1])
        tail_dist_rb = np.square(rbx - tail[0]) + np.square(rby - tail[1])
        # Keep the full half of the section when its end is near a bank
        # (squared distance < 10); otherwise keep only 4 points around the
        # middle.
        if min(np.amin(head_dist_lb), np.amin(head_dist_rb)) < 10:
            for i in range(round(len(x_v)/2)):
                pointsList.append((x_v[i], y_v[i], z_v[i]))
        else:
            for i in range(round(len(x_v)/2)-4, round(len(x_v)/2)):
                pointsList.append((x_v[i], y_v[i], z_v[i]))
        if min(np.amin(tail_dist_lb), np.amin(tail_dist_rb)) < 10:
            for i in range(round(len(x_v)/2), len(x_v)):
                pointsList.append((x_v[i], y_v[i], z_v[i]))
        else:
            for i in range(round(len(x_v)/2), round(len(x_v)/2)+4):
                pointsList.append((x_v[i], y_v[i], z_v[i]))
        ##################################
        return pointsList
def cutArea(self, avail_pts, size_mean, size_std, check, x_min, x_max):
'''
avail_pts - nested list;
elem: [set(available y values), (y1, z1), ...]
'''
find = False
area = []
length = int(np.random.normal(size_mean, size_std))
length = max(length, 5)
length = min(length, size_mean+3*size_std)
width = int(np.random.normal(size_mean, size_std))
width = max(width, 5)
width = min(width, size_mean+3*size_std)
width_half = width/2
while not find:
# check if no space left
if len(check) == 0:
break
# pop out a random x position
ind = random.sample(check, 1)[0]
check.remove(ind)
if ind - round(width_half) < x_min or \
ind + round(width_half) >= x_max:
continue
# all y values available at this x position
y_pool = avail_pts[ind][0].copy()
if len(y_pool) < length:
continue
# find a y that is valid
check_y_pool = 0 # This check if y pool it self has valid ys.
all_ok = True
y_start = 0
while len(y_pool) != 0:
# pop out a random starting y value
y_start = random.sample(y_pool, 1)[0]
y_pool.remove(y_start)
ys = list(range(y_start, y_start+length))
all_ok = True
# check x by x if there are enough space
for i in range(ind-round(width_half), ind+round(width_half)+1):
y_set = avail_pts[i][0]
for y in ys:
if y not in y_set:
all_ok = False
if i == ind:
check_y_pool += 1
break
# if not ok:
# break
if all_ok:
break
if check_y_pool < length:
check.add(ind)
if all_ok:
for i in range(ind-round(width_half), ind+round(width_half)+1):
# area.append([])
for t in range(length):
avail_pts[i][0].remove(t+y_start)
# area[-1].append((i, t+y_start))
area = [(ind-round(width_half), ind+round(width_half)), (y_start, y_start+length-1)]
find = True
return area, check
    def createBoulder(self, area, height=5):
        '''Build a dome-shaped boulder over a reserved footprint.

        area - list of range
               [(x_start, x_end), (y_start, y_end)]
        height - boulder peak height (grid units)
        Returns (x, y, z) 2-D arrays giving the footprint coordinates and
        the z increment at each cell.
        '''
        # get length
        end_y, start_y = area[1][1], area[1][0]
        end_x, start_x = area[0][1], area[0][0]
        length = max(end_y-start_y+1, end_x-start_x+1)
        r = length/2
        temp_x = np.arange(length)
        temp_y = np.arange(length)
        temp_x, temp_y = np.meshgrid(temp_x, temp_y)
        # Radial falloff from the dome center, clamped at 0 outside radius r.
        z = np.sqrt(np.square(temp_x-r) + np.square(temp_y-r))
        z = (r-z)/r
        z = np.maximum(z, 0)
        # Flatten the peak: halve the profile above 0.5.
        for i in range(len(z)):
            for t in range(len(z[0])):
                if z[i][t] > 0.5:
                    z[i][t] = 0.5 + (z[i][t]-0.5)/2
        z = z/0.75 * height
        # Random roughness up to 10% of the height.
        err = np.random.random_sample(z.shape)*height/10
        z = z + err
        # Crop the square dome to the (possibly rectangular) footprint.
        # NOTE(review): the slices below reuse `i` left over from the loop
        # above (last row index) as in `len(z[i])` -- works, but fragile.
        diff_x = length - (end_x - start_x + 1)
        diff_y = length - (end_y - start_y + 1)
        if diff_x > 0:
            z = z[:, floor(diff_x/2):len(z[i])-ceil(diff_x/2)]
            temp_x = temp_x[:, floor(diff_x/2):len(temp_x[i])-ceil(diff_x/2)]
            temp_y = temp_y[:, floor(diff_x/2):len(temp_y[i])-ceil(diff_x/2)]
        elif diff_y > 0:
            z = z[floor(diff_y/2):len(z)-ceil(diff_y/2), :]
            temp_x = temp_x[floor(diff_y/2):len(temp_y)-ceil(diff_y/2), :]
            temp_y = temp_y[floor(diff_y/2):len(temp_y)-ceil(diff_y/2), :]
        # Shift the local grid so it starts at the footprint origin.
        start_x -= temp_x[0][0]
        start_y -= temp_y[0][0]
        x = temp_x + start_x
        y = temp_y + start_y
        return (x, y, z)
def updateBoulder(self, boulder):
(b_x, b_y, b_z) = boulder
for i in range(len(b_x)):
for t in range(len(b_x[0])):
x = b_x[i][t]
y = b_y[i][t]
z = b_z[i][t]
ind_x = np.where(self.xshape_x == x)[0]
ind_y = np.where(self.xshape_y == y)[0]
# if len(np.intersect1d(ind_x, ind_y)) == 0:
# print('ind_x', ind_x)
# print('ind_y', ind_y)
# print('x, y', x, y)
ind = np.intersect1d(ind_x, ind_y)[0]
self.xshape_z[ind] += z
    def perlinThalweg(self, height):
        '''
        2D perlin function through the whole inner channel.
        height is the maximum difference of the noise (physical units).
        '''
        # decide the x range and y range of the channel base
        height = height/self.dx
        min_x = np.amin(self.xshape_x)
        min_y = np.amin(self.xshape_y)
        max_x = np.amax(self.xshape_x)
        max_y = np.amax(self.xshape_y)
        diff_x = max_x - min_x + 1
        diff_y = max_y - min_y + 1
        # create a frame for perlin2D function
        diff_x = ceil(diff_x/10)
        diff_y = ceil(diff_y/10)
        lin_x = np.linspace(0, diff_x, diff_x*10)
        lin_y = np.linspace(0, diff_y, diff_y*10)
        x, y = np.meshgrid(lin_x, lin_y)
        # generate 2d perlin noise, scaled to the requested height
        z = functions.perlin2D(x, y)
        z *= height
        # update noise to channel base
        # NOTE(review): xi/yi are used as array indices below, which assumes
        # xshape_x and xshape_y hold integer values -- confirm.
        for i in range(len(self.xshape_x)):
            xi = self.xshape_x[i]
            yi = self.xshape_y[i]
            xi = xi - min_x
            yi = yi - min_y
            zi = z[yi][xi]
            self.xshape_z[i] += zi
| [
"math.floor",
"matplotlib.pyplot.ylabel",
"math.log",
"numpy.array",
"numpy.arange",
"numpy.cross",
"numpy.where",
"matplotlib.pyplot.xlabel",
"numpy.linspace",
"numpy.argmin",
"numpy.meshgrid",
"numpy.maximum",
"numpy.round",
"numpy.random.normal",
"random.sample",
"numpy.amax",
"nu... | [((2131, 2151), 'numpy.array', 'np.array', (['x_v_valley'], {}), '(x_v_valley)\n', (2139, 2151), True, 'import numpy as np\n'), ((2236, 2255), 'numpy.amax', 'np.amax', (['x_v_valley'], {}), '(x_v_valley)\n', (2243, 2255), True, 'import numpy as np\n'), ((2697, 2712), 'numpy.array', 'np.array', (['out_x'], {}), '(out_x)\n', (2705, 2712), True, 'import numpy as np\n'), ((2737, 2752), 'numpy.array', 'np.array', (['out_y'], {}), '(out_y)\n', (2745, 2752), True, 'import numpy as np\n'), ((4407, 4428), 'numpy.amax', 'np.amax', (['orig_thalweg'], {}), '(orig_thalweg)\n', (4414, 4428), True, 'import numpy as np\n'), ((5221, 5242), 'numpy.amax', 'np.amax', (['flat_thalweg'], {}), '(flat_thalweg)\n', (5228, 5242), True, 'import numpy as np\n'), ((5800, 5821), 'numpy.amax', 'np.amax', (['flat_thalweg'], {}), '(flat_thalweg)\n', (5807, 5821), True, 'import numpy as np\n'), ((5935, 5957), 'numpy.std', 'np.std', (['(diff * self.dx)'], {}), '(diff * self.dx)\n', (5941, 5957), True, 'import numpy as np\n'), ((6553, 6567), 'numpy.amax', 'np.amax', (['cur_v'], {}), '(cur_v)\n', (6560, 6567), True, 'import numpy as np\n'), ((6585, 6599), 'numpy.amin', 'np.amin', (['cur_v'], {}), '(cur_v)\n', (6592, 6599), True, 'import numpy as np\n'), ((12795, 12810), 'numpy.array', 'np.array', (['out_x'], {}), '(out_x)\n', (12803, 12810), True, 'import numpy as np\n'), ((12835, 12850), 'numpy.array', 'np.array', (['out_y'], {}), '(out_y)\n', (12843, 12850), True, 'import numpy as np\n'), ((12875, 12890), 'numpy.array', 'np.array', (['out_z'], {}), '(out_z)\n', (12883, 12890), True, 'import numpy as np\n'), ((12915, 12933), 'numpy.array', 'np.array', (['center_z'], {}), '(center_z)\n', (12923, 12933), True, 'import numpy as np\n'), ((13266, 13288), 'numpy.amin', 'np.amin', (['self.xshape_x'], {}), '(self.xshape_x)\n', (13273, 13288), True, 'import numpy as np\n'), ((14289, 14305), 'numpy.amin', 'np.amin', (['loc_ind'], {}), '(loc_ind)\n', (14296, 14305), True, 'import numpy as np\n'), 
((14471, 14504), 'numpy.amax', 'np.amax', (["self.levels_n['left'][0]"], {}), "(self.levels_n['left'][0])\n", (14478, 14504), True, 'import numpy as np\n'), ((14524, 14558), 'numpy.amin', 'np.amin', (["self.levels_n['right'][0]"], {}), "(self.levels_n['right'][0])\n", (14531, 14558), True, 'import numpy as np\n'), ((19876, 19899), 'numpy.round', 'np.round', (['self.x_v[ind]'], {}), '(self.x_v[ind])\n', (19884, 19899), True, 'import numpy as np\n'), ((21081, 21104), 'numpy.round', 'np.round', (['self.x_v[ind]'], {}), '(self.x_v[ind])\n', (21089, 21104), True, 'import numpy as np\n'), ((21586, 21610), 'math.floor', 'floor', (['((n - tzn) / 2 + 1)'], {}), '((n - tzn) / 2 + 1)\n', (21591, 21610), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((22235, 22257), 'numpy.append', 'np.append', (['leftEdge', 'y'], {}), '(leftEdge, y)\n', (22244, 22257), True, 'import numpy as np\n'), ((22308, 22331), 'numpy.append', 'np.append', (['y', 'rightEdge'], {}), '(y, rightEdge)\n', (22317, 22331), True, 'import numpy as np\n'), ((22345, 22386), 'numpy.append', 'np.append', (["self.levels_z['left'][0][0]", 'z'], {}), "(self.levels_z['left'][0][0], z)\n", (22354, 22386), True, 'import numpy as np\n'), ((22399, 22440), 'numpy.append', 'np.append', (['z', "self.levels_z['left'][0][0]"], {}), "(z, self.levels_z['left'][0][0])\n", (22408, 22440), True, 'import numpy as np\n'), ((23713, 23726), 'numpy.array', 'np.array', (['cur'], {}), '(cur)\n', (23721, 23726), True, 'import numpy as np\n'), ((30111, 30128), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (30120, 30128), True, 'import numpy as np\n'), ((30146, 30163), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (30155, 30163), True, 'import numpy as np\n'), ((30189, 30216), 'numpy.meshgrid', 'np.meshgrid', (['temp_x', 'temp_y'], {}), '(temp_x, temp_y)\n', (30200, 30216), True, 'import numpy as np\n'), ((30312, 30328), 'numpy.maximum', 'np.maximum', (['z', '(0)'], {}), '(z, 0)\n', (30322, 30328), 
True, 'import numpy as np\n'), ((32270, 32292), 'numpy.amin', 'np.amin', (['self.xshape_x'], {}), '(self.xshape_x)\n', (32277, 32292), True, 'import numpy as np\n'), ((32309, 32331), 'numpy.amin', 'np.amin', (['self.xshape_y'], {}), '(self.xshape_y)\n', (32316, 32331), True, 'import numpy as np\n'), ((32348, 32370), 'numpy.amax', 'np.amax', (['self.xshape_x'], {}), '(self.xshape_x)\n', (32355, 32370), True, 'import numpy as np\n'), ((32387, 32409), 'numpy.amax', 'np.amax', (['self.xshape_y'], {}), '(self.xshape_y)\n', (32394, 32409), True, 'import numpy as np\n'), ((32546, 32563), 'math.ceil', 'ceil', (['(diff_x / 10)'], {}), '(diff_x / 10)\n', (32550, 32563), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((32579, 32596), 'math.ceil', 'ceil', (['(diff_y / 10)'], {}), '(diff_y / 10)\n', (32583, 32596), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((32612, 32647), 'numpy.linspace', 'np.linspace', (['(0)', 'diff_x', '(diff_x * 10)'], {}), '(0, diff_x, diff_x * 10)\n', (32623, 32647), True, 'import numpy as np\n'), ((32662, 32697), 'numpy.linspace', 'np.linspace', (['(0)', 'diff_y', '(diff_y * 10)'], {}), '(0, diff_y, diff_y * 10)\n', (32673, 32697), True, 'import numpy as np\n'), ((32711, 32736), 'numpy.meshgrid', 'np.meshgrid', (['lin_x', 'lin_y'], {}), '(lin_x, lin_y)\n', (32722, 32736), True, 'import numpy as np\n'), ((4879, 4917), 'numpy.absolute', 'np.absolute', (["self.levels_n['right'][0]"], {}), "(self.levels_n['right'][0])\n", (4890, 4917), True, 'import numpy as np\n'), ((4933, 4947), 'numpy.average', 'np.average', (['bf'], {}), '(bf)\n', (4943, 4947), True, 'import numpy as np\n'), ((6724, 6742), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (6736, 6742), True, 'import matplotlib.pyplot as plt\n'), ((7366, 7412), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Y (related to center of channel)"""'], {}), "('Y (related to center of channel)')\n", (7376, 7412), True, 'import matplotlib.pyplot as plt\n'), 
((7425, 7440), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z"""'], {}), "('Z')\n", (7435, 7440), True, 'import matplotlib.pyplot as plt\n'), ((7453, 7465), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7463, 7465), True, 'import matplotlib.pyplot as plt\n'), ((7527, 7545), 'numpy.absolute', 'np.absolute', (['cur_v'], {}), '(cur_v)\n', (7538, 7545), True, 'import numpy as np\n'), ((7568, 7599), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)'}), '(2, 1, sharex=True)\n', (7580, 7599), True, 'import matplotlib.pyplot as plt\n'), ((7690, 7706), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (7701, 7706), True, 'import matplotlib.pyplot as plt\n'), ((7728, 7748), 'numpy.argmax', 'np.argmax', (['abs_cur_v'], {}), '(abs_cur_v)\n', (7737, 7748), True, 'import numpy as np\n'), ((8200, 8246), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Y (related to center of channel)"""'], {}), "('Y (related to center of channel)')\n", (8210, 8246), True, 'import matplotlib.pyplot as plt\n'), ((8259, 8274), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z"""'], {}), "('Z')\n", (8269, 8274), True, 'import matplotlib.pyplot as plt\n'), ((8287, 8299), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8297, 8299), True, 'import matplotlib.pyplot as plt\n'), ((8313, 8329), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (8324, 8329), True, 'import matplotlib.pyplot as plt\n'), ((8351, 8371), 'numpy.argmin', 'np.argmin', (['abs_cur_v'], {}), '(abs_cur_v)\n', (8360, 8371), True, 'import numpy as np\n'), ((8877, 8892), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Z"""'], {}), "('Z')\n", (8887, 8892), True, 'import matplotlib.pyplot as plt\n'), ((8905, 8917), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8915, 8917), True, 'import matplotlib.pyplot as plt\n'), ((9457, 9475), 'numpy.absolute', 'np.absolute', (['cur_v'], {}), '(cur_v)\n', (9468, 9475), True, 'import 
numpy as np\n'), ((14238, 14267), 'numpy.where', 'np.where', (['(self.s_center > loc)'], {}), '(self.s_center > loc)\n', (14246, 14267), True, 'import numpy as np\n'), ((16434, 16462), 'numpy.intersect1d', 'np.intersect1d', (['ind_x', 'ind_y'], {}), '(ind_x, ind_y)\n', (16448, 16462), True, 'import numpy as np\n'), ((17422, 17436), 'csv.writer', 'csv.writer', (['cf'], {}), '(cf)\n', (17432, 17436), False, 'import csv\n'), ((20303, 20324), 'numpy.where', 'np.where', (['(lbx == xVal)'], {}), '(lbx == xVal)\n', (20311, 20324), True, 'import numpy as np\n'), ((21278, 21299), 'numpy.where', 'np.where', (['(lbx == xVal)'], {}), '(lbx == xVal)\n', (21286, 21299), True, 'import numpy as np\n'), ((21881, 21894), 'numpy.array', 'np.array', (['n_z'], {}), '(n_z)\n', (21889, 21894), True, 'import numpy as np\n'), ((22513, 22603), 'numpy.append', 'np.append', (["(self.levels_n['left'][i][ind] - self.levels_n['left'][0][ind] + leftEdge)", 'y'], {}), "(self.levels_n['left'][i][ind] - self.levels_n['left'][0][ind] +\n leftEdge, y)\n", (22522, 22603), True, 'import numpy as np\n'), ((22616, 22657), 'numpy.append', 'np.append', (["self.levels_z['left'][i][0]", 'z'], {}), "(self.levels_z['left'][i][0], z)\n", (22625, 22657), True, 'import numpy as np\n'), ((22732, 22826), 'numpy.append', 'np.append', (['y', "(self.levels_n['right'][i][ind] - self.levels_n['right'][0][ind] + rightEdge)"], {}), "(y, self.levels_n['right'][i][ind] - self.levels_n['right'][0][ind\n ] + rightEdge)\n", (22741, 22826), True, 'import numpy as np\n'), ((22838, 22880), 'numpy.append', 'np.append', (['z', "self.levels_z['right'][i][0]"], {}), "(z, self.levels_z['right'][i][0])\n", (22847, 22880), True, 'import numpy as np\n'), ((26134, 26158), 'numpy.square', 'np.square', (['(lbx - head[0])'], {}), '(lbx - head[0])\n', (26143, 26158), True, 'import numpy as np\n'), ((26161, 26185), 'numpy.square', 'np.square', (['(lby - head[1])'], {}), '(lby - head[1])\n', (26170, 26185), True, 'import numpy as np\n'), 
((26209, 26233), 'numpy.square', 'np.square', (['(rbx - head[0])'], {}), '(rbx - head[0])\n', (26218, 26233), True, 'import numpy as np\n'), ((26236, 26260), 'numpy.square', 'np.square', (['(rby - head[1])'], {}), '(rby - head[1])\n', (26245, 26260), True, 'import numpy as np\n'), ((26285, 26309), 'numpy.square', 'np.square', (['(lbx - tail[0])'], {}), '(lbx - tail[0])\n', (26294, 26309), True, 'import numpy as np\n'), ((26312, 26336), 'numpy.square', 'np.square', (['(lby - tail[1])'], {}), '(lby - tail[1])\n', (26321, 26336), True, 'import numpy as np\n'), ((26360, 26384), 'numpy.square', 'np.square', (['(rbx - tail[0])'], {}), '(rbx - tail[0])\n', (26369, 26384), True, 'import numpy as np\n'), ((26387, 26411), 'numpy.square', 'np.square', (['(rby - tail[1])'], {}), '(rby - tail[1])\n', (26396, 26411), True, 'import numpy as np\n'), ((27382, 27419), 'numpy.random.normal', 'np.random.normal', (['size_mean', 'size_std'], {}), '(size_mean, size_std)\n', (27398, 27419), True, 'import numpy as np\n'), ((27524, 27561), 'numpy.random.normal', 'np.random.normal', (['size_mean', 'size_std'], {}), '(size_mean, size_std)\n', (27540, 27561), True, 'import numpy as np\n'), ((5301, 5317), 'numpy.average', 'np.average', (['diff'], {}), '(diff)\n', (5311, 5317), True, 'import numpy as np\n'), ((5879, 5895), 'numpy.average', 'np.average', (['diff'], {}), '(diff)\n', (5889, 5895), True, 'import numpy as np\n'), ((13344, 13366), 'numpy.amax', 'np.amax', (['self.xshape_x'], {}), '(self.xshape_x)\n', (13351, 13366), True, 'import numpy as np\n'), ((16142, 16170), 'numpy.where', 'np.where', (['(self.xshape_x == x)'], {}), '(self.xshape_x == x)\n', (16150, 16170), True, 'import numpy as np\n'), ((16194, 16222), 'numpy.where', 'np.where', (['(self.xshape_y == y)'], {}), '(self.xshape_y == y)\n', (16202, 16222), True, 'import numpy as np\n'), ((20122, 20128), 'math.log', 'log', (['B'], {}), '(B)\n', (20125, 20128), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((23463, 23479), 
'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (23471, 23479), True, 'import numpy as np\n'), ((26428, 26449), 'numpy.amin', 'np.amin', (['head_dist_lb'], {}), '(head_dist_lb)\n', (26435, 26449), True, 'import numpy as np\n'), ((26451, 26472), 'numpy.amin', 'np.amin', (['head_dist_rb'], {}), '(head_dist_rb)\n', (26458, 26472), True, 'import numpy as np\n'), ((26746, 26767), 'numpy.amin', 'np.amin', (['tail_dist_lb'], {}), '(tail_dist_lb)\n', (26753, 26767), True, 'import numpy as np\n'), ((26769, 26790), 'numpy.amin', 'np.amin', (['tail_dist_rb'], {}), '(tail_dist_rb)\n', (26776, 26790), True, 'import numpy as np\n'), ((27848, 27871), 'random.sample', 'random.sample', (['check', '(1)'], {}), '(check, 1)\n', (27861, 27871), False, 'import random\n'), ((30237, 30258), 'numpy.square', 'np.square', (['(temp_x - r)'], {}), '(temp_x - r)\n', (30246, 30258), True, 'import numpy as np\n'), ((30259, 30280), 'numpy.square', 'np.square', (['(temp_y - r)'], {}), '(temp_y - r)\n', (30268, 30280), True, 'import numpy as np\n'), ((30538, 30570), 'numpy.random.random_sample', 'np.random.random_sample', (['z.shape'], {}), '(z.shape)\n', (30561, 30570), True, 'import numpy as np\n'), ((2362, 2387), 'numpy.where', 'np.where', (['(x_v_valley == x)'], {}), '(x_v_valley == x)\n', (2370, 2387), True, 'import numpy as np\n'), ((5522, 5560), 'numpy.absolute', 'np.absolute', (["self.levels_n['right'][0]"], {}), "(self.levels_n['right'][0])\n", (5533, 5560), True, 'import numpy as np\n'), ((16516, 16544), 'numpy.intersect1d', 'np.intersect1d', (['ind_x', 'ind_y'], {}), '(ind_x, ind_y)\n', (16530, 16544), True, 'import numpy as np\n'), ((20115, 20121), 'math.log', 'log', (['(2)'], {}), '(2)\n', (20118, 20121), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((28507, 28531), 'random.sample', 'random.sample', (['y_pool', '(1)'], {}), '(y_pool, 1)\n', (28520, 28531), False, 'import random\n'), ((31612, 31640), 'numpy.where', 'np.where', (['(self.xshape_x == x)'], {}), 
'(self.xshape_x == x)\n', (31620, 31640), True, 'import numpy as np\n'), ((31668, 31696), 'numpy.where', 'np.where', (['(self.xshape_y == y)'], {}), '(self.xshape_y == y)\n', (31676, 31696), True, 'import numpy as np\n'), ((31910, 31938), 'numpy.intersect1d', 'np.intersect1d', (['ind_x', 'ind_y'], {}), '(ind_x, ind_y)\n', (31924, 31938), True, 'import numpy as np\n'), ((11231, 11242), 'math.ceil', 'ceil', (['x_max'], {}), '(x_max)\n', (11235, 11242), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((20380, 20401), 'numpy.where', 'np.where', (['(rbx == xVal)'], {}), '(rbx == xVal)\n', (20388, 20401), True, 'import numpy as np\n'), ((21355, 21376), 'numpy.where', 'np.where', (['(rbx == xVal)'], {}), '(rbx == xVal)\n', (21363, 21376), True, 'import numpy as np\n'), ((30750, 30767), 'math.floor', 'floor', (['(diff_x / 2)'], {}), '(diff_x / 2)\n', (30755, 30767), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((30823, 30840), 'math.floor', 'floor', (['(diff_x / 2)'], {}), '(diff_x / 2)\n', (30828, 30840), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((30901, 30918), 'math.floor', 'floor', (['(diff_x / 2)'], {}), '(diff_x / 2)\n', (30906, 30918), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((11249, 11261), 'math.floor', 'floor', (['x_min'], {}), '(x_min)\n', (11254, 11261), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((30776, 30792), 'math.ceil', 'ceil', (['(diff_x / 2)'], {}), '(diff_x / 2)\n', (30780, 30792), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((30854, 30870), 'math.ceil', 'ceil', (['(diff_x / 2)'], {}), '(diff_x / 2)\n', (30858, 30870), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((30932, 30948), 'math.ceil', 'ceil', (['(diff_x / 2)'], {}), '(diff_x / 2)\n', (30936, 30948), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((30991, 31008), 'math.floor', 'floor', (['(diff_y / 2)'], {}), '(diff_y / 2)\n', (30996, 31008), False, 'from math import pi, sqrt, log, ceil, 
floor\n'), ((31061, 31078), 'math.floor', 'floor', (['(diff_y / 2)'], {}), '(diff_y / 2)\n', (31066, 31078), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((31136, 31153), 'math.floor', 'floor', (['(diff_y / 2)'], {}), '(diff_y / 2)\n', (31141, 31153), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((18332, 18368), 'numpy.average', 'np.average', (["self.levels_n['left'][i]"], {}), "(self.levels_n['left'][i])\n", (18342, 18368), True, 'import numpy as np\n'), ((31014, 31030), 'math.ceil', 'ceil', (['(diff_y / 2)'], {}), '(diff_y / 2)\n', (31018, 31030), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((31089, 31105), 'math.ceil', 'ceil', (['(diff_y / 2)'], {}), '(diff_y / 2)\n', (31093, 31105), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((31164, 31180), 'math.ceil', 'ceil', (['(diff_y / 2)'], {}), '(diff_y / 2)\n', (31168, 31180), False, 'from math import pi, sqrt, log, ceil, floor\n'), ((18612, 18649), 'numpy.average', 'np.average', (["self.levels_n['right'][i]"], {}), "(self.levels_n['right'][i])\n", (18622, 18649), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def import_data(file_name, time_column_index=None, mode='csv', header=True, room_name=None, tz=0):
    """
    Load raw data from the disk.

    :type file_name: str
    :param file_name: the name of the raw data file
    :type time_column_index: int or None
    :param time_column_index: the column index for the timestamp in the given raw data file;
        if ``None``, no column is treated as a timestamp
    :type mode: str
    :param mode: the format for raw data. Currently only support ``csv``
    :type header: bool
    :param header: indicate whether the raw data contains a header on the first row. If ``False``, then assign unique
        index for each column
    :type room_name: str or None
    :param room_name: the name of the room. If ``None``, then assign unique number for the room
    :type tz: int
    :param tz: the time zone offset in hours applied to every parsed timestamp
    :rtype: core.data.dataset.Dataset
    :return: The structured data set with one raw input data
    :raises ValueError: if ``mode`` is not a supported format
    """
    from csv import reader
    from dateutil.parser import parse
    from numpy import nan, asarray
    from .dataset import Dataset
    if mode != 'csv':
        # BUG FIX: an unsupported mode previously fell through the
        # ``if mode == 'csv'`` guard and silently returned None;
        # fail fast with a clear error instead.
        raise ValueError("unsupported mode %r; only 'csv' is currently supported" % mode)
    feature_name = []
    data = []
    with open(file_name, 'r') as input_file:
        csv_reader = reader(input_file, delimiter=',')
        if header:
            # The last column is the occupancy label, not a feature.
            feature_name = next(csv_reader)[:-1]
        for line in csv_reader:
            if not line:  # skip blank rows
                continue
            for i, cell in enumerate(line):
                if i == time_column_index:
                    # Convert to a Unix timestamp and apply the offset (hours -> seconds).
                    line[i] = parse(cell).timestamp() + tz * 60 * 60
                elif not cell:
                    line[i] = nan  # empty cell -> missing value
                else:
                    try:
                        line[i] = float(cell)
                    except ValueError:
                        line[i] = nan  # non-numeric cell -> missing value
            data.append(line)
    data = asarray(data, dtype=float)
    if not feature_name:
        feature_name = list(range(data.shape[1]))
    dataset = Dataset()
    # All columns except the last are sensor readings; the last is the occupancy label.
    dataset.add_room(data[:, :-1], occupancy=data[:, -1], header=False, room_name=room_name)
    dataset.set_feature_name(feature_name)
    dataset.time_column_index = time_column_index
    return dataset
| [
"dateutil.parser.parse",
"numpy.asarray",
"csv.reader"
] | [((1227, 1260), 'csv.reader', 'reader', (['input_file'], {'delimiter': '""","""'}), "(input_file, delimiter=',')\n", (1233, 1260), False, 'from csv import reader\n'), ((1984, 2010), 'numpy.asarray', 'asarray', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (1991, 2010), False, 'from numpy import nan, asarray\n'), ((1614, 1628), 'dateutil.parser.parse', 'parse', (['line[i]'], {}), '(line[i])\n', (1619, 1628), False, 'from dateutil.parser import parse\n')] |
import numpy as np
# Build an N x N square spiral filled with 1..N*N starting from the centre
# cell and growing outwards ring by ring, then locate the cell holding
# `findn`. The commented-out alternatives (607 / 368078) suggest this is a
# spiral-memory puzzle solver (possibly Advent of Code 2017 day 3) -- TODO
# confirm.
N = 17 #607 #17
matrix = []
for i in range(N):
    matrix.append([0]*N)
matrix = np.matrix(matrix)
findn = 211 #368078 #211
find = [0,0]  # [x, y] position of `findn` once written
fn = 1        # value currently being written into the spiral
x = N//2      # start from the centre cell
y = N//2
expo = 1      # side length of the current (odd-sized) ring
while(fn < N*N):
    # Number of cells on the perimeter of the current ring (0 for the centre).
    per = expo**2 - (expo-2)**2
    if per == 0:
        # Innermost "ring" is just the centre cell, which holds 1.
        # NOTE(review): indexed [x,y] here but [y,x] everywhere else;
        # harmless only because x == y at the centre.
        matrix[x,y] = 1
        if findn == fn : find = [x,y]
    else:
        # Step right into the new ring...
        x = x + 1
        fn = fn + 1
        matrix[y,x] = fn
        if findn == fn : find = [x,y]
        # ...walk up the right edge,
        for j in range(expo-2):
            y = y - 1
            fn = fn + 1
            matrix[y,x] = fn
            if findn == fn : find = [x,y]
        # across the top edge (right to left),
        for j in range(1,expo):
            x = x - 1
            fn = fn + 1
            matrix[y,x] = fn
            if findn == fn : find = [x,y]
        # down the left edge,
        for j in range(1,expo):
            y = y + 1
            fn = fn + 1
            matrix[y,x] = fn
            if findn == fn : find = [x,y]
        # and along the bottom edge back under the start column.
        for j in range(1,expo):
            x = x + 1
            fn = fn + 1
            matrix[y,x] = fn
            if findn == fn : find = [x,y]
    expo = expo + 2;
print(matrix)
print(find)
# Offsets of `findn` from the centre (used for a Manhattan-style distance).
print(N//2,find[0], N//2 - find[0] , find[1] - N//2)
print( N//2 - find[0] + find[1] - N//2)
| [
"numpy.matrix"
] | [((100, 117), 'numpy.matrix', 'np.matrix', (['matrix'], {}), '(matrix)\n', (109, 117), True, 'import numpy as np\n')] |
from pathlib import Path
import os
import cv2
import numpy as np
import glob
from sklearn.preprocessing import LabelEncoder
from PIL import Image
from tqdm import tqdm
import albumentations as A
import tensorflow as tf
from .utils import load_bbox, get_resized_bbox
from .preprocessing import preprocess
# On-disk layout of the StanfordDogs dataset relative to this module.
# NOTE(review): download_dataset() untars into ./data, while these paths
# point at ../../data -- confirm the intended working directory.
DATA_DIR = Path("../../data")
IMAGES_DIR = DATA_DIR / "Images"  # one sub-directory per breed
ANN_DIR = DATA_DIR / "Annotation"  # bounding-box annotations per breed
def download_dataset():
    """Fetch the StanfordDogs image and annotation archives and unpack both
    into the local ``./data`` directory.
    """
    commands = (
        "wget http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar -P ./data",
        "wget http://vision.stanford.edu/aditya86/ImageNetDogs/annotation.tar -P ./data",
        "tar xf ./data/images.tar -C ./data",
        "tar xf ./data/annotation.tar -C ./data",
    )
    for command in commands:
        os.system(command)
def prepare_raw_dataset():
    """Prepares the raw dataset from the StanfordDogs dataset present on disk.

    Every image under ``IMAGES_DIR`` is cropped to its annotated bounding
    box, resized to 64x64 and normalised to [-1, 1].

    Returns:
        (np.array, np.array): (training images, training labels)
    """
    all_breeds = os.listdir(IMAGES_DIR)
    all_files = [file for breed in all_breeds for file in os.listdir(
        os.path.join(IMAGES_DIR, breed))]
    # BUG FIX: ANN_DIR is a pathlib.Path, so the previous ``ANN_DIR + '*'``
    # raised TypeError (and was missing a path separator anyway). Build the
    # glob pattern with the Path ``/`` operator instead.
    breeds = glob.glob(str(ANN_DIR / '*'))
    annotations = []
    for breed in breeds:
        annotations += glob.glob(breed + '/*')
    # Map the numeric prefix of each annotation directory (e.g. "n02085620")
    # to its full "<prefix>-<breed name>" directory name.
    breed_map = {}
    for annotation in annotations:
        breed = annotation.split('/')[-2]
        index = breed.split('-')[0]
        breed_map.setdefault(index, breed)
    all_labels = [breed_map[file.split('_')[0]] for file in all_files]
    le = LabelEncoder()
    all_labels = le.fit_transform(all_labels)
    all_labels = all_labels.astype(np.int32)
    all_bboxes = [load_bbox(file) for file in all_files]
    print('Total files : {}'.format(len(all_files)))
    print('Total labels : {}'.format(len(all_labels)))
    print('Total bboxes : {}'.format(len(all_bboxes)))
    print('Total annotations : {}'.format(len(annotations)))
    print('Total classes : {}'.format(len(le.classes_)))
    # Rescale each bounding box to the actual image dimensions.
    resized_bboxes = []
    for file, bbox in zip(all_files, all_bboxes):
        file = os.path.join(breed_map[file.split('_')[0]], str(file))
        path = os.path.join(IMAGES_DIR, file)
        img = Image.open(path)
        width, height = img.size
        xmin, ymin, xmax, ymax = get_resized_bbox(height, width, bbox)
        resized_bboxes.append((xmin, ymin, xmax, ymax))
    all_images = []
    dim = 64
    # PERF: build the resize/normalise pipeline once instead of per image.
    transform = A.Compose([A.Resize(dim, dim, interpolation=cv2.INTER_AREA),
                           A.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    for file, bbox in tqdm(zip(all_files, resized_bboxes), total=len(all_files)):
        file = os.path.join(breed_map[file.split('_')[0]], str(file))
        path = os.path.join(IMAGES_DIR, file)
        img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        xmin, ymin, xmax, ymax = bbox
        img = img[ymin:ymax, xmin:xmax]  # crop to the dog's bounding box
        img = transform(image=img)['image']
        all_images.append(img)
    all_images = np.array(all_images)
    return all_images, all_labels
def prepare_dataset(train_data, train_labels, batch_size):
    """Builds the shuffled, batched tf.data pipeline used for training.

    Args:
        train_data (np.array): Training images.
        train_labels (np.array): Training labels (breeds).
        batch_size (int): Number of samples per training batch.

    Returns:
        tf.data.Dataset: Preprocessed, shuffled, batched and prefetched
        training dataset.
    """
    pipeline = (
        tf.data.Dataset.from_tensor_slices((train_data, train_labels))
        # Apply per-example preprocessing.
        .map(preprocess)
        # Shuffle over the whole set, then batch.
        .shuffle(buffer_size=train_data.shape[0])
        .batch(batch_size)
        # Overlap data preparation with training for performance.
        .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    )
    return pipeline
| [
"sklearn.preprocessing.LabelEncoder",
"os.listdir",
"PIL.Image.open",
"pathlib.Path",
"tensorflow.data.Dataset.from_tensor_slices",
"os.path.join",
"numpy.array",
"albumentations.Normalize",
"albumentations.Resize",
"cv2.cvtColor",
"os.system",
"cv2.imread",
"glob.glob"
] | [((318, 336), 'pathlib.Path', 'Path', (['"""../../data"""'], {}), "('../../data')\n", (322, 336), False, 'from pathlib import Path\n'), ((490, 587), 'os.system', 'os.system', (['"""wget http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar -P ./data"""'], {}), "(\n 'wget http://vision.stanford.edu/aditya86/ImageNetDogs/images.tar -P ./data'\n )\n", (499, 587), False, 'import os\n'), ((591, 692), 'os.system', 'os.system', (['"""wget http://vision.stanford.edu/aditya86/ImageNetDogs/annotation.tar -P ./data"""'], {}), "(\n 'wget http://vision.stanford.edu/aditya86/ImageNetDogs/annotation.tar -P ./data'\n )\n", (600, 692), False, 'import os\n'), ((696, 743), 'os.system', 'os.system', (['"""tar xf ./data/images.tar -C ./data"""'], {}), "('tar xf ./data/images.tar -C ./data')\n", (705, 743), False, 'import os\n'), ((748, 799), 'os.system', 'os.system', (['"""tar xf ./data/annotation.tar -C ./data"""'], {}), "('tar xf ./data/annotation.tar -C ./data')\n", (757, 799), False, 'import os\n'), ((1012, 1034), 'os.listdir', 'os.listdir', (['IMAGES_DIR'], {}), '(IMAGES_DIR)\n', (1022, 1034), False, 'import os\n'), ((1161, 1185), 'glob.glob', 'glob.glob', (["(ANN_DIR + '*')"], {}), "(ANN_DIR + '*')\n", (1170, 1185), False, 'import glob\n'), ((1533, 1547), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1545, 1547), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3029, 3049), 'numpy.array', 'np.array', (['all_images'], {}), '(all_images)\n', (3037, 3049), True, 'import numpy as np\n'), ((3430, 3492), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(train_data, train_labels)'], {}), '((train_data, train_labels))\n', (3464, 3492), True, 'import tensorflow as tf\n'), ((1253, 1276), 'glob.glob', 'glob.glob', (["(breed + '/*')"], {}), "(breed + '/*')\n", (1262, 1276), False, 'import glob\n'), ((2159, 2189), 'os.path.join', 'os.path.join', (['IMAGES_DIR', 'file'], {}), '(IMAGES_DIR, file)\n', (2171, 
2189), False, 'import os\n'), ((2204, 2220), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (2214, 2220), False, 'from PIL import Image\n'), ((2582, 2612), 'os.path.join', 'os.path.join', (['IMAGES_DIR', 'file'], {}), '(IMAGES_DIR, file)\n', (2594, 2612), False, 'import os\n'), ((2628, 2644), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (2638, 2644), False, 'import cv2\n'), ((2659, 2695), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (2671, 2695), False, 'import cv2\n'), ((1113, 1144), 'os.path.join', 'os.path.join', (['IMAGES_DIR', 'breed'], {}), '(IMAGES_DIR, breed)\n', (1125, 1144), False, 'import os\n'), ((2807, 2855), 'albumentations.Resize', 'A.Resize', (['dim', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(dim, dim, interpolation=cv2.INTER_AREA)\n', (2815, 2855), True, 'import albumentations as A\n'), ((2888, 2933), 'albumentations.Normalize', 'A.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (2899, 2933), True, 'import albumentations as A\n')] |
import random
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def make_bin_edges(sos, x):
    """Build histogram bin edges of width ``sos`` centred on the mean of ``x``.

    The central bin straddles the mean; further edges are appended in both
    directions until the full range of ``x`` is covered. Returns the edges
    in ascending order.
    """
    centre = np.mean(x)
    lo, hi = min(x), max(x)
    width = sos
    lower_edge = centre - width / 2
    upper_edge = centre + width / 2
    edges = [lower_edge, upper_edge]
    # Extend downwards until the smallest sample is covered.
    edge = lower_edge
    while edge > lo:
        edge -= width
        edges.append(edge)
    # Extend upwards until the largest sample is covered.
    edge = upper_edge
    while edge < hi:
        edge += width
        edges.append(edge)
    edges.sort()
    return edges
def size_of_state(x, k, window_size):
    """Return ``k`` times the smallest sample standard deviation observed over
    all sliding windows of length ``window_size`` in ``x``.

    Returns 0 when no window fits into ``x``.
    """
    window_stds = [
        np.std(x[start:start + window_size], ddof=1)
        for start in range(len(x) - window_size + 1)
    ]
    if not window_stds:
        return 0
    return min(window_stds) * k
def amp_sos_fisher(x_list, bins):
    """Discrete Fisher information of the amplitude distribution of ``x_list``.

    The samples are histogrammed over ``bins``, normalised to probabilities,
    padded with an empty bin at each end, and the measure
    ``sum_i (sqrt(p_{i+1}) - sqrt(p_i))**2`` is returned.
    """
    counts, _ = np.histogram(x_list, bins=bins, density=False)
    probs = np.concatenate(([0.0], counts / len(x_list), [0.0]))
    return np.sum(np.diff(np.sqrt(probs)) ** 2)
def discrete_amp(x_list, k):
    """Fisher information of ``x_list`` using a bin width of ``k`` sample
    standard deviations."""
    bin_width = np.std(x_list, ddof=1) * k
    edges = make_bin_edges(bin_width, x_list)
    return amp_sos_fisher(x_list, edges)
def temporal_amp(x_list, k, window_size, over):
    """Sliding-window Fisher information over ``x_list``.

    A single set of bin edges -- sized from the quietest window and scaled by
    ``k`` -- is shared by every window of length ``window_size``; windows are
    advanced ``over`` samples at a time.
    """
    sos = size_of_state(x_list, k, window_size)
    edges = make_bin_edges(sos, x_list)
    total = len(x_list)
    fisher_series = []
    for start in range(0, total - window_size + 1, over):
        window = x_list[start:start + window_size]
        fisher_series.append(amp_sos_fisher(window, edges))
    return fisher_series
if __name__ == "__main__":
df = pd.read_csv("cantar2019.csv")
x = list(df["storage"])
k = 4
dN = 48
over = 1
fi = temporal_amp(x, k, dN, over)
fig, ax1 = plt.subplots(figsize=(5, 4))
ax1.plot(x, "k")
ax2 = ax1.twinx()
ax2.plot(range(dN, 1 + len(x), over), fi, "b")
plt.savefig("discrete_amp_disjoint.png")
| [
"numpy.mean",
"numpy.histogram",
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"pandas.read_csv",
"numpy.std",
"matplotlib.pyplot.subplots"
] | [((810, 856), 'numpy.histogram', 'np.histogram', (['x_list'], {'bins': 'bins', 'density': '(False)'}), '(x_list, bins=bins, density=False)\n', (822, 856), True, 'import numpy as np\n'), ((1538, 1567), 'pandas.read_csv', 'pd.read_csv', (['"""cantar2019.csv"""'], {}), "('cantar2019.csv')\n", (1549, 1567), True, 'import pandas as pd\n'), ((1686, 1714), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (1698, 1714), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1853), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""discrete_amp_disjoint.png"""'], {}), "('discrete_amp_disjoint.png')\n", (1824, 1853), True, 'import matplotlib.pyplot as plt\n'), ((143, 153), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (150, 153), True, 'import numpy as np\n'), ((1072, 1094), 'numpy.std', 'np.std', (['x_list'], {'ddof': '(1)'}), '(x_list, ddof=1)\n', (1078, 1094), True, 'import numpy as np\n'), ((692, 709), 'numpy.std', 'np.std', (['A'], {'ddof': '(1)'}), '(A, ddof=1)\n', (698, 709), True, 'import numpy as np\n'), ((935, 957), 'numpy.sqrt', 'np.sqrt', (['counts[x + 1]'], {}), '(counts[x + 1])\n', (942, 957), True, 'import numpy as np\n'), ((960, 978), 'numpy.sqrt', 'np.sqrt', (['counts[x]'], {}), '(counts[x])\n', (967, 978), True, 'import numpy as np\n')] |
#! -*- coding:utf-8 -*-
'''
@Author: ZM
@Date and Time: 2020/12/13 16:28
@File: train.py
'''
import math
import numpy as np
import pandas as pd
from gensim import corpora
from keras.layers import Input
from keras import Model
from keras.callbacks import Callback
from keras.optimizers import Adam
from keras import backend as K
from Dataset import Dataset
from get_dataset import get_dataset
from generator import generator
from utils import str2id, sequence_padding
from Loss import Loss
from ToOneHot import ToOneHot
from CNN_model import CNN_Model
class CrossEntropy(Loss):
    """Loss layer computing the mean softmax cross-entropy from
    ``[y_true, logits]`` inputs."""
    def compute_loss(self, inputs):
        targets, logits = inputs
        per_example = K.categorical_crossentropy(targets, K.softmax(logits))
        return K.mean(per_example)
if __name__ == '__main__':
    # Hyper-parameters of the text-classification CNN.
    num_classes = 4
    vocab_size = 33106
    max_length = 181
    hidden_dim = 64
    train_batch_size = 128
    val_batch_size = 500
    # Load the raw text/label splits and build a shared token dictionary
    # over train + validation text.
    (X_train, Y_train), (X_val, Y_val) = get_dataset()
    dictionary = corpora.Dictionary(pd.concat([X_train, X_val]))
    # Map tokens to integer ids and pad every sequence to a fixed length.
    X_train = [str2id(x, dictionary.token2id) for x in X_train]
    X_val = [str2id(x, dictionary.token2id) for x in X_val]
    X_train = sequence_padding(X_train, max_length=max_length)
    Y_train = np.array(Y_train, dtype='int32')
    X_val = sequence_padding(X_val, max_length=max_length)
    Y_val = np.array(Y_val, dtype='int32')
    # Wrap the arrays in Dataset objects that one-hot encode the labels.
    train_dataset = Dataset(X_train, Y_train, label_transform=ToOneHot(num_classes))
    val_dataset = Dataset(X_val, Y_val, label_transform=ToOneHot(num_classes))
    train_generator = generator(train_dataset, batch_size=train_batch_size, shuffle=True)
    val_generator = generator(val_dataset, batch_size=val_batch_size, shuffle=False)
    # The loss is computed inside the graph by the CrossEntropy layer, so
    # the one-hot labels are fed as a second model input and compile() is
    # called without an external loss argument.
    text_input = Input(shape=(max_length, ), name='text_input', dtype='int32')
    y_true = Input(shape=(num_classes, ), dtype='int32')
    out = CNN_Model(text_input, vocab_size, max_length, hidden_dim=hidden_dim, num_classes=num_classes)
    out = CrossEntropy(-1)([y_true, out])
    model = Model([y_true, text_input], out)
    opt = Adam()
    model.compile(opt)
    num_train_batches = math.ceil(len(Y_train) / train_batch_size)
    num_val_examples = len(Y_val)
    num_val_batches = math.ceil(num_val_examples / val_batch_size)
    def evaluate(model):
        # One full pass over the validation generator, accumulating the
        # in-graph loss and the number of correct argmax predictions.
        total_loss = 0.
        total_corrects = 0
        for _ in range(num_val_batches):
            batch_data, _ = next(val_generator)
            val_loss, predict = model.test_on_batch(batch_data, y=None), model.predict_on_batch(batch_data)
            total_loss += val_loss
            # batch_data[0] holds the one-hot labels (first model input).
            total_corrects += np.sum(np.argmax(batch_data[0], axis=-1) == np.argmax(predict, axis=-1))
        val_loss = total_loss / num_val_batches
        val_acc = (total_corrects / num_val_examples) * 100  # percentage
        return val_loss, val_acc
    class Evaluator(Callback):
        # Keras callback reporting validation loss/accuracy after each epoch.
        def __init__(self):
            super(Evaluator, self).__init__()
        def on_epoch_end(self, epoch, logs=None):
            val_loss, val_acc = evaluate(self.model)
            print(f'val_loss = {val_loss:.5f}, val_acc = {val_acc:.2f}')
    evaluator = Evaluator()
    model.fit_generator(
        train_generator,
        steps_per_epoch=num_train_batches,
        epochs=10,
        callbacks=[evaluator],
        shuffle=False,
        initial_epoch=0
    )
| [
"get_dataset.get_dataset",
"keras.optimizers.Adam",
"generator.generator",
"math.ceil",
"utils.str2id",
"keras.Model",
"keras.backend.mean",
"numpy.argmax",
"numpy.array",
"utils.sequence_padding",
"keras.layers.Input",
"ToOneHot.ToOneHot",
"CNN_model.CNN_Model",
"keras.backend.softmax",
... | [((968, 981), 'get_dataset.get_dataset', 'get_dataset', ([], {}), '()\n', (979, 981), False, 'from get_dataset import get_dataset\n'), ((1187, 1235), 'utils.sequence_padding', 'sequence_padding', (['X_train'], {'max_length': 'max_length'}), '(X_train, max_length=max_length)\n', (1203, 1235), False, 'from utils import str2id, sequence_padding\n'), ((1250, 1282), 'numpy.array', 'np.array', (['Y_train'], {'dtype': '"""int32"""'}), "(Y_train, dtype='int32')\n", (1258, 1282), True, 'import numpy as np\n'), ((1295, 1341), 'utils.sequence_padding', 'sequence_padding', (['X_val'], {'max_length': 'max_length'}), '(X_val, max_length=max_length)\n', (1311, 1341), False, 'from utils import str2id, sequence_padding\n'), ((1354, 1384), 'numpy.array', 'np.array', (['Y_val'], {'dtype': '"""int32"""'}), "(Y_val, dtype='int32')\n", (1362, 1384), True, 'import numpy as np\n'), ((1572, 1639), 'generator.generator', 'generator', (['train_dataset'], {'batch_size': 'train_batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=train_batch_size, shuffle=True)\n', (1581, 1639), False, 'from generator import generator\n'), ((1660, 1724), 'generator.generator', 'generator', (['val_dataset'], {'batch_size': 'val_batch_size', 'shuffle': '(False)'}), '(val_dataset, batch_size=val_batch_size, shuffle=False)\n', (1669, 1724), False, 'from generator import generator\n'), ((1743, 1803), 'keras.layers.Input', 'Input', ([], {'shape': '(max_length,)', 'name': '"""text_input"""', 'dtype': '"""int32"""'}), "(shape=(max_length,), name='text_input', dtype='int32')\n", (1748, 1803), False, 'from keras.layers import Input\n'), ((1818, 1860), 'keras.layers.Input', 'Input', ([], {'shape': '(num_classes,)', 'dtype': '"""int32"""'}), "(shape=(num_classes,), dtype='int32')\n", (1823, 1860), False, 'from keras.layers import Input\n'), ((1872, 1969), 'CNN_model.CNN_Model', 'CNN_Model', (['text_input', 'vocab_size', 'max_length'], {'hidden_dim': 'hidden_dim', 'num_classes': 'num_classes'}), 
'(text_input, vocab_size, max_length, hidden_dim=hidden_dim,\n num_classes=num_classes)\n', (1881, 1969), False, 'from CNN_model import CNN_Model\n'), ((2020, 2052), 'keras.Model', 'Model', (['[y_true, text_input]', 'out'], {}), '([y_true, text_input], out)\n', (2025, 2052), False, 'from keras import Model\n'), ((2063, 2069), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (2067, 2069), False, 'from keras.optimizers import Adam\n'), ((2217, 2261), 'math.ceil', 'math.ceil', (['(num_val_examples / val_batch_size)'], {}), '(num_val_examples / val_batch_size)\n', (2226, 2261), False, 'import math\n'), ((749, 761), 'keras.backend.mean', 'K.mean', (['loss'], {}), '(loss)\n', (755, 761), True, 'from keras import backend as K\n'), ((1018, 1045), 'pandas.concat', 'pd.concat', (['[X_train, X_val]'], {}), '([X_train, X_val])\n', (1027, 1045), True, 'import pandas as pd\n'), ((1063, 1093), 'utils.str2id', 'str2id', (['x', 'dictionary.token2id'], {}), '(x, dictionary.token2id)\n', (1069, 1093), False, 'from utils import str2id, sequence_padding\n'), ((1125, 1155), 'utils.str2id', 'str2id', (['x', 'dictionary.token2id'], {}), '(x, dictionary.token2id)\n', (1131, 1155), False, 'from utils import str2id, sequence_padding\n'), ((715, 732), 'keras.backend.softmax', 'K.softmax', (['y_pred'], {}), '(y_pred)\n', (724, 732), True, 'from keras import backend as K\n'), ((1448, 1469), 'ToOneHot.ToOneHot', 'ToOneHot', (['num_classes'], {}), '(num_classes)\n', (1456, 1469), False, 'from ToOneHot import ToOneHot\n'), ((1527, 1548), 'ToOneHot.ToOneHot', 'ToOneHot', (['num_classes'], {}), '(num_classes)\n', (1535, 1548), False, 'from ToOneHot import ToOneHot\n'), ((2610, 2643), 'numpy.argmax', 'np.argmax', (['batch_data[0]'], {'axis': '(-1)'}), '(batch_data[0], axis=-1)\n', (2619, 2643), True, 'import numpy as np\n'), ((2647, 2674), 'numpy.argmax', 'np.argmax', (['predict'], {'axis': '(-1)'}), '(predict, axis=-1)\n', (2656, 2674), True, 'import numpy as np\n')] |
import os
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras import backend as K # 这里目的是使用后端tensorflow
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import load_img, img_to_array
# This face-verification module builds a Siamese network trained on image
# pairs with a contrastive loss (the original note said "triplets", but the
# code below splits the dataset into pairs -- see _create_pic_pairs).
# Global training parameters are configured here.
# Custom loss function: distances are scaled so the maximum is 1 and the
# minimum is 0. As implemented below:
# loss = mean(Y * distance^2 + (1-Y) * max(0, margin - distance)^2),
# where Y = 1 for a matching pair.
def contrastive_loss(y_true, y_pred):
    """Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

    ``y_true`` is 1 for a matching pair and 0 otherwise; ``y_pred`` is the
    predicted embedding distance. Matching pairs are penalised by the squared
    distance, non-matching pairs by the squared shortfall below the margin.
    """
    margin = 1
    positive_term = y_true * K.square(y_pred)
    negative_term = (1 - y_true) * K.square(K.maximum(margin - y_pred, 0))
    return K.mean(positive_term + negative_term)
# Accuracy metric used during training.
def accuracy(y_true, y_pred):
    """Fraction of pairs classified correctly using a fixed 0.5 distance
    threshold (distance < 0.5 means "same person")."""
    predictions = K.cast(y_pred < 0.5, y_true.dtype)
    return K.mean(K.equal(y_true, predictions))
# Compute the distance between two embedding vectors.
def _euclidean_distance(vects):
    """Euclidean distance between two batches of embedding vectors."""
    left, right = vects
    squared_distance = K.sum(K.square(left - right), axis=1, keepdims=True)
    # Clamp to epsilon before the square root so the gradient stays finite
    # at zero distance.
    return K.sqrt(K.maximum(squared_distance, K.epsilon()))
# Declares the output shape of the distance layer.
def _eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def load_my_model(weightfile):
    """Build the Siamese verification model and restore trained weights
    from ``weightfile``."""
    siamese = verify()._model()
    siamese.load_weights(weightfile)
    return siamese
# Given the training image folder, return two arrays: one of paired image
# paths and one of labels.
def _create_pic_pairs(pic_path, pairs_txt):
f = open(pairs_txt)
pairs_txt = f.readlines()[1:]
pairs = []
labels = []
for temp in pairs_txt:
temp = temp.split("\n")[0].split("\t")
if len(temp) == 3:
if len(temp[1]) != 4:
temp[1] = (4 - len(temp[1])) * "0" + temp[1]
if len(temp[2]) != 4:
temp[2] = (4 - len(temp[2])) * "0" + temp[2]
pair = [pic_path + "/" + temp[0] + "/" + temp[0] + "_" + temp[1] + ".jpg",
pic_path + "/" + temp[0] + "/" + temp[0] + "_" + temp[2] + ".jpg"]
if os.path.exists(pair[0]) and os.path.exists(pair[1]):
pairs.append(pair)
labels.append([1])
if len(temp) == 4:
if len(temp[1]) != 4:
temp[1] = (4 - len(temp[1])) * "0" + temp[1]
if len(temp[3]) != 4:
temp[3] = (4 - len(temp[3])) * "0" + temp[3]
pair = [pic_path + "/" + temp[0] + "/" + temp[0] + "_" + temp[1] + ".jpg",
pic_path + "/" + temp[2] + "/" + temp[2] + "_" + temp[3] + ".jpg"]
if os.path.exists(pair[0]) and os.path.exists(pair[1]):
pairs.append(pair)
labels.append([0])
return np.array(pairs), np.array(labels)
class verify:
    """Siamese face-verification model: training, weight loading and inference.

    Two VGG16-based branches share weights and are compared through a
    Euclidean-distance layer trained with the contrastive loss.
    """
    def _load_image(self, a_pair_img_path):
        # Load both images of a pair and scale pixel values to [0, 1].
        img1 = load_img(a_pair_img_path[0])
        img2 = load_img(a_pair_img_path[1])
        return img_to_array(img1) / 255.0, img_to_array(img2) / 255.0
    # Build an infinite batch generator over the image-pair paths.
    def _get_batch_img(self, x_samples, y_samples, batch_size):
        batch_num = int(len(x_samples) / batch_size)  # number of full batches
        max_len = batch_num * batch_size
        x_samples = x_samples[:max_len]  # drop the incomplete trailing batch
        x_batches = np.split(x_samples, batch_num)  # each element is one batch of path pairs
        y_samples = y_samples[:max_len]
        y_baches = np.split(y_samples, batch_num)
        while True:
            for i in range(batch_num):
                x = np.array(list(map(self._load_image, x_batches[i])))  # decode this batch's images
                y = np.array(y_baches[i]).astype("float32")
                yield [x[:, 0], x[:, 1]], y
    # Base CNN: VGG16 with its softmax head removed, followed by a 128-d
    # embedding layer.
    def _create_base_network(self):
        '''Base network to be shared (eq. to feature extraction).
        '''
        vgg16 = VGG16()
        base_model = Sequential()
        base_model.add(Model(inputs=vgg16.input, outputs=vgg16.get_layer("fc2").output, name="vgg16"))
        base_model.add(Dense(128, "relu", name="fc3"))
        # Freeze the VGG16 weights; only the embedding layer is trained.
        base_model.layers[0].trainable = False
        return base_model
    # Final accuracy computation used after training.
    def _compute_accuracy(self, y_true, y_pred):
        '''Compute classification accuracy with a fixed threshold on distances.
        '''
        # pred is a boolean vector: True means the distance is below 0.5,
        # i.e. the pair is predicted to be the same person.
        pred = y_pred.ravel() < 0.5
        return np.mean(pred == y_true)
    def _model(self, vgg_weight=None):
        """Assemble the full Siamese model (two shared branches + distance)."""
        input_shape = (224, 224, 3)
        model = self._create_base_network()
        # Build the twin branches sharing the same base network.
        input_a = Input(shape=input_shape)
        input_b = Input(shape=input_shape)
        processed_a = model(input_a)
        processed_b = model(input_b)
        # Extra layer: takes the two embeddings and outputs their distance.
        distance = Lambda(_euclidean_distance,
                          output_shape=_eucl_dist_output_shape)([processed_a, processed_b])
        # Inputs a and b; output: distance between their embeddings.
        model = Model([input_a, input_b], distance)
        rms = RMSprop()
        model.compile(rms, loss=contrastive_loss, metrics=[accuracy])
        return model
    def train(self, face_path, batch_size, epochs, model_save_name="temp"):
        """Train on LFW-style pairs and save the weights under ./models.

        NOTE(review): the pairs file path "train_data/lfw/pairs.txt" is
        hard-coded -- confirm it matches the deployment layout.
        """
        # Build the (image-path pair, label) arrays for the dataset.
        pairs, label = _create_pic_pairs(face_path, "train_data/lfw/pairs.txt")
        # Split into train/test sets; the test set holds 300 pairs.
        X_train_path, X_test_path, y_train, y_test = train_test_split(pairs, label, test_size=300, random_state=42)
        X_test = np.array(list(map(self._load_image, X_test_path)))
        model = self._model()
        model.fit_generator(self._get_batch_img(X_train_path, y_train, batch_size),
                            steps_per_epoch=len(y_train) // batch_size,
                            validation_data=([X_test[:, 0], X_test[:, 1]], y_test.astype("float32")),
                            epochs=epochs)
        pred = model.predict([X_test[:, 0], X_test[:, 1]])
        print(self._compute_accuracy(y_test, pred))
        model.save_weights(f"models/{model_save_name}.h5")
        return model.history
    def predict(self, pic_pairs, model):
        """Run the given model on prepared image pairs; returns raw distances."""
        model = model  # NOTE(review): no-op self-assignment; harmless but removable
        pred = model.predict(pic_pairs)
        return pred
| [
"tensorflow.keras.backend.epsilon",
"numpy.array",
"tensorflow.keras.layers.Dense",
"numpy.mean",
"tensorflow.keras.layers.Input",
"os.path.exists",
"tensorflow.keras.backend.mean",
"tensorflow.keras.backend.maximum",
"tensorflow.keras.backend.cast",
"tensorflow.keras.models.Model",
"tensorflow.... | [((898, 914), 'tensorflow.keras.backend.square', 'K.square', (['y_pred'], {}), '(y_pred)\n', (906, 914), True, 'from tensorflow.keras import backend as K\n'), ((986, 1045), 'tensorflow.keras.backend.mean', 'K.mean', (['(y_true * square_pred + (1 - y_true) * margin_square)'], {}), '(y_true * square_pred + (1 - y_true) * margin_square)\n', (992, 1045), True, 'from tensorflow.keras import backend as K\n'), ((944, 973), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(margin - y_pred)', '(0)'], {}), '(margin - y_pred, 0)\n', (953, 973), True, 'from tensorflow.keras import backend as K\n'), ((1335, 1350), 'tensorflow.keras.backend.square', 'K.square', (['(x - y)'], {}), '(x - y)\n', (1343, 1350), True, 'from tensorflow.keras import backend as K\n'), ((3013, 3028), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (3021, 3028), True, 'import numpy as np\n'), ((3030, 3046), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3038, 3046), True, 'import numpy as np\n'), ((3123, 3151), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['a_pair_img_path[0]'], {}), '(a_pair_img_path[0])\n', (3131, 3151), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((3167, 3195), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['a_pair_img_path[1]'], {}), '(a_pair_img_path[1])\n', (3175, 3195), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((3522, 3552), 'numpy.split', 'np.split', (['x_samples', 'batch_num'], {}), '(x_samples, batch_num)\n', (3530, 3552), True, 'import numpy as np\n'), ((3647, 3677), 'numpy.split', 'np.split', (['y_samples', 'batch_num'], {}), '(y_samples, batch_num)\n', (3655, 3677), True, 'import numpy as np\n'), ((4116, 4123), 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {}), '()\n', (4121, 4123), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), ((4145, 4157), 'tensorflow.keras.models.Sequential', 
'Sequential', ([], {}), '()\n', (4155, 4157), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((4656, 4679), 'numpy.mean', 'np.mean', (['(pred == y_true)'], {}), '(pred == y_true)\n', (4663, 4679), True, 'import numpy as np\n'), ((4837, 4861), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (4842, 4861), False, 'from tensorflow.keras.layers import Input, Dense, Lambda\n'), ((4880, 4904), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (4885, 4904), False, 'from tensorflow.keras.layers import Input, Dense, Lambda\n'), ((5191, 5226), 'tensorflow.keras.models.Model', 'Model', (['[input_a, input_b]', 'distance'], {}), '([input_a, input_b], distance)\n', (5196, 5226), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((5241, 5250), 'tensorflow.keras.optimizers.RMSprop', 'RMSprop', ([], {}), '()\n', (5248, 5250), False, 'from tensorflow.keras.optimizers import RMSprop\n'), ((5604, 5666), 'sklearn.model_selection.train_test_split', 'train_test_split', (['pairs', 'label'], {'test_size': '(300)', 'random_state': '(42)'}), '(pairs, label, test_size=300, random_state=42)\n', (5620, 5666), False, 'from sklearn.model_selection import train_test_split\n'), ((1207, 1241), 'tensorflow.keras.backend.cast', 'K.cast', (['(y_pred < 0.5)', 'y_true.dtype'], {}), '(y_pred < 0.5, y_true.dtype)\n', (1213, 1241), True, 'from tensorflow.keras import backend as K\n'), ((1460, 1471), 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1469, 1471), True, 'from tensorflow.keras import backend as K\n'), ((4284, 4314), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)', '"""relu"""'], {'name': '"""fc3"""'}), "(128, 'relu', name='fc3')\n", (4289, 4314), False, 'from tensorflow.keras.layers import Input, Dense, Lambda\n'), ((5033, 5098), 'tensorflow.keras.layers.Lambda', 'Lambda', (['_euclidean_distance'], {'output_shape': 
'_eucl_dist_output_shape'}), '(_euclidean_distance, output_shape=_eucl_dist_output_shape)\n', (5039, 5098), False, 'from tensorflow.keras.layers import Input, Dense, Lambda\n'), ((2350, 2373), 'os.path.exists', 'os.path.exists', (['pair[0]'], {}), '(pair[0])\n', (2364, 2373), False, 'import os\n'), ((2378, 2401), 'os.path.exists', 'os.path.exists', (['pair[1]'], {}), '(pair[1])\n', (2392, 2401), False, 'import os\n'), ((2879, 2902), 'os.path.exists', 'os.path.exists', (['pair[0]'], {}), '(pair[0])\n', (2893, 2902), False, 'import os\n'), ((2907, 2930), 'os.path.exists', 'os.path.exists', (['pair[1]'], {}), '(pair[1])\n', (2921, 2930), False, 'import os\n'), ((3211, 3229), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['img1'], {}), '(img1)\n', (3223, 3229), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((3239, 3257), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['img2'], {}), '(img2)\n', (3251, 3257), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((3846, 3867), 'numpy.array', 'np.array', (['y_baches[i]'], {}), '(y_baches[i])\n', (3854, 3867), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import sys
import os
from lifelines.utils import concordance_index
from sklearn.metrics import r2_score
from torch.utils.data import DataLoader, TensorDataset
from torchcontrib.optim import SWA
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from argparse import ArgumentParser
sys.path.append('../')
sys.path.append('../../data/ml_mmrf')
sys.path.append('../../data/')
from ml_mmrf_v1.data import load_mmrf
from synthetic.synthetic_data import load_synthetic_data_trt, load_synthetic_data_noisy
from semi_synthetic.ss_data import *
from models.sfomm import SFOMM
from models.utils import *
# ave_diff test, linear, gated, rnn inftype
def return_per_class_acc(y_true, y_pred):
    """Compute per-class accuracy for integer class labels.

    Args:
        y_true: 1-D numpy array of ground-truth integer labels.
        y_pred: 1-D numpy array of predicted integer labels (same length).

    Returns:
        list: accuracy for each class index in 0..max(y_true). Classes with
        no ground-truth samples get 0.0 instead of raising ZeroDivisionError.
    """
    accs = []
    for cls in range(max(y_true) + 1):
        idxs = np.where(y_true == cls)
        t = y_true[idxs]
        p = y_pred[idxs]
        # BUG FIX: guard against class indices absent from y_true (len(t) == 0),
        # which previously raised ZeroDivisionError.
        acc = sum(t == p) / len(t) if len(t) else 0.0
        accs.append(acc)
    return accs
def test_sfomm_texp_mm():
    """Train SFOMM on the MM dataset for each configuration and report the
    validation NELBO (mean/std over 50 forward passes).

    Each config tuple is (max_epochs, mtype, inftype, reg_type, C,
    dim_stochastic, reg_all).
    """
    seed_everything(0)
    configs = [
        (1000, 'gated', 'rnn', 'l2', 0.01, 48, True)
        # (1000, 'linear', 'rnn', 'l2', 0.01, 48, True),
        # (1000, 'gated', 'birnn', 'l2', 0.01, 48, True),
        # (1000, 'linear', 'birnn', 'l2', 0.01, 48, True)
    ]
    parser = ArgumentParser()
    parser.add_argument('--model_name', type=str, default='sfomm', help='fomm, ssm, or gru')
    parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
    parser.add_argument('--anneal', type=float, default=-1., help='annealing rate')
    parser.add_argument('--fname', type=str, help='name of save file')
    parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
    parser.add_argument('--nsamples', default=1, type=int)
    parser.add_argument('--nsamples_syn', default=50, type=int, help='number of training samples for synthetic data')
    parser.add_argument('--optimizer_name', type=str, default='adam')
    parser.add_argument('--dataset', default='mm', type=str)
    parser.add_argument('--loss_type', type=str, default='unsup')
    parser.add_argument('--eval_type', type=str, default='nelbo')
    parser.add_argument('--bs', default=200, type=int, help='batch size')
    parser.add_argument('--fold', default=1, type=int)
    # THIS LINE IS KEY TO PULL THE MODEL NAME
    temp_args, _ = parser.parse_known_args()
    # add rest of args from SSM and base trainer
    parser = SFOMM.add_model_specific_args(parser)
    parser = Trainer.add_argparse_args(parser)
    # parse args and convert to dict
    for k,config in enumerate(configs):
        print(f'running config: {config}')
        # Unpack the config tuple and override the parsed CLI defaults.
        max_epochs, mtype, inftype, reg_type, C, ds, reg_all = config
        args = parser.parse_args()
        args.max_epochs = max_epochs
        args.mtype = mtype
        args.alpha1_type = 'linear'
        args.inftype = inftype
        args.reg_type = reg_type
        args.C = C
        args.dim_stochastic= ds
        args.reg_all = reg_all
        args.add_stochastic = False
        dict_args = vars(args)
        # initialize FOMM w/ args and train
        model = SFOMM(**dict_args)
        checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/sfomm_gated')
        early_stop_callback = EarlyStopping(
            monitor='val_loss',
            min_delta=0.00,
            patience=10,
            verbose=False,
            mode='min'
        )
        # NOTE(review): early_stop_callback constructed above but disabled here
        # (early_stop_callback=False) — confirm this is intentional.
        trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=checkpoint_callback, gpus=[2], early_stop_callback=False)
        trainer.fit(model)
        # evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
        if torch.cuda.is_available():
            device = torch.device('cuda:2')
        else:
            device = torch.device('cpu')
        _, valid_loader = model.load_helper('valid', device=device, oversample=False)
        # Repeat the forward pass 50 times to estimate mean/std of the NELBO.
        nelbos = []
        for i in range(50):
            (nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
            nelbos.append(nelbo.item())
        print(f'final nelbo for {config} (config {k+1}): mean: {np.mean(nelbos)}, std: {np.std(nelbos)}')
        # preds, _ = model.predict_ord(*valid_loader.dataset.tensors)
        # B, X, A, M, Y, CE = valid_loader.dataset.tensors
        # from sklearn.metrics import f1_score, precision_score, recall_score, roc_auc_score
        # preds = pt_numpy(preds.argmax(dim=1))
        # print(preds); print(Y)
        # f1 = f1_score(pt_numpy(Y), preds, average='weighted')
        # precision = precision_score(pt_numpy(Y), preds, average='weighted')
        # recall = recall_score(pt_numpy(Y), preds, average='weighted')
        # auc = roc_auc_score(pt_numpy(Y), preds, average='weighted')
        # # auc=0.
        # acc_class = return_per_class_acc(pt_numpy(Y), preds)
        # print(f'F1: {f1}, Precision: {precision}, Recall: {recall}, AUC: {auc}, per_class_acc: {acc_class}')
    # return preds
def test_sfomm_texp_syn():
    """Train SFOMM (treatment_exp model) on the synthetic dataset and check
    that the validation MSE matches the reference value (~4.57)."""
    seed_everything(0)
    parser = ArgumentParser()
    parser.add_argument('--model_name', type=str, default='sfomm', help='fomm, ssm, or gru')
    parser.add_argument('--lr', type=float, default=8e-3, help='learning rate')
    parser.add_argument('--anneal', type=float, default=-1., help='annealing rate')
    parser.add_argument('--fname', type=str, help='name of save file')
    parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
    parser.add_argument('--nsamples', default=1, type=int)
    parser.add_argument('--nsamples_syn', default=50, type=int, help='number of training samples for synthetic data')
    parser.add_argument('--optimizer_name', type=str, default='adam')
    parser.add_argument('--dataset', default='synthetic', type=str)
    parser.add_argument('--loss_type', type=str, default='unsup')
    parser.add_argument('--eval_type', type=str, default='nelbo')
    parser.add_argument('--bs', default=600, type=int, help='batch size')
    parser.add_argument('--fold', default=1, type=int)
    # THIS LINE IS KEY TO PULL THE MODEL NAME
    temp_args, _ = parser.parse_known_args()
    # add rest of args from SSM and base trainer
    parser = SFOMM.add_model_specific_args(parser)
    parser = Trainer.add_argparse_args(parser)
    # parse args and convert to dict
    args = parser.parse_args()
    # Fixed hyper-parameters for this regression test.
    args.max_epochs = 5000
    args.mtype = 'treatment_exp'
    args.alpha1_type = 'linear'
    args.inftype = 'rnn'
    args.reg_type = 'l2'
    args.C = 0.
    args.dim_stochastic= 16
    args.reg_all = True
    args.add_stochastic = False
    dict_args = vars(args)
    # initialize FOMM w/ args and train
    model = SFOMM(**dict_args)
    trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=False, gpus=[2])
    trainer.fit(model)
    # evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
    if torch.cuda.is_available():
        device = torch.device('cuda:2')
    else:
        device = torch.device('cpu')
    _, valid_loader = model.load_helper('valid', device=device)
    preds, _ = model.predict(*valid_loader.dataset.tensors)
    mse, r2, ci = calc_stats(preds, valid_loader.dataset.tensors)
    # Regression check against the reference implementation's validation MSE.
    assert abs(mse - 4.57) < 1e-1
# Run the MM experiment when invoked as a script.
if __name__ == '__main__':
    test_sfomm_texp_mm()
| [
"pytorch_lightning.callbacks.ModelCheckpoint",
"numpy.mean",
"pytorch_lightning.callbacks.EarlyStopping",
"pytorch_lightning.Trainer.add_argparse_args",
"argparse.ArgumentParser",
"models.sfomm.SFOMM",
"numpy.where",
"pytorch_lightning.seed_everything",
"models.sfomm.SFOMM.add_model_specific_args",
... | [((442, 464), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (457, 464), False, 'import sys\n'), ((465, 502), 'sys.path.append', 'sys.path.append', (['"""../../data/ml_mmrf"""'], {}), "('../../data/ml_mmrf')\n", (480, 502), False, 'import sys\n'), ((503, 533), 'sys.path.append', 'sys.path.append', (['"""../../data/"""'], {}), "('../../data/')\n", (518, 533), False, 'import sys\n'), ((1088, 1106), 'pytorch_lightning.seed_everything', 'seed_everything', (['(0)'], {}), '(0)\n', (1103, 1106), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((1378, 1394), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1392, 1394), False, 'from argparse import ArgumentParser\n'), ((2575, 2612), 'models.sfomm.SFOMM.add_model_specific_args', 'SFOMM.add_model_specific_args', (['parser'], {}), '(parser)\n', (2604, 2612), False, 'from models.sfomm import SFOMM\n'), ((2626, 2659), 'pytorch_lightning.Trainer.add_argparse_args', 'Trainer.add_argparse_args', (['parser'], {}), '(parser)\n', (2651, 2659), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((5213, 5231), 'pytorch_lightning.seed_everything', 'seed_everything', (['(0)'], {}), '(0)\n', (5228, 5231), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((5246, 5262), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (5260, 5262), False, 'from argparse import ArgumentParser\n'), ((6450, 6487), 'models.sfomm.SFOMM.add_model_specific_args', 'SFOMM.add_model_specific_args', (['parser'], {}), '(parser)\n', (6479, 6487), False, 'from models.sfomm import SFOMM\n'), ((6501, 6534), 'pytorch_lightning.Trainer.add_argparse_args', 'Trainer.add_argparse_args', (['parser'], {}), '(parser)\n', (6526, 6534), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((6953, 6971), 'models.sfomm.SFOMM', 'SFOMM', ([], {}), '(**dict_args)\n', (6958, 6971), False, 'from models.sfomm import SFOMM\n'), ((6986, 7093), 
'pytorch_lightning.Trainer.from_argparse_args', 'Trainer.from_argparse_args', (['args'], {'deterministic': '(True)', 'logger': '(False)', 'checkpoint_callback': '(False)', 'gpus': '[2]'}), '(args, deterministic=True, logger=False,\n checkpoint_callback=False, gpus=[2])\n', (7012, 7093), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((7235, 7260), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7258, 7260), False, 'import torch\n'), ((908, 929), 'numpy.where', 'np.where', (['(y_true == i)'], {}), '(y_true == i)\n', (916, 929), True, 'import numpy as np\n'), ((3302, 3320), 'models.sfomm.SFOMM', 'SFOMM', ([], {}), '(**dict_args)\n', (3307, 3320), False, 'from models.sfomm import SFOMM\n'), ((3351, 3404), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""./checkpoints/sfomm_gated"""'}), "(filepath='./checkpoints/sfomm_gated')\n", (3366, 3404), False, 'from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\n'), ((3435, 3527), 'pytorch_lightning.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.0)', 'patience': '(10)', 'verbose': '(False)', 'mode': '"""min"""'}), "(monitor='val_loss', min_delta=0.0, patience=10, verbose=False,\n mode='min')\n", (3448, 3527), False, 'from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint\n'), ((3608, 3761), 'pytorch_lightning.Trainer.from_argparse_args', 'Trainer.from_argparse_args', (['args'], {'deterministic': '(True)', 'logger': '(False)', 'checkpoint_callback': 'checkpoint_callback', 'gpus': '[2]', 'early_stop_callback': '(False)'}), '(args, deterministic=True, logger=False,\n checkpoint_callback=checkpoint_callback, gpus=[2], early_stop_callback=\n False)\n', (3634, 3761), False, 'from pytorch_lightning import Trainer, seed_everything\n'), ((3910, 3935), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3933, 3935), False, 'import torch\n'), ((7279, 
7301), 'torch.device', 'torch.device', (['"""cuda:2"""'], {}), "('cuda:2')\n", (7291, 7301), False, 'import torch\n'), ((7330, 7349), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7342, 7349), False, 'import torch\n'), ((3958, 3980), 'torch.device', 'torch.device', (['"""cuda:2"""'], {}), "('cuda:2')\n", (3970, 3980), False, 'import torch\n'), ((4017, 4036), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4029, 4036), False, 'import torch\n'), ((4372, 4387), 'numpy.mean', 'np.mean', (['nelbos'], {}), '(nelbos)\n', (4379, 4387), True, 'import numpy as np\n'), ((4396, 4410), 'numpy.std', 'np.std', (['nelbos'], {}), '(nelbos)\n', (4402, 4410), True, 'import numpy as np\n')] |
# Copyright (c) 2018-2022, <NAME>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""The General Hough Transform (GHT) maps the orientation of edge points in a template image to a predefined origin
(typically the middle pixel of the image). Comparing this map with an image gives each point a rating of likelihood for
being a source point of that map.
:Author:
`<NAME>`
:Organization:
    Biophysics and Biotechnology, Julius-Maximilians-University of Würzburg
:Version: 2019.06.26
Example
-------
>>> target = cv2.imread("path_to_file")
>>> template = cv2.imread("path_to_file")
>>> ght_target = GHTImage(target, blur=5, canny_lim=(130,180))
>>> H = HoughTransform()
>>> H.target = ght_target
>>> ght_template = TemplateImage(template)
>>> H.template = ght_template
>>> res = H.transform()
"""
import pycuda
import pycuda.autoinit
import pycuda.driver as drv
import numpy as np
import cv2
from pycuda.compiler import SourceModule
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path+ r'/cuda_files/hough_transform.cu', 'r') as f:
cuda_code = f.read()
mod = SourceModule(cuda_code)
class GHTImage:
    """GHT-specific image preprocessing.

    Attributes
    ----------
    o_image: np.array
        Original input image.
    image: np.array
        Downscaled, box-blurred copy of the input.
    canny: np.array
        Canny edge map of ``image``.
    gradient: np.array
        Per-pixel gradient orientation derived from the edge map.
    """
    def __init__(self, image, blur=4, canny_lim=(130,200)):
        self._canny_lim = canny_lim
        self._blur = blur
        self._scale = 0.5
        self.o_image = image
        self._create_images()
    def _create_images(self):
        """Derive the resized/blurred image, its edge map and its gradient."""
        resized = cv2.resize(self.o_image, (0, 0), fx=self._scale, fy=self._scale,
                             interpolation=cv2.INTER_CUBIC)
        self.image = cv2.blur(resized, (self._blur, self._blur))
        self.canny = cv2.Canny(self.image, *self._canny_lim)
        self.gradient = self._create_gradient(self.canny)
    @staticmethod
    def _create_gradient(image):
        """Return the gradient orientation (rad) from 5x5 Sobel responses
        in the X and Y directions."""
        grad_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=5)
        grad_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=5)
        return np.arctan2(grad_x, grad_y)
class TemplateImage(GHTImage):
    """
    Extend GHTImage by template properties
    Attributes
    ----------
    o_image: np.array
        Input image
    image: np.array
        Resized and blurred input image
    canny: np.array
        Canny Edge processed image
    gradient: np.array
        Gradient image
    r_matrix_zero: np.array
        Vector mapping of edge points to a predefined origin
    r_matrix: np.array
        Rotated r_matrix_zero
    """
    def __init__(self, image, **kwargs):
        super().__init__(image, **kwargs)
        # Unrotated R-matrix; rotate_r_matrix() derives self.r_matrix from it.
        self.r_matrix_zero = self._create_r_matrix()
        self.r_matrix = np.array([])
    def _create_r_matrix(self):
        """
        Create R-matrix from gradient image.

        For every Canny edge pixel, store its offset vector to the image
        centre, bucketed into one of 9 gradient-orientation slices.
        Returns
        -------
        np.array
            R-Matrix (9 x max_bucket_len x 2, zero padded)
        """
        origin = np.asarray((self.gradient.shape[0] / 2, self.gradient.shape[1] / 2))
        Rtable = []
        phitable = []
        # 9 orientation buckets covering gradient angles in [-pi, pi].
        [phitable.append([]) for i in range(9)]
        [Rtable.append([]) for i in range(9)]
        for i in range(self.gradient.shape[0]):
            for j in range(self.gradient.shape[1]):
                if self.canny[i, j] == 255:
                    phi = self.gradient[i, j]
                    slice = self._is_slice(phi)
                    phitable[slice].append(phi)
                    Rtable[slice].append(np.array((origin[0] - i + 1, origin[1] - j + 1)))
        self.phi_table = phitable
        return self._table_to_matrix(Rtable)
    def rotate_r_matrix(self, angle):
        """
        Rotate R-Matrix by angle rad and store the result in self.r_matrix.
        Params
        -------
        angle: float
            Angle to rotate matrix in rad
        """
        s = np.sin(angle)
        c = np.cos(angle)
        new_table = []
        phitable = []
        [phitable.append([]) for i in range(9)]
        [new_table.append([]) for i in range(9)]
        for i, islice in enumerate(self.phi_table):
            for j, phi in enumerate(islice):
                vec = self.r_matrix_zero[i, j]
                phi_new = phi + angle
                # NOTE(review): phi_new can leave [-pi, pi], in which case
                # _is_slice may return an index outside 0..8 and raise
                # IndexError — confirm callers keep |angle| small enough.
                slice = self._is_slice(phi_new)
                # Standard 2-D rotation of the offset vector, rounded to ints.
                rotated = (int(round(c*vec[0]-s*vec[1])),int(round(s*vec[0]+c*vec[1])))
                new_table[slice].append(rotated)
                phitable[slice].append(phi_new)
        self.r_matrix = self._table_to_matrix(new_table)
    @staticmethod
    def _table_to_matrix(table):
        """
        Convert table with different column lengths to matrix. Added entries are filled with zeros
        Params
        -------
        table: list
            table to convert to numpy array
        Returns
        -------
        np.array
            Matrix
        """
        # Pad every orientation bucket to the length of the longest one.
        maximum = 0
        for i in table:
            if len(i) > maximum:
                maximum = len(i)
        R_matrix = np.zeros([9,maximum,2])
        for i,j in enumerate(table):
            for h,k in enumerate(j):
                R_matrix[i][h] = k
        return R_matrix
    @staticmethod
    def _is_slice(phi):
        # Map an angle in [-pi, pi] to one of 9 orientation buckets (0..8).
        return int(8 * (phi + np.pi) / (2 * np.pi))
class HoughTransform:
    """
    Perform Gradient Weighted General Hough Transform on the Graphics Processing Unit
    Attributes
    ----------
    rotation_min: float
        Start matching template at rotation_min (degrees)
    rotation_max: float
        End matching template at rotation_max (degrees)
    template: TemplateImage
        Template matched against the target image
    target: GHTImage
        Target image the template is matched on
    weighted: bool
        Weight the GHT by Gradient density
    Methods
    -------
    transform()
        Perform GHT algorithm with given data
    """
    def __init__(self, rotation_min=-10, rotation_max=10):
        self.rotation = np.array([rotation_min, rotation_max]).astype(np.int16)
        self.weighted = True
        self.r_table = []
        self.gradient_image = []
        # CUDA kernel compiled from cuda_files/hough_transform.cu
        self.create_accum = mod.get_function("create_accum")
    @property
    def rotation_min(self):
        return self.rotation[0]
    @rotation_min.setter
    def rotation_min(self, value):
        if value >= self.rotation[1]:
            # BUG FIX: the original message referenced undefined names
            # (rotation_min/rotation_max) and raised NameError instead.
            raise ValueError(f"Minimum rotation {value} should be smaller than maximum rotation {self.rotation[1]}")
        self.rotation[0] = value
    @property
    def rotation_max(self):
        return self.rotation[1]
    @rotation_max.setter
    def rotation_max(self, value):
        # BUG FIX: the original compared the new maximum against the current
        # maximum (self.rotation[1]) instead of the minimum, rejecting every
        # legal value; the message also referenced undefined names.
        if value <= self.rotation[0]:
            raise ValueError(f"Maximum rotation {value} should be larger than minimum rotation {self.rotation[0]}")
        self.rotation[1] = value
    @property
    def template(self):
        return self._templ
    @template.setter
    def template(self, templ):
        if not isinstance(templ, TemplateImage):
            raise ValueError("Template has to be an instance of class TemplateImage")
        self._templ = templ
    @property
    def target(self):
        return self._target
    @target.setter
    def target(self, target):
        if not isinstance(target, GHTImage):
            raise ValueError("Template has to be an instance of class GHTImage")
        self._target = target
        # Edge-density map: count of edge pixels in a 100x100 box per pixel.
        self.weight_array = cv2.boxFilter(self._target.canny.astype(np.uint16) / 255, -1, (100, 100), normalize=False)
    def _fast_weighted_maximas(self, accum, ratio=0.8):
        """Return candidate maxima of the accumulator weighted by edge density.

        Each result row is (row, col, accum value, weight, score)."""
        maxindex = np.unravel_index(accum.argmax(), accum.shape)
        candidates = np.argwhere(accum >= (accum[maxindex]*ratio))
        result = []
        accum_max = accum[candidates[...,0],candidates[...,1]]
        weight = self.weight_array[candidates[...,0],candidates[...,1]]
        for i,candidate in enumerate(candidates):
            result.append(np.array((candidate[0], candidate[1], accum_max[i], weight[i], 10000*accum_max[i]/weight[i]+6*accum_max[i])))
        result = np.asarray(result).astype(np.int32)
        return result
    def transform(self):
        """Run the GHT over the configured rotation range.

        Returns
        -------
        tuple
            (best angle in degrees, best candidate row of the accumulator).
        """
        accum = np.zeros_like(self._target.gradient)
        #allocate memmory for matrices
        #Pass target gradient image to GPU
        gpu_gradient_image = Matrix(self._target.gradient.astype(np.float32))
        max_threads = pycuda.tools.DeviceData(dev=None).max_threads
        n = max_threads/1024
        if n < 1:
            raise(EnvironmentError("Upgrade GPU"))
        block_size = (32,32,int(n))
        # res holds (best angle, best candidate row); index 4 is the score.
        res = 0,np.zeros(5)
        for i in range(self.rotation[1]-self.rotation[0]):
            angle = i+self.rotation[0]
            self._templ.rotate_r_matrix(np.pi*(angle)/180)
            #Pass empty accumulator array to GPU
            gpu_accumulator_array = Matrix(accum.astype(np.int32))
            try:
                # Pass r-table to GPU
                gpu_r_table = Matrix(self._templ.r_matrix.astype(np.int32))
            except Exception:
                # r_matrix may be empty if the template had no edge pixels.
                print("Probably r-table empty better check")
                break
            #Compute
            grid = int(self._target.gradient.shape[0]/block_size[0])+0,int(self._target.gradient.shape[1]/block_size[1])+0,int(self._templ.r_matrix.shape[1])
            self.create_accum(gpu_accumulator_array.ptr, gpu_r_table.ptr, gpu_gradient_image.ptr, block=block_size, grid=grid)
            acc = gpu_accumulator_array.get()
            #Weight or not weight accumulator array
            if self.weighted:
                weighted_acc = self._fast_weighted_maximas(acc, ratio=0.8)
            else:
                weighted_acc = acc
            #Find maximum values(result)
            x = np.unravel_index(weighted_acc[...,4].argmax(),weighted_acc.shape[0])
            if weighted_acc[x,4]>res[1][4]:
                res = (angle,weighted_acc[x])
        return res
class Matrix:
    """
    Wrapper class for matrix and matrixf struct on GPU:
    copies a 2-D numpy array to device memory and fills a small struct
    (width, height, stride, data pointer) read by the CUDA kernel.
    """
    def __init__(self, array):
        # Struct size: 16 bytes for the three ints (plus padding) followed
        # by one device pointer.
        mem_size = 16 + np.intp(0).nbytes
        self.ptr = drv.mem_alloc(mem_size)
        self.data = drv.to_device(array)
        self.shape, self.dtype = array.shape, array.dtype
        self.width = array.shape[1]
        self.height = array.shape[0]
        self.stride = np.int32(0).nbytes
        # Struct layout (byte offsets): 0=width, 4=height, 8=stride, 16=data.
        # NOTE(review): must match the struct definition in
        # cuda_files/hough_transform.cu — confirm on any kernel change.
        drv.memcpy_htod(int(self.ptr), np.int32(self.width))
        drv.memcpy_htod(int(self.ptr)+4, np.int32(self.height))
        drv.memcpy_htod(int(self.ptr)+8, np.int32(self.stride))
        drv.memcpy_htod(int(self.ptr)+16, np.intp(int(self.data)))
    def get(self):
        """Copy the device array back to host and return it as a numpy array."""
        #drv.memcpy_dtoh(array, self.data)
        return drv.from_device(self.data, self.shape, self.dtype)
| [
"pycuda.driver.to_device",
"numpy.int32",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"numpy.asarray",
"cv2.blur",
"pycuda.driver.mem_alloc",
"numpy.intp",
"numpy.cos",
"pycuda.driver.from_device",
"cv2.resize",
"cv2.Canny",
"pycuda.compiler.SourceModule",
"pycuda.tools.DeviceData",
"... | [((2532, 2555), 'pycuda.compiler.SourceModule', 'SourceModule', (['cuda_code'], {}), '(cuda_code)\n', (2544, 2555), False, 'from pycuda.compiler import SourceModule\n'), ((2407, 2433), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2423, 2433), False, 'import os\n'), ((3198, 3297), 'cv2.resize', 'cv2.resize', (['self.o_image', '(0, 0)'], {'fx': 'self._scale', 'fy': 'self._scale', 'interpolation': 'cv2.INTER_CUBIC'}), '(self.o_image, (0, 0), fx=self._scale, fy=self._scale,\n interpolation=cv2.INTER_CUBIC)\n', (3208, 3297), False, 'import cv2\n'), ((3314, 3360), 'cv2.blur', 'cv2.blur', (['self.image', '(self._blur, self._blur)'], {}), '(self.image, (self._blur, self._blur))\n', (3322, 3360), False, 'import cv2\n'), ((3381, 3442), 'cv2.Canny', 'cv2.Canny', (['self.image', 'self._canny_lim[0]', 'self._canny_lim[1]'], {}), '(self.image, self._canny_lim[0], self._canny_lim[1])\n', (3390, 3442), False, 'import cv2\n'), ((3738, 3781), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_64F', '(1)', '(0)'], {'ksize': '(5)'}), '(image, cv2.CV_64F, 1, 0, ksize=5)\n', (3747, 3781), False, 'import cv2\n'), ((3790, 3833), 'cv2.Sobel', 'cv2.Sobel', (['image', 'cv2.CV_64F', '(0)', '(1)'], {'ksize': '(5)'}), '(image, cv2.CV_64F, 0, 1, ksize=5)\n', (3799, 3833), False, 'import cv2\n'), ((3849, 3865), 'numpy.arctan2', 'np.arctan2', (['X', 'Y'], {}), '(X, Y)\n', (3859, 3865), True, 'import numpy as np\n'), ((4518, 4530), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4526, 4530), True, 'import numpy as np\n'), ((4720, 4788), 'numpy.asarray', 'np.asarray', (['(self.gradient.shape[0] / 2, self.gradient.shape[1] / 2)'], {}), '((self.gradient.shape[0] / 2, self.gradient.shape[1] / 2))\n', (4730, 4788), True, 'import numpy as np\n'), ((5589, 5602), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5595, 5602), True, 'import numpy as np\n'), ((5615, 5628), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5621, 5628), True, 'import numpy as np\n'), 
((6711, 6736), 'numpy.zeros', 'np.zeros', (['[9, maximum, 2]'], {}), '([9, maximum, 2])\n', (6719, 6736), True, 'import numpy as np\n'), ((9305, 9350), 'numpy.argwhere', 'np.argwhere', (['(accum >= accum[maxindex] * ratio)'], {}), '(accum >= accum[maxindex] * ratio)\n', (9316, 9350), True, 'import numpy as np\n'), ((9809, 9845), 'numpy.zeros_like', 'np.zeros_like', (['self._target.gradient'], {}), '(self._target.gradient)\n', (9822, 9845), True, 'import numpy as np\n'), ((11795, 11818), 'pycuda.driver.mem_alloc', 'drv.mem_alloc', (['mem_size'], {}), '(mem_size)\n', (11808, 11818), True, 'import pycuda.driver as drv\n'), ((11839, 11859), 'pycuda.driver.to_device', 'drv.to_device', (['array'], {}), '(array)\n', (11852, 11859), True, 'import pycuda.driver as drv\n'), ((12365, 12415), 'pycuda.driver.from_device', 'drv.from_device', (['self.data', 'self.shape', 'self.dtype'], {}), '(self.data, self.shape, self.dtype)\n', (12380, 12415), True, 'import pycuda.driver as drv\n'), ((10032, 10065), 'pycuda.tools.DeviceData', 'pycuda.tools.DeviceData', ([], {'dev': 'None'}), '(dev=None)\n', (10055, 10065), False, 'import pycuda\n'), ((10229, 10240), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (10237, 10240), True, 'import numpy as np\n'), ((12013, 12024), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (12021, 12024), True, 'import numpy as np\n'), ((12071, 12091), 'numpy.int32', 'np.int32', (['self.width'], {}), '(self.width)\n', (12079, 12091), True, 'import numpy as np\n'), ((12134, 12155), 'numpy.int32', 'np.int32', (['self.height'], {}), '(self.height)\n', (12142, 12155), True, 'import numpy as np\n'), ((12198, 12219), 'numpy.int32', 'np.int32', (['self.stride'], {}), '(self.stride)\n', (12206, 12219), True, 'import numpy as np\n'), ((7635, 7673), 'numpy.array', 'np.array', (['[rotation_min, rotation_max]'], {}), '([rotation_min, rotation_max])\n', (7643, 7673), True, 'import numpy as np\n'), ((9582, 9702), 'numpy.array', 'np.array', (['(candidate[0], 
candidate[1], accum_max[i], weight[i], 10000 * accum_max[i] /\n weight[i] + 6 * accum_max[i])'], {}), '((candidate[0], candidate[1], accum_max[i], weight[i], 10000 *\n accum_max[i] / weight[i] + 6 * accum_max[i]))\n', (9590, 9702), True, 'import numpy as np\n'), ((9709, 9727), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (9719, 9727), True, 'import numpy as np\n'), ((11758, 11768), 'numpy.intp', 'np.intp', (['(0)'], {}), '(0)\n', (11765, 11768), True, 'import numpy as np\n'), ((5252, 5300), 'numpy.array', 'np.array', (['(origin[0] - i + 1, origin[1] - j + 1)'], {}), '((origin[0] - i + 1, origin[1] - j + 1))\n', (5260, 5300), True, 'import numpy as np\n')] |
import numpy as np
from typing import List, Set
from .model_controllers import AdaptiveController
from .controller_utils import ModelResults
class AdaptiveSelector:
    """For each budget, select the adaptive controller with the best
    validation accuracy at that budget; look up by nearest budget."""

    def __init__(self, controllers: 'List[AdaptiveController]', model_valid_results: 'List[ModelResults]'):
        self._controllers = controllers
        # BUG FIX: original referenced the undefined name `valid_results`.
        self._valid_results = model_valid_results

        # Get the budgets for all controllers.
        # BUG FIX: set has no extend(); use update() to take the union.
        budget_set = set()
        for controller in controllers:
            budget_set.update(controller.budgets)

        budgets = np.array(sorted(budget_set))
        self._budgets = budgets

        # Create the policy: map each budget to the controller with the
        # highest validation accuracy at that budget.
        self._budget_dict = dict()  # budget -> best AdaptiveController
        for budget in budgets:
            max_accuracy = 0.0
            best_controller = None

            for controller, valid_results in zip(controllers, model_valid_results):
                if budget in controller.budgets:
                    accuracy = controller.get_accuracy(budget=budget, model_results=valid_results)

                    if accuracy > max_accuracy:
                        max_accuracy = accuracy
                        best_controller = controller

            assert best_controller is not None, 'Could not find controller for budget: {0}'.format(budget)
            self._budget_dict[budget] = best_controller

    def get_controller(self, budget: float) -> 'AdaptiveController':
        """Return the controller registered for the budget nearest `budget`."""
        budget_diff = np.abs(self._budgets - budget)
        model_idx = np.argmin(budget_diff)
        # BUG FIX: original indexed _budgets with the raw budget value
        # instead of the argmin index.
        nearest_budget = self._budgets[model_idx]

        return self._budget_dict[nearest_budget]
| [
"numpy.argmin",
"numpy.abs"
] | [((1500, 1530), 'numpy.abs', 'np.abs', (['(self._budgets - budget)'], {}), '(self._budgets - budget)\n', (1506, 1530), True, 'import numpy as np\n'), ((1551, 1573), 'numpy.argmin', 'np.argmin', (['budget_diff'], {}), '(budget_diff)\n', (1560, 1573), True, 'import numpy as np\n')] |
from Beam import Beam
from OpticalElement import Optical_element
from Shape import BoundaryRectangle
import numpy as np
import matplotlib.pyplot as plt
from numpy.testing import assert_almost_equal
from Vector import Vector
# NOTE(review): fx/fz are defined but never used below — possibly leftover
# focal-length parameters; confirm before deleting.
fx=0.5
fz=0.5
# Build a 5000-ray beam with flat (uniform) divergence of +/-0.05 in x and z.
beam=Beam(5000)
#beam.set_divergences_collimated()
#beam.set_rectangular_spot(1.,-1.,1.,-1.)
beam.set_flat_divergence(0.05,0.05)
beam.plot_xz()
beam.plot_xpzp()
# Trace the beam through an ideal thin lens with object distance p=2 and
# image distance q=5.
lens=Optical_element()
lens.set_parameters(p=2.,q=5.)
beam=lens.trace_ideal_lens(beam)
beam.plot_xz()
# Trace through a hyperboloid element at theta=0; p and q chosen so that
# p + q = 5 (the lens image distance).
hyp=Optical_element.initialize_my_hyperboloid(p=5-np.sqrt(2),q=np.sqrt(2),theta=0)
beam=hyp.trace_optical_element(beam)
beam.plot_xz()
plt.show()
"Beam.Beam",
"numpy.sqrt",
"matplotlib.pyplot.show",
"OpticalElement.Optical_element"
] | [((246, 256), 'Beam.Beam', 'Beam', (['(5000)'], {}), '(5000)\n', (250, 256), False, 'from Beam import Beam\n'), ((410, 427), 'OpticalElement.Optical_element', 'Optical_element', ([], {}), '()\n', (425, 427), False, 'from OpticalElement import Optical_element\n'), ((647, 657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (655, 657), True, 'import matplotlib.pyplot as plt\n'), ((573, 583), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (580, 583), True, 'import numpy as np\n'), ((560, 570), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (567, 570), True, 'import numpy as np\n')] |
def calculate_line_diff(line1_start=(41.88695, -87.63248), line1_end=(41.88692, -87.63539),
                        line2_start=(41.88695, -87.62951), line2_end=(41.88695, -87.63248)):
    """Compare the (absolute) slopes of two line segments.

    Slopes are computed as d(lat)/d(lon) for each segment, made positive, and
    the difference is returned. Generalised from the original hard-coded
    coordinates: the defaults reproduce the original "SAME LINE" example.

    Args:
        line1_start, line1_end: (lat, lon) endpoints of the first segment.
        line2_start, line2_end: (lat, lon) endpoints of the second segment.

    Returns:
        float: abs-slope of line 1 minus abs-slope of line 2.

    Raises:
        ZeroDivisionError: if either segment is horizontal (d(lon) == 0),
        same as the original code.
    """
    tf_x1 = line1_start[0]
    tf_x2 = line1_end[0]
    tf_y1 = line1_start[1]
    tf_y2 = line1_end[1]
    rn_x1 = line2_start[0]
    rn_x2 = line2_end[0]
    rn_y1 = line2_start[1]
    rn_y2 = line2_end[1]

    tf_m = (tf_x2 - tf_x1) / (tf_y2 - tf_y1)
    rn_m = (rn_x2 - rn_x1) / (rn_y2 - rn_y1)
    # Compare magnitudes only.
    if tf_m < 0:
        tf_m = -(tf_m)
    if rn_m < 0:
        rn_m = -(rn_m)

    diff = tf_m - rn_m
    # Improvement: the original computed `diff` and then discarded it; return
    # it so callers can apply their own threshold (e.g. diff > 0.05 => 'DIFF').
    return diff
############################################################################################################
############################################################################################################
############################################################################################################
from math import atan2, cos, sin, degrees
def verify_angle(line2_start=(41.88695, -87.62951), line2_end=(41.88695, -87.63248)):
    """Compute a bearing-style angle between two (lat, lon) points.

    Bug fix: the original referenced `line2_start`/`line2_end` which were only
    defined inside `calculate_line_diff`, so calling it raised a NameError.
    The coordinates are now parameters whose defaults are the "line 2" values
    from `calculate_line_diff`.

    NOTE(review): the trig below feeds latitude/longitude *degrees* straight
    into radian-based functions — for a true great-circle bearing the inputs
    should be converted with math.radians() first. Behaviour is kept as-is.

    Returns:
        float: angle normalised to [0, 360).
    """
    lat1 = line2_start[0]
    lon1 = line2_start[1]
    lat2 = line2_end[0]
    lon2 = line2_end[1]

    angle = atan2(cos(lat1)*sin(lat2)-sin(lat1) *
                  cos(lat2)*cos(lon2-lon1), sin(lon2-lon1)*cos(lat2))
    bearing = (degrees(angle) + 360) % 360
    print(bearing)
    return bearing
############################################################################################################
############################################################################################################
############################################################################################################
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
def measure(n):
    """Measurement model, return two coupled measurements."""
    # Two independent normal draws; the same base sample enters both outputs,
    # which is what couples them.
    base = np.random.normal(size=n)
    noise = np.random.normal(scale=0.5, size=n)
    return base + noise, base - noise
def calculate_kde():
    """Fit a 2-D Gaussian KDE to 2000 coupled measurements and display it.

    Draws samples via measure(), evaluates the density on a 100x100 grid
    spanning the sample range, and shows the density image with the raw
    points overlaid. Blocks on plt.show().
    """
    m1, m2 = measure(2000)
    xmin = m1.min()
    xmax = m1.max()
    ymin = m2.min()
    ymax = m2.max()
    print(m1)
    print(m2)
    # 100x100 evaluation grid over the sample bounding box (100j = 100 steps).
    X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([X.ravel(), Y.ravel()])
    values = np.vstack([m1, m2])
    kernel = stats.gaussian_kde(values)
    # Evaluate the KDE at every grid point and fold back to grid shape.
    Z = np.reshape(kernel(positions).T, X.shape)
    fig, ax = plt.subplots()
    # rot90 so the density image lines up with the (x, y) axes of the scatter.
    ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
              extent=[xmin, xmax, ymin, ymax])
    ax.plot(m1, m2, 'k.', markersize=2)
    ax.set_xlim([xmin, xmax])
    ax.set_ylim([ymin, ymax])
    plt.show()
# Sample point coordinates (latitudes / longitudes, index-aligned) used by my_kde().
lats = [41.80057462, 41.803056742, 41.807783124, 41.955842185, 41.925399981, 41.940909163, 41.927006879, 41.758259068, 41.756497643, 41.736613967]
lons = [-87.589225075, -87.603607686, -87.592093307, -87.650268135, -87.658559102, -87.63936916, -87.639020687, -87.615264202, -87.613695282, -87.61444973]
def my_kde():
    """Fit a 2-D Gaussian KDE to the module-level lats/lons points.

    Evaluates the density on a 100x100 grid, prints the density of one
    probe point and the grid min/max. The plotting code is kept commented
    out below.
    """
    xmin, xmax = min(lats), max(lats)
    ymin, ymax = min(lons), max(lons)
    # 100x100 evaluation grid over the data bounding box (100j = 100 steps).
    X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([X.ravel(), Y.ravel()])
    values = np.vstack([lats, lons])
    kernel = stats.gaussian_kde(values)
    Z = np.reshape(kernel(positions).T, X.shape)
    # Density at a single probe coordinate (lat, lon).
    print(kernel.pdf([41.87744754022766, -87.64837510877838]))
    print(Z)
    print(np.amax(Z))
    print(np.amin(Z))
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,extent=[xmin, xmax, ymin, ymax])
    # ax.plot(lats, lons, 'k.', markersize=2)
    # ax.set_xlim([xmin, xmax])
    # ax.set_ylim([ymin, ymax])
    # plt.show()
# my_kde()
############################################################################################################
############################################################################################################
############################################################################################################
from sklearn.neighbors import KernelDensity
def calculate():
    """Fit a sklearn Gaussian KernelDensity to ten (lat, lon) points and
    score one query point.

    Returns:
        np.ndarray: log-probability density of the query point.
    """
    # Bug fix: the original built `x` as a plain list of two rows and then
    # sliced it with `x[:, None]` (TypeError). KernelDensity.fit expects an
    # (n_samples, n_features) array, so build the array and transpose it.
    coords = np.array(
        [[41.80057462, 41.803056742, 41.807783124, 41.955842185, 41.925399981,
          41.940909163, 41.927006879, 41.758259068, 41.756497643, 41.736613967],
         [-87.589225075, -87.603607686, -87.592093307, -87.650268135, -87.658559102,
          -87.63936916, -87.639020687, -87.615264202, -87.613695282, -87.61444973]]).T
    kde = KernelDensity(bandwidth=1.0, kernel='gaussian')
    kde.fit(coords)
    # Bug fix: the original defined `xd` but scored `x_d` (NameError); the
    # query must also be 2-D: one sample with two features.
    x_d = np.array([[41.87744754022766, -87.64837510877838]])
    logprob = kde.score_samples(x_d)
    print(logprob)
    return logprob
#calculate()
############################################################################################################
############################################################################################################
############################################################################################################
import multiprocessing as mp
import time
def callable_func(param):
    """Demo worker: sleep 5s, echo its parameter, sleep 5s, announce exit."""
    time.sleep(5)
    print(param)
    time.sleep(5)
    print('Leaving {0}'.format(param))
def main_mp():
    """Spawn fifteen callable_func workers, start them all, then wait for all."""
    workers = [mp.Process(target=callable_func, args=[i]) for i in range(15)]
    # Launch every worker first so they run concurrently...
    for worker in workers:
        worker.start()
    # ...then block until each one has exited.
    for worker in workers:
        worker.join()
# main_mp() | [
"numpy.random.normal",
"scipy.stats.gaussian_kde",
"numpy.amin",
"multiprocessing.Process",
"math.degrees",
"sklearn.neighbors.KernelDensity",
"time.sleep",
"math.cos",
"numpy.vstack",
"numpy.rot90",
"math.sin",
"numpy.amax",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((2039, 2063), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n'}), '(size=n)\n', (2055, 2063), True, 'import numpy as np\n'), ((2070, 2105), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.5)', 'size': 'n'}), '(scale=0.5, size=n)\n', (2086, 2105), True, 'import numpy as np\n'), ((2372, 2391), 'numpy.vstack', 'np.vstack', (['[m1, m2]'], {}), '([m1, m2])\n', (2381, 2391), True, 'import numpy as np\n'), ((2402, 2428), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['values'], {}), '(values)\n', (2420, 2428), False, 'from scipy import stats\n'), ((2487, 2501), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2499, 2501), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2690), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2688, 2690), True, 'import matplotlib.pyplot as plt\n'), ((3188, 3211), 'numpy.vstack', 'np.vstack', (['[lats, lons]'], {}), '([lats, lons])\n', (3197, 3211), True, 'import numpy as np\n'), ((3222, 3248), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['values'], {}), '(values)\n', (3240, 3248), False, 'from scipy import stats\n'), ((4367, 4414), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'bandwidth': '(1.0)', 'kernel': '"""gaussian"""'}), "(bandwidth=1.0, kernel='gaussian')\n", (4380, 4414), False, 'from sklearn.neighbors import KernelDensity\n'), ((4958, 4971), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4968, 4971), False, 'import time\n'), ((4987, 5000), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4997, 5000), False, 'import time\n'), ((2513, 2524), 'numpy.rot90', 'np.rot90', (['Z'], {}), '(Z)\n', (2521, 2524), True, 'import numpy as np\n'), ((3373, 3383), 'numpy.amax', 'np.amax', (['Z'], {}), '(Z)\n', (3380, 3383), True, 'import numpy as np\n'), ((3392, 3402), 'numpy.amin', 'np.amin', (['Z'], {}), '(Z)\n', (3399, 3402), True, 'import numpy as np\n'), ((1474, 1490), 'math.sin', 'sin', (['(lon2 - lon1)'], {}), '(lon2 - lon1)\n', (1477, 1490), False, 
'from math import atan2, cos, sin, degrees\n'), ((1489, 1498), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (1492, 1498), False, 'from math import atan2, cos, sin, degrees\n'), ((1512, 1526), 'math.degrees', 'degrees', (['angle'], {}), '(angle)\n', (1519, 1526), False, 'from math import atan2, cos, sin, degrees\n'), ((5112, 5154), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'callable_func', 'args': '[i]'}), '(target=callable_func, args=[i])\n', (5122, 5154), True, 'import multiprocessing as mp\n'), ((1411, 1420), 'math.cos', 'cos', (['lat1'], {}), '(lat1)\n', (1414, 1420), False, 'from math import atan2, cos, sin, degrees\n'), ((1421, 1430), 'math.sin', 'sin', (['lat2'], {}), '(lat2)\n', (1424, 1430), False, 'from math import atan2, cos, sin, degrees\n'), ((1458, 1474), 'math.cos', 'cos', (['(lon2 - lon1)'], {}), '(lon2 - lon1)\n', (1461, 1474), False, 'from math import atan2, cos, sin, degrees\n'), ((1431, 1440), 'math.sin', 'sin', (['lat1'], {}), '(lat1)\n', (1434, 1440), False, 'from math import atan2, cos, sin, degrees\n'), ((1448, 1457), 'math.cos', 'cos', (['lat2'], {}), '(lat2)\n', (1451, 1457), False, 'from math import atan2, cos, sin, degrees\n')] |
import numpy as np
import sys


def solve(matrix):
    """Return (trace, bad_rows, bad_cols) for one square integer matrix.

    bad_rows / bad_cols count the rows / columns containing a repeated value
    (i.e. whose distinct-element count is smaller than the matrix size).
    """
    m = np.array(matrix)
    size = m.shape[0]
    trace = int(np.trace(m))
    bad_rows = sum(1 for row in m if len(set(row)) != size)
    bad_cols = sum(1 for col in m.T if len(set(col)) != size)
    return trace, bad_rows, bad_cols


def main():
    """Read T test cases from stdin and print one 'Case #i: k r c' line each."""
    lines = [line.rstrip('\n') for line in sys.stdin]
    n = int(lines[0])
    count = 1
    for i in range(n):
        size = int(lines[count])
        count += 1
        matrix = [[int(x) for x in lin.split()] for lin in lines[count:count + size]]
        k, r, c = solve(matrix)
        # Bug fix: the original print("Case #"+str(i+1)+": ", k, r, c) emitted a
        # double space after the colon (the trailing space in the literal plus
        # print's separator); judges expect exactly one space.
        print("Case #{0}: {1} {2} {3}".format(i + 1, k, r, c))
        count += size


# Guard added so importing this module does not consume stdin.
if __name__ == "__main__":
    main()
| [
"numpy.array",
"numpy.trace"
] | [((278, 289), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (286, 289), True, 'import numpy as np\n'), ((298, 309), 'numpy.trace', 'np.trace', (['m'], {}), '(m)\n', (306, 309), True, 'import numpy as np\n')] |
"""Implementation of parallel computation of the
velocity integrals as a function of the integral
variable y from the Gordeyev integral.
"""
import ctypes
import multiprocessing as mp
from functools import partial
import numpy as np
import scipy.integrate as si
from isr_spectrum.inputs import config as cf
def integrand(y, params, v, f):
    """Integrate from `0` to `V_MAX` with an integrand on
    the form `e^{-iwt}f(t)`, for every value in the np.ndarray `y`.

    Arguments:
        y {np.ndarray} -- sample points of integration variable
                          from Gordeyev integral
        params {dict} -- plasma parameters
        v {np.ndarray} -- sample points of VDF
        f {np.ndarray} -- value of VDF at sample points

    Returns:
        np.ndarray -- the value of the velocity integral at every
                      sample of the integration variable
    """
    # Pair each y sample with its position so workers know where to write.
    idx = set(enumerate(y))
    func = partial(parallel, params, v, f)
    pool = mp.Pool()
    # map() blocks until all workers are done; results are written in place
    # into the module-level shared `array` by `parallel`, not returned here.
    # NOTE(review): this relies on fork-style multiprocessing so the workers
    # share `array`; under the 'spawn' start method each worker would get its
    # own copy — verify on non-Linux platforms.
    pool.map(func, idx)
    pool.close()
    return array
def parallel(params, v, f, index):
    """Worker: store the velocity integral for one (position, y-value) pair
    from enumerate(y) into the module-level shared `array`.
    """
    array[index[0]] = v_int_integrand(index[1], params, v, f)
# Velocity integral $\label{lst:velocity}$
def v_int_integrand(y, params, v, f):
    """Simpson-integrate v * sin(p(y, params) * v) * f(v) over the v samples."""
    oscillation = np.sin(p(y, params) * v)
    return si.simps(v * oscillation * f, v)
return res
def p(y, params):
    """From Mace [2003].

    Args:
        y {np.ndarray} -- parameter from Gordeyev integral
        params {dict} -- plasma parameters

    Returns:
        np.ndarray -- value of the `p` function
    """
    # Split the radar wavenumber into components perpendicular and parallel
    # to the magnetic field.
    k_perp = params["K_RADAR"] * np.sin(params["THETA"])
    k_par = params["K_RADAR"] * np.cos(params["THETA"])
    perp_term = 2 * k_perp ** 2 / params["w_c"] ** 2 * (1 - np.cos(y * params["w_c"]))
    par_term = k_par ** 2 * y ** 2
    return (perp_term + par_term) ** 0.5
def shared_array(shape):
    """
    Form a zero-initialised numpy array backed by shared memory.
    https://tinyurl.com/c9m75k2

    Generalised: allocates prod(shape) doubles so multi-dimensional shapes
    work too. The original allocated only shape[0] elements, which made the
    reshape below fail for anything but 1-D shapes; for 1-D shapes the
    behaviour is unchanged.
    """
    n_items = int(np.prod(shape))
    shared_array_base = mp.Array(ctypes.c_double, n_items)
    shared_arr = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_arr = shared_arr.view(np.double).reshape(*shape)
    return shared_arr
return shared_arr
# Y_N_POINTS = $N_y$
# Module-level shared result buffer, one slot per Gordeyev sample point;
# filled in place by the `parallel` workers launched from `integrand`.
array = shared_array((int(cf.Y_N_POINTS),))
| [
"multiprocessing.Array",
"scipy.integrate.simps",
"functools.partial",
"multiprocessing.Pool",
"numpy.cos",
"numpy.sin"
] | [((901, 932), 'functools.partial', 'partial', (['parallel', 'params', 'v', 'f'], {}), '(parallel, params, v, f)\n', (908, 932), False, 'from functools import partial\n'), ((944, 953), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (951, 953), True, 'import multiprocessing as mp\n'), ((1261, 1277), 'scipy.integrate.simps', 'si.simps', (['val', 'v'], {}), '(val, v)\n', (1269, 1277), True, 'import scipy.integrate as si\n'), ((1908, 1943), 'multiprocessing.Array', 'mp.Array', (['ctypes.c_double', 'shape[0]'], {}), '(ctypes.c_double, shape[0])\n', (1916, 1943), True, 'import multiprocessing as mp\n'), ((1554, 1577), 'numpy.sin', 'np.sin', (["params['THETA']"], {}), "(params['THETA'])\n", (1560, 1577), True, 'import numpy as np\n'), ((1610, 1633), 'numpy.cos', 'np.cos', (["params['THETA']"], {}), "(params['THETA'])\n", (1616, 1633), True, 'import numpy as np\n'), ((1699, 1724), 'numpy.cos', 'np.cos', (["(y * params['w_c'])"], {}), "(y * params['w_c'])\n", (1705, 1724), True, 'import numpy as np\n')] |
import glob
import importlib
import os
import unittest
import SimpleITK as sitk
import numpy as np
import sys
import seg_metrics.seg_metrics as sg
from parameterized import parameterized
from medutils.medutils import save_itk
import tempfile
# Image file extensions exercised by the save/load round-trip test.
SUFFIX_LS = {".mhd", ".mha", ".nrrd", ".nii", ".nii.gz"}
# 3-D test volume with physical metadata (origin/spacing/orientation).
TEST_CASE1 = [{
    "IMG": np.random.randint(low=-1500, high=1500, size=(512, 512, 200)),
    "ORIGIN": np.array([-192.345, 129.023, 1100]),
    "SPACING": np.array([0.602, 0.602, 0.3]),
    "ORIENTATION": np.array([1, 0, 0, 0, 1, 0, 0, 0, 1])}]
# 2-D counterpart of TEST_CASE1.
TEST_CASE2 = [{
    "IMG": np.random.randint(low=-1500, high=1500, size=(512, 512)),
    "ORIGIN": np.array([-192.345, 129.023]),
    "SPACING": np.array([0.602, 0.602]),
    "ORIENTATION": np.array([1, 0, 0, 1])}]
class Test_seg_metrics(unittest.TestCase):
    """Round-trip tests: an image saved with medutils.save_itk must load back
    with identical voxels, origin and spacing for every supported suffix."""

    @parameterized.expand([TEST_CASE1, TEST_CASE2])
    def test_save_and_load(self, image):
        """Save and reload `image` under every suffix; verify data + metadata."""
        with tempfile.TemporaryDirectory() as tempdir:
            for suffix in SUFFIX_LS:
                img_fpath = os.path.join(tempdir, 'test_img' + suffix)
                # print('img', image['IMG'].shape)
                save_itk(img_fpath, image['IMG'], image['ORIGIN'], image['SPACING'])  # save image
                load_img, load_origin, load_spacing = sg.load_itk(img_fpath, require_ori_sp=True)  # load image
                # print(SUFFIX_LS)
                # print('suffix', suffix)
                # print('load_origin', load_origin)
                # print('image[ORIGIN]', image['ORIGIN'])
                # assert_allclose returns None on success and raises otherwise;
                # assertIsNone just surfaces the raise through unittest.
                self.assertIsNone(np.testing.assert_allclose(load_img, image['IMG']))
                self.assertIsNone(np.testing.assert_allclose(load_origin, image['ORIGIN']))
                self.assertIsNone(np.testing.assert_allclose(load_spacing, image['SPACING']))
# Run the round-trip tests when executed directly.
if __name__ == '__main__':
    unittest.main()
"tempfile.TemporaryDirectory",
"parameterized.parameterized.expand",
"numpy.testing.assert_allclose",
"os.path.join",
"numpy.array",
"numpy.random.randint",
"seg_metrics.seg_metrics.load_itk",
"medutils.medutils.save_itk",
"unittest.main"
] | [((780, 826), 'parameterized.parameterized.expand', 'parameterized.expand', (['[TEST_CASE1, TEST_CASE2]'], {}), '([TEST_CASE1, TEST_CASE2])\n', (800, 826), False, 'from parameterized import parameterized\n'), ((1786, 1801), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1799, 1801), False, 'import unittest\n'), ((324, 385), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-1500)', 'high': '(1500)', 'size': '(512, 512, 200)'}), '(low=-1500, high=1500, size=(512, 512, 200))\n', (341, 385), True, 'import numpy as np\n'), ((397, 432), 'numpy.array', 'np.array', (['[-192.345, 129.023, 1100]'], {}), '([-192.345, 129.023, 1100])\n', (405, 432), True, 'import numpy as np\n'), ((445, 474), 'numpy.array', 'np.array', (['[0.602, 0.602, 0.3]'], {}), '([0.602, 0.602, 0.3])\n', (453, 474), True, 'import numpy as np\n'), ((491, 528), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 1, 0, 0, 0, 1]'], {}), '([1, 0, 0, 0, 1, 0, 0, 0, 1])\n', (499, 528), True, 'import numpy as np\n'), ((555, 611), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-1500)', 'high': '(1500)', 'size': '(512, 512)'}), '(low=-1500, high=1500, size=(512, 512))\n', (572, 611), True, 'import numpy as np\n'), ((623, 652), 'numpy.array', 'np.array', (['[-192.345, 129.023]'], {}), '([-192.345, 129.023])\n', (631, 652), True, 'import numpy as np\n'), ((665, 689), 'numpy.array', 'np.array', (['[0.602, 0.602]'], {}), '([0.602, 0.602])\n', (673, 689), True, 'import numpy as np\n'), ((706, 728), 'numpy.array', 'np.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (714, 728), True, 'import numpy as np\n'), ((881, 910), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (908, 910), False, 'import tempfile\n'), ((988, 1030), 'os.path.join', 'os.path.join', (['tempdir', "('test_img' + suffix)"], {}), "(tempdir, 'test_img' + suffix)\n", (1000, 1030), False, 'import os\n'), ((1098, 1166), 'medutils.medutils.save_itk', 'save_itk', (['img_fpath', "image['IMG']", 
"image['ORIGIN']", "image['SPACING']"], {}), "(img_fpath, image['IMG'], image['ORIGIN'], image['SPACING'])\n", (1106, 1166), False, 'from medutils.medutils import save_itk\n'), ((1236, 1279), 'seg_metrics.seg_metrics.load_itk', 'sg.load_itk', (['img_fpath'], {'require_ori_sp': '(True)'}), '(img_fpath, require_ori_sp=True)\n', (1247, 1279), True, 'import seg_metrics.seg_metrics as sg\n'), ((1515, 1565), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['load_img', "image['IMG']"], {}), "(load_img, image['IMG'])\n", (1541, 1565), True, 'import numpy as np\n'), ((1601, 1657), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['load_origin', "image['ORIGIN']"], {}), "(load_origin, image['ORIGIN'])\n", (1627, 1657), True, 'import numpy as np\n'), ((1693, 1751), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['load_spacing', "image['SPACING']"], {}), "(load_spacing, image['SPACING'])\n", (1719, 1751), True, 'import numpy as np\n')] |
"""
In vivo prediction robustness and first PC's
"""
import os
import string
import pandas as pd
import seaborn as sns
import numpy as np
from .FigureCommon import getSetup, Legend, subplotLabel
from ..StoneModMouseFit import InVivoPredict
def makeFigure():
    """Assemble the two-panel figure: FcgR expression (panel A) and in vivo
    prediction robustness (panel B). Returns the matplotlib Figure."""
    # Get list of axis objects
    ax, f = getSetup((6, 3), (1, 2))

    # Make FcgR expression plot
    FcgRexpression(ax[0])

    # Make the robustness plot
    robustnessPlot(ax[1])

    # Add subplot labels
    for ii, item in enumerate(ax):
        subplotLabel(item, string.ascii_uppercase[ii])

    # Tweak layout
    f.tight_layout()

    return f
def FcgRexpression(ax):
    """Plot murine FcgR expression per cell type as a grouped bar chart.

    (The original docstring, "Calculate robustness or load it.", was a
    copy-paste from robustnessPlot.)
    """
    # Load the packaged murine FcgR abundance table, relative to this module.
    filepath = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            "../data/murine-FcgR-abundance.csv")

    data = pd.melt(pd.read_csv(filepath), id_vars=['Cells'])

    # Pull the receptor number (R1..R4) out of the melted column names.
    data['Receptor'] = data.variable.str.extract('(R[1234])', expand=False)
    data.drop('variable', inplace=True, axis=1)

    # Setup replacement dict: receptor codes -> LaTeX display labels.
    FcsIDX = {'R1': r'mFc$\gamma$RI',
              'R2': r'mFc$\gamma$RIIB',
              'R3': r'mFc$\gamma$RIII',
              'R4': r'mFc$\gamma$RIV'}

    # Do replacement for receptors
    data.replace({"Receptor": FcsIDX}, inplace=True)

    # NOTE(review): sns.factorplot was renamed catplot in seaborn >= 0.9 and
    # later removed — verify the seaborn version pin before upgrading.
    sns.factorplot(x="Cells", y="value", hue="Receptor",
                   data=data, kind="bar", ax=ax, ci=63)

    ax.set_ylabel(r'Fc$\gamma$R Expression')
    ax.set_xlabel('')
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, rotation_mode="anchor", ha="right")
def robustnessPlot(ax):
    """ Vary IC concentration and avidity and show the prediction still stands. """
    # Setup the range of avidity and ligand concentration we'll look at
    gnus = np.logspace(1, 3, 3, base=2, dtype=np.int)
    Los = np.logspace(start=-11, stop=-7, num=35, dtype=np.float)

    # Cartesian product of every (avidity, concentration) pair.
    pp = pd.DataFrame(np.array(np.meshgrid(gnus, Los)).T.reshape(-1, 2),
                      columns=['gnus', 'Los'])

    # InVivoPredict returns a tuple; element [1] is the crossvalidation R^2.
    pp['CPredict'] = pp.apply(lambda x: InVivoPredict(x.values)[1], axis=1)

    # Change avidities to strings (LaTeX labels for the legend).
    pp['gnus'] = pp['gnus'].apply(lambda gnu: r'$\nu=' + str(int(gnu)) + '$')

    avcolors = dict(zip(pp['gnus'].unique(), sns.color_palette()[1:]))

    # Plot the calculated crossvalidation performance
    sns.FacetGrid(pp,
                  hue='gnus',
                  palette=sns.color_palette()[1:]).map(ax.semilogx, 'Los', 'CPredict')

    ax.legend(handles=Legend(pp['gnus'].unique(), avcolors, [], {}), bbox_to_anchor=(1, 1), loc=2)

    ax.set_xlabel('Assumed IC Conc. (M)')
    ax.set_ylabel('LOO Prediction R-Squared')
    ax.set_ylim(0.0, 1.0)
| [
"seaborn.factorplot",
"seaborn.color_palette",
"pandas.read_csv",
"os.path.abspath",
"numpy.meshgrid",
"numpy.logspace"
] | [((1292, 1385), 'seaborn.factorplot', 'sns.factorplot', ([], {'x': '"""Cells"""', 'y': '"""value"""', 'hue': '"""Receptor"""', 'data': 'data', 'kind': '"""bar"""', 'ax': 'ax', 'ci': '(63)'}), "(x='Cells', y='value', hue='Receptor', data=data, kind='bar',\n ax=ax, ci=63)\n", (1306, 1385), True, 'import seaborn as sns\n'), ((1757, 1799), 'numpy.logspace', 'np.logspace', (['(1)', '(3)', '(3)'], {'base': '(2)', 'dtype': 'np.int'}), '(1, 3, 3, base=2, dtype=np.int)\n', (1768, 1799), True, 'import numpy as np\n'), ((1810, 1865), 'numpy.logspace', 'np.logspace', ([], {'start': '(-11)', 'stop': '(-7)', 'num': '(35)', 'dtype': 'np.float'}), '(start=-11, stop=-7, num=35, dtype=np.float)\n', (1821, 1865), True, 'import numpy as np\n'), ((844, 865), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {}), '(filepath)\n', (855, 865), True, 'import pandas as pd\n'), ((731, 756), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (746, 756), False, 'import os\n'), ((2223, 2242), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (2240, 2242), True, 'import seaborn as sns\n'), ((1898, 1920), 'numpy.meshgrid', 'np.meshgrid', (['gnus', 'Los'], {}), '(gnus, Los)\n', (1909, 1920), True, 'import numpy as np\n'), ((2382, 2401), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (2399, 2401), True, 'import seaborn as sns\n')] |
import warnings
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm, tqdm_notebook
from itertools import chain
import qucumber.cplx as cplx
__all__ = [
"RBM_Module",
"BinomialRBM"
]
class RBM_Module(nn.Module):
    """Binary-binary Restricted Boltzmann Machine: a weight matrix plus
    visible/hidden biases, with block-Gibbs sampling utilities.

    Runs on CUDA when `gpu=True` and a GPU is available; otherwise on CPU.
    All parameters are double precision.
    """

    def __init__(self, num_visible, num_hidden, zero_weights=False,
                 gpu=True, seed=1234):
        """
        :param num_visible: Number of visible units.
        :param num_hidden: Number of hidden units.
        :param zero_weights: If True, initialise the weights to zero instead
                             of scaled Gaussian noise.
        :param gpu: Use CUDA when available.
        :param seed: RNG seed. NOTE(review): a seed of 0 is falsy and is
                     therefore silently skipped.
        """
        super(RBM_Module, self).__init__()
        self.num_visible = int(num_visible)
        self.num_hidden = int(num_hidden)

        if gpu and not torch.cuda.is_available():
            warnings.warn("Could not find GPU: will continue with CPU.",
                          ResourceWarning)

        self.gpu = gpu and torch.cuda.is_available()
        if seed:
            if self.gpu:
                torch.cuda.manual_seed(seed)
            else:
                torch.manual_seed(seed)

        self.device = torch.device('cuda') if self.gpu else torch.device('cpu')

        if zero_weights:
            self.weights = nn.Parameter(torch.zeros(self.num_hidden,
                                                     self.num_visible,
                                                     device=self.device,
                                                     dtype=torch.double),
                                        requires_grad=True)
        else:
            # Gaussian init scaled by 1/sqrt(num_visible).
            self.weights = nn.Parameter(
                (torch.randn(self.num_hidden, self.num_visible,
                             device=self.device, dtype=torch.double)
                 / np.sqrt(self.num_visible)),
                requires_grad=True)

        self.visible_bias = nn.Parameter(torch.zeros(self.num_visible,
                                                     device=self.device,
                                                     dtype=torch.double),
                                         requires_grad=True)
        self.hidden_bias = nn.Parameter(torch.zeros(self.num_hidden,
                                                    device=self.device,
                                                    dtype=torch.double),
                                        requires_grad=True)

    def __repr__(self):
        return ("RBM_Module(num_visible={}, num_hidden={}, gpu={})"
                .format(self.num_visible, self.num_hidden, self.gpu))

    def effective_energy(self, v):
        r"""The effective energies of the given visible states.

        .. math::

            \mathcal{E}(\bm{v}) &= \sum_{j}b_j v_j
                        + \sum_{i}\log
                            \left\lbrack 1 +
                                  \exp\left(c_{i} + \sum_{j} W_{ij} v_j\right)
                            \right\rbrack

        :param v: The visible states.
        :type v: torch.doubleTensor

        :returns: The effective energies of the given visible states.
        :rtype: torch.doubleTensor
        """
        # Promote a single state to a batch of one.
        if len(v.shape) < 2:
            v = v.view(1, -1)
        visible_bias_term = torch.mv(v, self.visible_bias)
        # softplus(x) = log(1 + e^x); sum over hidden units.
        hidden_bias_term = F.softplus(
            F.linear(v, self.weights, self.hidden_bias)
        ).sum(1)

        return visible_bias_term + hidden_bias_term

    def prob_v_given_h(self, h):
        """Given a hidden unit configuration, compute the probability
        vector of the visible units being on.

        :param h: The hidden unit
        :type h: torch.doubleTensor

        :returns: The probability of visible units being active given the
                  hidden state.
        :rtype: torch.doubleTensor
        """
        # Fix: F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
        p = torch.sigmoid(F.linear(h, self.weights.t(), self.visible_bias))
        return p

    def prob_h_given_v(self, v):
        """Given a visible unit configuration, compute the probability
        vector of the hidden units being on.

        :param v: The visible unit.
        :type v: torch.doubleTensor

        :returns: The probability of hidden units being active given the
                  visible state.
        :rtype: torch.doubleTensor
        """
        # Fix: F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
        p = torch.sigmoid(F.linear(v, self.weights, self.hidden_bias))
        return p

    def sample_v_given_h(self, h):
        """Sample/generate a visible state given a hidden state.

        :param h: The hidden state.
        :type h: torch.doubleTensor

        :returns: Tuple containing prob_v_given_h(h) and the sampled visible
                  state.
        :rtype: tuple(torch.doubleTensor, torch.doubleTensor)
        """
        p = self.prob_v_given_h(h)
        v = p.bernoulli()
        return p, v

    def sample_h_given_v(self, v):
        """Sample/generate a hidden state given a visible state.

        :param v: The visible state.
        :type v: torch.doubleTensor

        :returns: Tuple containing prob_h_given_v(v) and the sampled hidden
                  state.
        :rtype: tuple(torch.doubleTensor, torch.doubleTensor)
        """
        p = self.prob_h_given_v(v)
        h = p.bernoulli()
        return p, h

    def gibbs_sampling(self, k, v0):
        """Performs k steps of Block Gibbs sampling given an initial visible
        state v0.

        :param k: Number of Block Gibbs steps.
        :type k: int
        :param v0: The initial visible state.
        :type v0: torch.doubleTensor

        :returns: Tuple containing the initial visible state, v0,
                  the hidden state sampled from v0,
                  the visible state sampled after k steps,
                  the hidden state sampled after k steps and its corresponding
                  probability vector.
        :rtype: tuple(torch.doubleTensor, torch.doubleTensor,
                      torch.doubleTensor, torch.doubleTensor,
                      torch.doubleTensor)
        """
        ph, h0 = self.sample_h_given_v(v0)
        v, h = v0, h0
        for _ in range(k):
            pv, v = self.sample_v_given_h(h)
            ph, h = self.sample_h_given_v(v)
        return v0, h0, v, h, ph

    def sample(self, num_samples, k=10):
        """Samples from the RBM using k steps of Block Gibbs sampling.

        :param num_samples: The number of samples to be generated
        :type num_samples: int
        :param k: Number of Block Gibbs steps.
        :type k: int

        :returns: Samples drawn from the RBM
        :rtype: torch.doubleTensor
        """
        # Start each chain from uniform random visible states.
        dist = torch.distributions.bernoulli.Bernoulli(probs=0.5)
        v0 = (dist.sample(torch.Size([num_samples, self.num_visible]))
                  .to(device=self.device, dtype=torch.double))
        _, _, v, _, _ = self.gibbs_sampling(k, v0)
        return v

    def unnormalized_probability(self, v):
        r"""The unnormalized probabilities of the given visible states.

        .. math:: p(\bm{v}) = e^{\mathcal{E}(\bm{v})}

        :param v: The visible states.
        :type v: torch.doubleTensor

        :returns: The unnormalized probability of the given visible state(s).
        :rtype: torch.doubleTensor
        """
        return self.effective_energy(v).exp()

    def generate_visible_space(self):
        """Generates all possible visible states.

        :returns: A tensor of all possible spin configurations.
        :rtype: torch.doubleTensor
        """
        space = torch.zeros((1 << self.num_visible, self.num_visible),
                            device=self.device, dtype=torch.double)
        # Row i holds the binary expansion of i, most significant bit first.
        for i in range(1 << self.num_visible):
            d = i
            for j in range(self.num_visible):
                d, r = divmod(d, 2)
                space[i, self.num_visible - j - 1] = int(r)
        return space

    def log_partition(self, visible_space):
        """The natural logarithm of the partition function of the RBM.

        :param visible_space: A rank 2 tensor of the entire visible space.
        :type visible_space: torch.doubleTensor

        :returns: The natural log of the partition function.
        :rtype: torch.doubleTensor
        """
        # Log-sum-exp with the max subtracted for numerical stability.
        free_energies = self.effective_energy(visible_space)
        max_free_energy = free_energies.max()

        f_reduced = free_energies - max_free_energy
        logZ = max_free_energy + f_reduced.exp().sum().log()

        return logZ

    def partition(self, visible_space):
        """The partition function of the RBM.

        :param visible_space: A rank 2 tensor of the entire visible space.
        :type visible_space: torch.doubleTensor

        :returns: The partition function.
        :rtype: torch.doubleTensor
        """
        return self.log_partition(visible_space).exp()

    def probability(self, v, Z):
        """Evaluates the probability of the given vector(s) of visible
        units; NOT RECOMMENDED FOR RBMS WITH A LARGE # OF VISIBLE UNITS

        :param v: The visible states.
        :type v: torch.doubleTensor
        :param Z: The partition function.
        :type Z: float

        :returns: The probability of the given vector(s) of visible units.
        :rtype: torch.doubleTensor
        """
        return self.unnormalized_probability(v) / Z
class BinomialRBM(nn.Module):
    def __init__(self, num_visible, num_hidden=None, gpu=True, seed=1234):
        """Wrap an RBM_Module and add training/persistence support.

        :param num_visible: Number of visible units.
        :param num_hidden: Number of hidden units; defaults to num_visible.
        :param gpu: Use CUDA when available.
        :param seed: RNG seed forwarded to the RBM_Module.
        """
        super(BinomialRBM, self).__init__()
        self.num_visible = int(num_visible)
        self.num_hidden = int(num_hidden) if num_hidden is not None else self.num_visible
        self.rbm_module = RBM_Module(self.num_visible, self.num_hidden,
                                     gpu=gpu, seed=seed)
        # Callbacks may set this flag to abort fit() early.
        self.stop_training = False
def save(self, location, metadata={}):
"""Saves the RBM parameters to the given location along with
any given metadata.
:param location: The location to save the RBM parameters + metadata
:type location: str or file
:param metadata: Any extra metadata to store alongside the RBM
parameters
:type metadata: dict
"""
# add extra metadata to dictionary before saving it to disk
data = {**self.state_dict(), **metadata}
torch.save(data, location)
    def load(self, location):
        """Loads the RBM parameters from the given location ignoring any
        metadata stored in the file. Overwrites the RBM's parameters.

        .. note::
            The RBM object on which this function is called must
            have the same shape as the one who's parameters are being
            loaded.

        :param location: The location to load the RBM parameters from
        :type location: str or file
        """
        # strict=False lets load_state_dict ignore the extra metadata keys
        # that save() stores alongside the parameters.
        self.load_state_dict(torch.load(location), strict=False)
def compute_batch_gradients(self, k, pos_batch, neg_batch):
"""This function will compute the gradients of a batch of the training
data (data_file) given the basis measurements (chars_file).
:param k: Number of contrastive divergence steps in training.
:type k: int
:param pos_batch: Batch of the input data for the positive phase.
:type pos_batch: |DoubleTensor|
:param neg_batch: Batch of the input data for the negative phase.
:type neg_batch: |DoubleTensor|
:returns: Dictionary containing all the gradients of the parameters.
:rtype: dict
"""
v0, _, _, _, _ = self.rbm_module.gibbs_sampling(k, pos_batch)
_, _, vk, hk, phk = self.rbm_module.gibbs_sampling(k, neg_batch)
prob = F.sigmoid(F.linear(v0, self.rbm_module.weights,
self.rbm_module.hidden_bias))
pos_batch_size = float(len(pos_batch))
neg_batch_size = float(len(neg_batch))
w_grad = torch.einsum("ij,ik->jk", (prob, v0))/pos_batch_size
vb_grad = torch.einsum("ij->j", (v0,))/pos_batch_size
hb_grad = torch.einsum("ij->j", (prob,))/pos_batch_size
w_grad -= torch.einsum("ij,ik->jk", (phk, vk))/neg_batch_size
vb_grad -= torch.einsum("ij->j", (vk,))/neg_batch_size
hb_grad -= torch.einsum("ij->j", (phk,))/neg_batch_size
# Return negative gradients to match up nicely with the usual
# parameter update rules, which *subtract* the gradient from
# the parameters. This is in contrast with the RBM update
# rules which ADD the gradients (scaled by the learning rate)
# to the parameters.
return {"rbm_module": {"weights": -w_grad,
"visible_bias": -vb_grad,
"hidden_bias": -hb_grad}}
def fit(self, data, epochs=100, pos_batch_size=100, neg_batch_size=200,
        k=1, lr=1e-2, progbar=False, callbacks=None):
    """Execute the training of the RBM.

    :param data: The actual training data
    :type data: list(float)
    :param epochs: The number of parameter (i.e. weights and biases)
                   updates
    :type epochs: int
    :param pos_batch_size: The size of batches for the positive phase
                           taken from the data.
    :type pos_batch_size: int
    :param neg_batch_size: The size of batches for the negative phase
                           taken from the data
    :type neg_batch_size: int
    :param k: The number of contrastive divergence steps
    :type k: int
    :param lr: Learning rate
    :type lr: float
    :param progbar: Whether or not to display a progress bar. If "notebook"
                    is passed, will use a Jupyter notebook compatible
                    progress bar.
    :type progbar: bool or str
    :param callbacks: Callbacks to run while training. Defaults to no
                      callbacks.
    :type callbacks: list(qucumber.callbacks.Callback)
    """
    # Avoid the mutable-default-argument pitfall: the previous default
    # (callbacks=[]) was a single list shared across all calls.
    callbacks = [] if callbacks is None else callbacks
    disable_progbar = (progbar is False)
    progress_bar = tqdm_notebook if progbar == "notebook" else tqdm
    data = torch.tensor(data, device=self.rbm_module.device,
                        dtype=torch.double)
    optimizer = torch.optim.SGD([self.rbm_module.weights,
                                 self.rbm_module.visible_bias,
                                 self.rbm_module.hidden_bias],
                                lr=lr)
    for cb in callbacks:
        cb.on_train_start(self)
    for ep in progress_bar(range(epochs), desc="Epochs ",
                           disable=disable_progbar):
        pos_batches = DataLoader(data, batch_size=pos_batch_size,
                                 shuffle=True)
        # Enough negative-phase loaders to cover the positive batches.
        multiplier = int((neg_batch_size / pos_batch_size) + 0.5)
        neg_batches = [DataLoader(data, batch_size=neg_batch_size,
                                  shuffle=True)
                       for i in range(multiplier)]
        neg_batches = chain(*neg_batches)
        for cb in callbacks:
            cb.on_epoch_start(self, ep)
        if self.stop_training:  # check for stop_training signal
            break
        for batch_num, (pos_batch, neg_batch) in enumerate(zip(pos_batches,
                                                               neg_batches)):
            for cb in callbacks:
                cb.on_batch_start(self, ep, batch_num)
            all_grads = self.compute_batch_gradients(k, pos_batch,
                                                     neg_batch)
            optimizer.zero_grad()  # clear any cached gradients
            # assign all available gradients to the corresponding parameter
            for name, grads in all_grads.items():
                selected_RBM = getattr(self, name)
                for param in grads.keys():
                    getattr(selected_RBM, param).grad = grads[param]
            optimizer.step()  # tell the optimizer to apply the gradients
            for cb in callbacks:
                cb.on_batch_end(self, ep, batch_num)
        for cb in callbacks:
            cb.on_epoch_end(self, ep)
    for cb in callbacks:
        cb.on_train_end(self)
def sample(self, num_samples, k):
    """Draw samples from the RBM via k steps of block Gibbs sampling.

    :param num_samples: The number of samples to be generated
    :type num_samples: int
    :param k: Number of Block Gibbs steps.
    :type k: int
    :returns: Samples drawn from the RBM.
    :rtype: torch.doubleTensor
    """
    samples = self.rbm_module.sample(num_samples, k)
    return samples
class ComplexRBM:
# NOTE: In development. Might be unstable.
# NOTE: The 'full_unitaries' argument is not needed for training/sampling.
# This is only here for debugging the gradients. Delete this argument when
# gradients have been debugged.
def __init__(self, full_unitaries, psi_dictionary, num_visible,
             num_hidden_amp, num_hidden_phase, test_grads, gpu=True,
             seed=1234):
    """Build the amplitude and phase RBMs of the requested sizes.

    NOTE(review): ``full_unitaries`` is only used for debugging the
    gradients (see the class-level note).
    """
    self.full_unitaries = full_unitaries
    self.psi_dictionary = psi_dictionary
    self.num_visible = int(num_visible)
    self.num_hidden_amp = int(num_hidden_amp)
    self.num_hidden_phase = int(num_hidden_phase)
    self.test_grads = test_grads
    # The amplitude RBM is randomly seeded; the phase RBM starts from
    # zero weights and is left unseeded.
    self.rbm_amp = RBM_Module(num_visible, num_hidden_amp, gpu=gpu,
                              seed=seed)
    self.rbm_phase = RBM_Module(num_visible, num_hidden_phase,
                                zero_weights=True, gpu=gpu, seed=None)
    self.device = self.rbm_amp.device
def basis_state_generator(self, s):
    """Only works for binary visible units at the moment. Generates a vector
    given a spin value (0 or 1).

    :param s: A spin's value (either 0 or 1).
    :type s: float
    :returns: If s = 0, this is the (1,0) state in the basis of the
              measurement. If s = 1, this is the (0,1) state in the basis
              of the measurement.
    :rtype: torch.doubleTensor
    :raises ValueError: If s is neither 0 nor 1.
    """
    if s == 0.:
        return torch.tensor([[1., 0.], [0., 0.]], dtype=torch.double)
    if s == 1.:
        return torch.tensor([[0., 1.], [0., 0.]], dtype=torch.double)
    # Previously this fell through and implicitly returned None, which
    # produced confusing TypeErrors downstream; fail loudly instead.
    raise ValueError("Spin value must be 0 or 1, got {}".format(s))
def state_generator(self, num_non_trivial_unitaries):
    """Return every spin configuration of `num_non_trivial_unitaries` spins.

    Analogous to generate_visible_space, but restricted to the sites that
    were measured in a non-computational basis.

    :param num_non_trivial_unitaries: The number of sites measured in the
                                      non-computational basis.
    :type num_non_trivial_unitaries: int
    :returns: An array of all possible spin configurations of
              'num_non_trivial_unitaries' spins.
    :rtype: torch.doubleTensor
    """
    num_states = 2 ** num_non_trivial_unitaries
    states = torch.zeros((num_states, num_non_trivial_unitaries),
                         device=self.device,
                         dtype=torch.double)
    for row in range(num_states):
        for bit in range(num_non_trivial_unitaries):
            # Bit `bit` of `row`; least-significant bit goes rightmost,
            # so row index equals the binary value of the row.
            states[row][num_non_trivial_unitaries - bit - 1] = \
                (row >> bit) & 1
    return states
def unnormalized_probability_amp(self, v):
    r"""Unnormalized probability of the amplitude RBM.

    :param v: Visible unit(s).
    :type v: torch.doubleTensor
    :returns:
        :math:`p_{\lambda}(\bm{v}) = e^{\mathcal{E}_{\lambda}(\bm{v})}`
    :rtype: torch.doubleTensor
    """
    amp_prob = self.rbm_amp.unnormalized_probability(v)
    return amp_prob
def unnormalized_probability_phase(self, v):
    r"""Unnormalized probability of the phase RBM.

    :param v: Visible unit(s).
    :type v: torch.doubleTensor
    :returns: :math:`p_{\mu}(\bm{v}) = e^{\mathcal{E}_{\mu}(\bm{v})}`
    :rtype: torch.doubleTensor
    """
    phase_prob = self.rbm_phase.unnormalized_probability(v)
    return phase_prob
def normalized_wavefunction(self, v):
    r"""The RBM wavefunction.

    :param v: Visible unit(s).
    :type v: torch.doubleTensor
    :returns:
        .. math:: \psi_{\lambda\mu} =
                \sqrt{\frac{p_{\lambda}}{Z_{\lambda}}}
                \exp\left(\frac{i\log(p_{\mu})}{2}\right)
    :rtype: torch.doubleTensor
    """
    v_batch = v.view(-1, self.num_visible)
    # Modulus comes from the amplitude RBM, phase angle from the
    # phase RBM: angle = log(p_mu) / 2.
    modulus = self.unnormalized_probability_amp(v_batch).sqrt()
    angle = 0.5 * self.unnormalized_probability_phase(v_batch).log()
    psi = torch.zeros(2, v_batch.size()[0], dtype=torch.double)
    psi[0] = modulus * angle.cos()  # real part
    psi[1] = modulus * angle.sin()  # imaginary part
    # Normalize by the square root of the amplitude partition function.
    sqrt_Z = self.rbm_amp.partition(
        self.rbm_amp.generate_visible_space()).sqrt()
    return psi / sqrt_Z
def unnormalized_wavefunction(self, v):
    r"""The unnormalized RBM wavefunction.

    :param v: Visible unit(s).
    :type v: torch.doubleTensor
    :returns:
        .. math:: \tilde{\psi}_{\lambda\mu} =
                \sqrt{p_{\lambda}}
                \exp\left(\frac{i\log(p_{\mu})}{2}\right)
    :rtype: torch.doubleTensor
    """
    v_batch = v.view(-1, self.num_visible)
    # Same construction as normalized_wavefunction, but without dividing
    # by the square root of the partition function.
    modulus = self.unnormalized_probability_amp(v_batch).sqrt()
    angle = 0.5 * self.unnormalized_probability_phase(v_batch).log()
    psi = torch.zeros(2, v_batch.size()[0], dtype=torch.double)
    psi[0] = modulus * angle.cos()  # real part
    psi[1] = modulus * angle.sin()  # imaginary part
    return psi
def compute_batch_gradients(self, unitary_dict, k, batch, chars_batch):
    """This function will compute the gradients of a batch of the training
    data (data_file) given the basis measurements (chars_file).

    :param unitary_dict: Maps a basis character (e.g. 'X', 'Y') to its
                         single-site unitary matrix.
    :param k: Number of contrastive divergence steps in amplitude training.
    :type k: int
    :param batch: Batch of the input data.
    :type batch: torch.doubleTensor
    :param chars_batch: Batch of bases that correspondingly indicates the
                        basis each site in the batch was measured in.
    :type chars_batch: list(str)

    :returns: Dictionary containing all the gradients (negative): Gradient
              of weights, visible bias and hidden bias for the amplitude,
              Gradients of weights, visible bias and hidden bias for the
              phase.
    :rtype: dict
    """
    batch_size = len(batch)
    # Accumulators for the (negative) gradients, matching each
    # parameter's shape.
    g_weights_amp = torch.zeros_like(self.rbm_amp.weights)
    g_vb_amp = torch.zeros_like(self.rbm_amp.visible_bias)
    g_hb_amp = torch.zeros_like(self.rbm_amp.hidden_bias)
    g_weights_phase = torch.zeros_like(self.rbm_phase.weights)
    g_vb_phase = torch.zeros_like(self.rbm_phase.visible_bias)
    g_hb_phase = torch.zeros_like(self.rbm_phase.hidden_bias)
    # k steps of block Gibbs sampling on the amplitude RBM; the samples
    # feed the negative phase at the end of this function.
    [batch, h0_amp_batch, vk_amp_batch, hk_amp_batch, phk_amp_batch] = \
        self.rbm_amp.gibbs_sampling(k, batch)
    # Iterate through every data point in the batch.
    for row_count, v0 in enumerate(batch):
        # A counter for the number of non-trivial unitaries
        # (non-computational basis) in the data point.
        num_non_trivial_unitaries = 0
        # tau_indices will contain the index numbers of spins not in the
        # computational basis (Z). z_indices will contain the index numbers
        # of spins in the computational basis.
        tau_indices = []
        z_indices = []
        for j in range(self.num_visible):
            # Go through the unitaries (chars_batch[row_count]) of each
            # site in the data point, v0, and save indices of non-trivial.
            if chars_batch[row_count][j] != 'Z':
                num_non_trivial_unitaries += 1
                tau_indices.append(j)
            else:
                z_indices.append(j)
        if num_non_trivial_unitaries == 0:
            # If there are no non-trivial unitaries for the data point v0,
            # calculate the positive phase of regular (i.e. non-complex
            # RBM) gradient. Use the actual data point, v0.
            prob_amp = F.sigmoid(F.linear(v0, self.rbm_amp.weights,
                                           self.rbm_amp.hidden_bias))
            g_weights_amp -= (torch.einsum("i,j->ij", (prob_amp, v0))
                              / batch_size)
            g_vb_amp -= v0 / batch_size
            g_hb_amp -= prob_amp / batch_size
        else:
            # Compute the rotated gradients.
            [L_weights_amp, L_vb_amp, L_hb_amp,
             L_weights_phase, L_vb_phase, L_hb_phase] = \
                self.compute_rotated_grads(unitary_dict, k, v0,
                                           chars_batch[row_count],
                                           num_non_trivial_unitaries,
                                           z_indices, tau_indices)
            # Gradients of amplitude parameters take the real part
            # (index 0) of the rotated gradients.
            g_weights_amp -= L_weights_amp[0] / batch_size
            g_vb_amp -= L_vb_amp[0] / batch_size
            g_hb_amp -= L_hb_amp[0] / batch_size
            # Gradients of phase parameters take the imaginary part
            # (index 1) of the rotated gradients.
            g_weights_phase += L_weights_phase[1] / batch_size
            g_vb_phase += L_vb_phase[1] / batch_size
            g_hb_phase += L_hb_phase[1] / batch_size
    # Block gibbs sampling for negative phase (amplitude RBM only; the
    # phase RBM has no negative-phase contribution here).
    g_weights_amp += (torch.einsum("ij,ik->jk",
                                   (phk_amp_batch, vk_amp_batch))
                      / batch_size)
    g_vb_amp += torch.einsum("ij->j", (vk_amp_batch,)) / batch_size
    g_hb_amp += torch.einsum("ij->j", (phk_amp_batch,)) / batch_size
    """Return negative gradients to match up nicely with the usual
    parameter update rules, which *subtract* the gradient from
    the parameters. This is in contrast with the RBM update
    rules which ADD the gradients (scaled by the learning rate)
    to the parameters."""
    return {
        "rbm_amp": {
            "weights": g_weights_amp,
            "visible_bias": g_vb_amp,
            "hidden_bias": g_hb_amp
        },
        "rbm_phase": {
            "weights": g_weights_phase,
            "visible_bias": g_vb_phase,
            "hidden_bias": g_hb_phase
        }
    }
def compute_rotated_grads(self, unitary_dict, k, v0, characters,
                          num_non_trivial_unitaries,
                          z_indices, tau_indices):
    """Computes the rotated gradients.

    :param unitary_dict: Maps a basis character to its single-site unitary.
    :param k: Number of contrastive divergence steps (unused here;
              kept for interface symmetry with compute_batch_gradients).
    :param v0: A visible unit.
    :type v0: torch.doubleTensor
    :param characters: A string of characters corresponding to the basis
                       that each site in v0 was measured in.
    :type characters: str
    :param num_non_trivial_unitaries: The number of sites in v0 that aren't
                                      measured in the computational basis.
    :type num_non_trivial_unitaries: int
    :param z_indices: A list of indices that correspond to sites of v0 that
                      are measured in the computational basis.
    :type z_indices: list(int)
    :param tau_indices: A list of indices that correspond to sites of v0
                        that are not measured in the computational basis.
    :type tau_indices: list(int)

    :returns: Dictionary of the rotated gradients: L_weights_amp, L_vb_amp,
              L_hb_amp, L_weights_phase, L_vb_phase, L_hb_phase
    :rtype: dict
    """
    """Initialize the 'A' parameters (see alg 4.2)."""
    # Each 'A' accumulator has a leading dimension of 2: index 0 holds
    # the real part, index 1 the imaginary part.
    A_weights_amp = torch.zeros(2, self.rbm_amp.weights.size()[0],
                                self.rbm_amp.weights.size()[1],
                                device=self.device, dtype=torch.double)
    A_vb_amp = torch.zeros(2, self.rbm_amp.visible_bias.size()[0],
                           device=self.device, dtype=torch.double)
    A_hb_amp = torch.zeros(2, self.rbm_amp.hidden_bias.size()[0],
                           device=self.device, dtype=torch.double)
    A_weights_phase = torch.zeros(2, self.rbm_phase.weights.size()[0],
                                  self.rbm_phase.weights.size()[1],
                                  device=self.device, dtype=torch.double)
    A_vb_phase = torch.zeros(2, self.rbm_phase.visible_bias.size()[0],
                             device=self.device, dtype=torch.double)
    A_hb_phase = torch.zeros(2, self.rbm_phase.hidden_bias.size()[0],
                             device=self.device, dtype=torch.double)
    # 'B' will contain the coefficients of the rotated unnormalized
    # wavefunction.
    B = torch.zeros(2, device=self.device, dtype=torch.double)
    w_grad_amp = torch.zeros_like(self.rbm_amp.weights)
    vb_grad_amp = torch.zeros_like(self.rbm_amp.visible_bias)
    hb_grad_amp = torch.zeros_like(self.rbm_amp.hidden_bias)
    w_grad_phase = torch.zeros_like(self.rbm_phase.weights)
    vb_grad_phase = torch.zeros_like(self.rbm_phase.visible_bias)
    hb_grad_phase = torch.zeros_like(self.rbm_phase.hidden_bias)
    # Zero tensors used as the imaginary parts when promoting the real
    # gradients to complex form below.
    zeros_for_w_amp = torch.zeros_like(w_grad_amp)
    zeros_for_w_phase = torch.zeros_like(w_grad_phase)
    zeros_for_vb = torch.zeros_like(vb_grad_amp)
    zeros_for_hb_amp = torch.zeros_like(hb_grad_amp)
    zeros_for_hb_phase = torch.zeros_like(hb_grad_phase)
    # Loop over Hilbert space of the non trivial unitaries to build
    # the state.
    # NOTE(review): state_generator rebuilds the full table on every
    # iteration; hoisting it out of the loop would avoid repeated work.
    for j in range(2**num_non_trivial_unitaries):
        s = self.state_generator(num_non_trivial_unitaries)[j]
        # Creates a matrix where the jth row is the desired state, |S>,
        # a vector.
        # This is the sigma state.
        constructed_state = torch.zeros(
            self.num_visible, dtype=torch.double)
        # U accumulates the product of matrix elements of the local
        # unitaries; starts at complex 1 (real=1, imag=0).
        U = torch.tensor([1., 0.], dtype=torch.double, device=self.device)
        # Populate the |sigma> state (aka constructed_state) accordingly.
        for index in range(len(z_indices)):
            # These are the sites in the computational basis.
            constructed_state[z_indices[index]] = v0[z_indices[index]]
        for index in range(len(tau_indices)):
            # These are the sites that are NOT in the computational basis.
            constructed_state[tau_indices[index]] = s[index]
            aa = unitary_dict[characters[tau_indices[index]]]
            bb = self.basis_state_generator(v0[tau_indices[index]])
            cc = self.basis_state_generator(s[index])
            # Matrix element <v0 | U^dagger | s> for this site, folded
            # into the running product U.
            temp = cplx.inner_prod(cplx.MV_mult(
                cplx.compT_matrix(aa), bb), cc)
            U = cplx.scalar_mult(U, temp)
        # Positive phase gradients for phase and amp. Will be added into
        # the 'A' parameters.
        prob_amp = F.sigmoid(F.linear(constructed_state,
                                       self.rbm_amp.weights,
                                       self.rbm_amp.hidden_bias))
        prob_phase = F.sigmoid(F.linear(constructed_state,
                                         self.rbm_phase.weights,
                                         self.rbm_phase.hidden_bias))
        w_grad_amp = torch.einsum("i,j->ij", (prob_amp, constructed_state))
        vb_grad_amp = constructed_state
        hb_grad_amp = prob_amp
        w_grad_phase = torch.einsum("i,j->ij",
                                    (prob_phase, constructed_state))
        vb_grad_phase = constructed_state
        hb_grad_phase = prob_phase
        """
        In order to calculate the 'A' parameters below with the current
        complex library, I need to make the weights and biases complex.
        I fill the complex parts of the parameters with a tensor of zeros.
        """
        temp_w_grad_amp = cplx.make_complex_matrix(w_grad_amp,
                                                  zeros_for_w_amp)
        temp_vb_grad_amp = cplx.make_complex_vector(vb_grad_amp,
                                                   zeros_for_vb)
        temp_hb_grad_amp = cplx.make_complex_vector(hb_grad_amp,
                                                   zeros_for_hb_amp)
        temp_w_grad_phase = cplx.make_complex_matrix(w_grad_phase,
                                                    zeros_for_w_phase)
        temp_vb_grad_phase = cplx.make_complex_vector(vb_grad_phase,
                                                     zeros_for_vb)
        temp_hb_grad_phase = cplx.make_complex_vector(hb_grad_phase,
                                                     zeros_for_hb_phase)
        # Temp = U*psi(sigma)
        temp = cplx.scalar_mult(
            U, self.unnormalized_wavefunction(constructed_state))
        A_weights_amp += cplx.MS_mult(temp, temp_w_grad_amp)
        A_vb_amp += cplx.VS_mult(temp, temp_vb_grad_amp)
        A_hb_amp += cplx.VS_mult(temp, temp_hb_grad_amp)
        A_weights_phase += cplx.MS_mult(temp, temp_w_grad_phase)
        A_vb_phase += cplx.VS_mult(temp, temp_vb_grad_phase)
        A_hb_phase += cplx.VS_mult(temp, temp_hb_grad_phase)
        # Rotated wavefunction.
        B += temp
    # L = A / B: rotated gradient estimates, complex division by the
    # rotated wavefunction coefficient.
    L_weights_amp = cplx.MS_divide(A_weights_amp, B)
    L_vb_amp = cplx.VS_divide(A_vb_amp, B)
    L_hb_amp = cplx.VS_divide(A_hb_amp, B)
    L_weights_phase = cplx.MS_divide(A_weights_phase, B)
    L_vb_phase = cplx.VS_divide(A_vb_phase, B)
    L_hb_phase = cplx.VS_divide(A_hb_phase, B)
    return [L_weights_amp, L_vb_amp, L_hb_amp, L_weights_phase, L_vb_phase,
            L_hb_phase]
def fit(self, data, character_data, unitary_dict, epochs, batch_size,
        k=1, lr=1e-2, log_every=0, progbar=False):
    """Execute the training of the RBM.

    :param data: The actual training data
    :type data: list(float)
    :param character_data: The corresponding bases that each site in the
                           data has been measured in.
    :type character_data: list(str)
    :param unitary_dict: Maps a basis character to its single-site unitary.
    :param epochs: The number of parameter (i.e. weights and biases)
                   updates
    :type epochs: int
    :param batch_size: The size of batches taken from the data
    :type batch_size: int
    :param k: The number of contrastive divergence steps
    :type k: int
    :param lr: Learning rate
    :type lr: float
    :param log_every: Print convergence quantities every `log_every`
                      epochs; 0 (the default) disables periodic logging.
    :type log_every: int
    :param progbar: Whether or not to display a progress bar. If "notebook"
                    is passed, will use a Jupyter notebook compatible
                    progress bar.
    :type progbar: bool or str
    """
    # Make data file into a torch tensor.
    data = torch.tensor(data, dtype=torch.double).to(device=self.device)
    # Use the Adam optmizer to update the weights and biases.
    optimizer = torch.optim.Adam([self.rbm_amp.weights,
                                  self.rbm_amp.visible_bias,
                                  self.rbm_amp.hidden_bias,
                                  self.rbm_phase.weights,
                                  self.rbm_phase.visible_bias,
                                  self.rbm_phase.hidden_bias],
                                 lr=lr)
    disable_progbar = (progbar is False)
    progress_bar = tqdm_notebook if progbar == "notebook" else tqdm
    vis = self.rbm_amp.generate_visible_space()
    for ep in progress_bar(range(0, epochs + 1),
                           desc="Epochs ", total=epochs,
                           disable=disable_progbar):
        # Shuffle the data to ensure that the batches taken from the data
        # are random data points.
        random_permutation = torch.randperm(data.shape[0])
        shuffled_data = data[random_permutation]
        shuffled_character_data = character_data[random_permutation]
        # List of all the batches.
        batches = [shuffled_data[batch_start:(batch_start + batch_size)]
                   for batch_start in range(0, len(data), batch_size)]
        # List of all the bases.
        char_batches = [shuffled_character_data[batch_start:(batch_start + batch_size)]
                        for batch_start in range(0, len(data), batch_size)]
        # Calculate convergence quantities every "log_every" steps.
        # BUG FIX: previously `ep % log_every` was evaluated
        # unconditionally, raising ZeroDivisionError for the default
        # log_every=0; guard so that 0 simply disables logging.
        if log_every and ep % log_every == 0:
            fidelity_ = self.fidelity(vis, 'Z' 'Z')
            print ('Epoch = ',ep,'\nFidelity = ',fidelity_)
        # Save parameters at the end of training.
        if ep == epochs:
            print('Finished training. Saving results...')
            self.save_params()
            print('Done.')
            break
        # Loop through all of the batches and calculate the batch
        # gradients.
        for index, batch in progress_bar(enumerate(batches),
                                         desc="Batches",
                                         leave=False, disable=True):
            all_grads = self.compute_batch_gradients(unitary_dict, k, batch,
                                                     char_batches[index])
            if self.test_grads:
                self.test_gradients(unitary_dict, vis, k, batches[index],
                                    char_batches[index], all_grads)
            # Clear any cached gradients.
            optimizer.zero_grad()
            # Assign all available gradients to the
            # corresponding parameter.
            for name, grads in all_grads.items():
                selected_RBM = getattr(self, name)
                for param in grads.keys():
                    getattr(selected_RBM, param).grad = grads[param]
            # Tell the optimizer to apply the gradients and update
            # the parameters.
            optimizer.step()
def save_params(self):
    """Write the amplitude and phase RBM parameters to CSV files.

    Six files are produced in the current working directory: weights,
    visible bias, and hidden bias for each of the amplitude and phase
    networks.
    """
    outputs = [
        ('trained_weights_amp.csv', self.rbm_amp.weights),
        ('trained_visible_bias_amp.csv', self.rbm_amp.visible_bias),
        ('trained_hidden_bias_amp.csv', self.rbm_amp.hidden_bias),
        ('trained_weights_phase.csv', self.rbm_phase.weights),
        ('trained_visible_bias_phase.csv', self.rbm_phase.visible_bias),
        ('trained_hidden_bias_phase.csv', self.rbm_phase.hidden_bias),
    ]
    for filename, parameter in outputs:
        with open(filename, 'w') as csvfile:
            np.savetxt(csvfile, parameter.data.numpy(), fmt='%.5f',
                       delimiter=',')
def get_true_psi(self, basis):
    """Look up the true wavefunction for the given measurement basis.

    :param basis: E.g. XZZZX.
    :type basis: str
    :returns: The true wavefunction in the basis.
    :rtype: torch.doubleTensor
    """
    # Joining the characters reproduces the dictionary key (identical to
    # the original character-by-character concatenation).
    return self.psi_dictionary[''.join(basis)]
def overlap(self, visible_space, basis):
    """Computes the overlap between the RBM and true wavefunctions.

    :param visible_space: An array of all possible spin configurations.
    :type visible_space: torch.doubleTensor
    :param basis: E.g. XZZZX.
    :type basis: str
    :returns: :math:`O = \\langle{\\psi_{true}}\\vert\\psi_{\\lambda\\mu}\\rangle`.
    :rtype: float
    """
    true_psi = self.get_true_psi(basis)
    rbm_psi = self.normalized_wavefunction(visible_space)
    return cplx.inner_prod(true_psi, rbm_psi)
def fidelity(self, visible_space, basis):
    """Computes the fidelity of the RBM and true wavefunctions.

    :param visible_space: An array of all possible spin configurations.
    :type visible_space: torch.doubleTensor
    :param basis: E.g. XZZZX.
    :type basis: str
    :returns: :math:`F = |O|^2`.
    :rtype: float
    """
    overlap_value = self.overlap(visible_space, basis)
    return cplx.norm(overlap_value)
def KL_divergence(self, visible_space):
    '''Computes the total KL divergence.

    Sums, over a fixed list of two-site measurement bases, the KL
    divergence between the true wavefunction's probabilities and the
    RBM wavefunction's probabilities (both rotated into each basis).

    :param visible_space: An array of all possible spin configurations.
    :returns: The accumulated KL divergence.
    '''
    KL = 0.0
    # Adjacent string literals concatenate: these are 'ZZ', 'XZ', etc.
    basis_list = ['Z' 'Z', 'X' 'Z', 'Z' 'X', 'Y' 'Z', 'Z' 'Y']
    '''Wavefunctions (RBM and true) in the computational basis.'''
    # psi_ZZ = self.normalized_wavefunction(visible_space)
    # true_psi_ZZ = self.get_true_psi('ZZ')
    '''Compute the KL divergence for the non computational bases.'''
    for i in range(len(basis_list)):
        # Rotate the RBM wavefunction into the current basis; the true
        # wavefunction is stored per-basis already.
        rotated_RBM_psi = cplx.MV_mult(
            self.full_unitaries[basis_list[i]],
            self.normalized_wavefunction(visible_space))
        rotated_true_psi = self.get_true_psi(basis_list[i])
        for j in range(len(visible_space)):
            # Extract the (real, imag) pair of the j-th amplitude as a
            # 2x1 complex vector for the cplx helpers.
            elementof_rotated_RBM_psi = torch.tensor(
                [rotated_RBM_psi[0][j], rotated_RBM_psi[1][j]]).view(2, 1)
            elementof_rotated_true_psi = (torch.tensor(
                [rotated_true_psi[0][j], rotated_true_psi[1][j]]
            ).view(2, 1))
            # |amplitude|^2 for each wavefunction.
            norm_true_psi = cplx.norm(cplx.inner_prod(
                elementof_rotated_true_psi, elementof_rotated_true_psi))
            norm_RBM_psi = cplx.norm(cplx.inner_prod(
                elementof_rotated_RBM_psi, elementof_rotated_RBM_psi))
            # KL contribution p*log(p) - p*log(q); zero-probability
            # terms are skipped to avoid log(0).
            if norm_true_psi > 0.0:
                KL += norm_true_psi*torch.log(norm_true_psi)
            if norm_RBM_psi > 0.0:
                KL -= norm_true_psi*torch.log(norm_RBM_psi)
    '''Compute KL divergence for the computational basis.'''
    '''
    for j in range(len(visible_space)):
        elementof_ZZ_RBM_psi = torch.tensor([psi_ZZ[0][j], psi_ZZ[1][j]]).view(2,1)
        elementof_ZZ_true_psi = torch.tensor([true_psi_ZZ[0][j], true_psi_ZZ[1][j]]).view(2,1)

        norm_ZZ_true_psi = cplx.norm( cplx.inner_prod(elementof_ZZ_true_psi, elementof_ZZ_true_psi) )
        norm_ZZ_RBM_psi = cplx.norm( cplx.inner_prod(elementof_ZZ_RBM_psi, elementof_ZZ_RBM_psi) )

        if norm_ZZ_true_psi > 0.0:
            KL += norm_ZZ_true_psi*torch.log(norm_ZZ_true_psi)

        KL -= norm_ZZ_true_psi*torch.log(norm_ZZ_RBM_psi)
    '''
    return KL
def compute_numerical_gradient(self, visible_space, param, alg_grad):
    """Print a central-difference check of `alg_grad`.

    For each component of `param`, perturbs it by +/- eps, re-evaluates
    the KL divergence, and prints the numerical derivative next to the
    supplied algorithmic gradient and their absolute difference.
    `param` is restored to its original value afterwards.
    """
    eps = 1.e-6
    print("Numerical\t Exact\t\t Abs. Diff.")
    for i, component in enumerate(param):
        # Central difference: (KL(p+eps) - KL(p-eps)) / (2*eps).
        component.data += eps
        kl_plus = self.KL_divergence(visible_space)
        component.data -= 2*eps
        kl_minus = self.KL_divergence(visible_space)
        component.data += eps  # restore the original value
        numerical = (kl_plus - kl_minus) / (2*eps)
        print("{: 10.8f}\t{: 10.8f}\t{: 10.8f}\t"
              .format(numerical, alg_grad[i],
                      abs(numerical - alg_grad[i])))
def test_gradients(self, unitary_dict, visible_space, k, batch, chars_batch, alg_grads):
    """Print numerical-vs-algorithmic gradient checks for every parameter
    group of the amplitude and phase RBMs.

    Each call to compute_numerical_gradient prints one table comparing a
    central-difference estimate with the corresponding entry of
    `alg_grads`.
    """
    # Must have negative sign because the compute_batch_grads returns the neg of the grads.
    # key_list = ["weights_amp", "visible_bias_amp", "hidden_bias_amp", "weights_phase", "visible_bias_phase", "hidden_bias_phase"]
    # Flatten weight matrices so they can be checked component-wise.
    flat_weights_amp = self.rbm_amp.weights.data.view(-1)
    flat_weights_phase = self.rbm_phase.weights.data.view(-1)
    flat_grad_weights_amp = alg_grads["rbm_amp"]["weights"].view(-1)
    flat_grad_weights_phase = alg_grads["rbm_phase"]["weights"].view(-1)
    print('-------------------------------------------------------------------------------')
    print('Weights amp gradient')
    self.compute_numerical_gradient(
        visible_space, flat_weights_amp, -flat_grad_weights_amp)
    print ('\n')
    print('Visible bias amp gradient')
    self.compute_numerical_gradient(
        visible_space, self.rbm_amp.visible_bias, -alg_grads["rbm_amp"]["visible_bias"])
    print ('\n')
    print('Hidden bias amp gradient')
    self.compute_numerical_gradient(
        visible_space, self.rbm_amp.hidden_bias, -alg_grads["rbm_amp"]["hidden_bias"])
    print ('\n')
    print('Weights phase gradient')
    self.compute_numerical_gradient(
        visible_space, flat_weights_phase, -flat_grad_weights_phase)
    print ('\n')
    print('Visible bias phase gradient')
    self.compute_numerical_gradient(
        visible_space, self.rbm_phase.visible_bias, -alg_grads["rbm_phase"]["visible_bias"])
    print ('\n')
    print('Hidden bias phase gradient')
    self.compute_numerical_gradient(
        visible_space, self.rbm_phase.hidden_bias, -alg_grads["rbm_phase"]["hidden_bias"])
def state_to_index(self, state):
    '''Only for debugging how the unitary is applied to the unnormalized
    wavefunction - the 'B' term in alg 4.2.

    Scans every basis state of `self.num_visible` spins and returns the
    index of the one matching `state` (implicitly None if no match).
    '''
    target = state.numpy()
    for index in range(2 ** self.num_visible):
        # Build the binary representation of `index`, most significant
        # bit first, and compare it against the requested state.
        candidate = np.zeros(self.num_visible)
        value = index
        for j in range(self.num_visible):
            value, remainder = divmod(value, 2)
            candidate[self.num_visible - j - 1] = remainder
        if np.array_equal(candidate, target):
            return index
| [
"itertools.chain",
"numpy.sqrt",
"torch.randperm",
"qucumber.cplx.make_complex_vector",
"torch.cuda.is_available",
"torch.nn.functional.linear",
"qucumber.cplx.MS_mult",
"torch.distributions.bernoulli.Bernoulli",
"warnings.warn",
"torch.zeros_like",
"qucumber.cplx.VS_mult",
"torch.randn",
"q... | [((3029, 3059), 'torch.mv', 'torch.mv', (['v', 'self.visible_bias'], {}), '(v, self.visible_bias)\n', (3037, 3059), False, 'import torch\n'), ((6371, 6421), 'torch.distributions.bernoulli.Bernoulli', 'torch.distributions.bernoulli.Bernoulli', ([], {'probs': '(0.5)'}), '(probs=0.5)\n', (6410, 6421), False, 'import torch\n'), ((7259, 7357), 'torch.zeros', 'torch.zeros', (['(1 << self.num_visible, self.num_visible)'], {'device': 'self.device', 'dtype': 'torch.double'}), '((1 << self.num_visible, self.num_visible), device=self.device,\n dtype=torch.double)\n', (7270, 7357), False, 'import torch\n'), ((10026, 10052), 'torch.save', 'torch.save', (['data', 'location'], {}), '(data, location)\n', (10036, 10052), False, 'import torch\n'), ((13796, 13865), 'torch.tensor', 'torch.tensor', (['data'], {'device': 'self.rbm_module.device', 'dtype': 'torch.double'}), '(data, device=self.rbm_module.device, dtype=torch.double)\n', (13808, 13865), False, 'import torch\n'), ((13914, 14026), 'torch.optim.SGD', 'torch.optim.SGD', (['[self.rbm_module.weights, self.rbm_module.visible_bias, self.rbm_module.\n hidden_bias]'], {'lr': 'lr'}), '([self.rbm_module.weights, self.rbm_module.visible_bias,\n self.rbm_module.hidden_bias], lr=lr)\n', (13929, 14026), False, 'import torch\n'), ((18657, 18773), 'torch.zeros', 'torch.zeros', (['(2 ** num_non_trivial_unitaries, num_non_trivial_unitaries)'], {'device': 'self.device', 'dtype': 'torch.double'}), '((2 ** num_non_trivial_unitaries, num_non_trivial_unitaries),\n device=self.device, dtype=torch.double)\n', (18668, 18773), False, 'import torch\n'), ((22500, 22538), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_amp.weights'], {}), '(self.rbm_amp.weights)\n', (22516, 22538), False, 'import torch\n'), ((22558, 22601), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_amp.visible_bias'], {}), '(self.rbm_amp.visible_bias)\n', (22574, 22601), False, 'import torch\n'), ((22621, 22663), 'torch.zeros_like', 'torch.zeros_like', 
(['self.rbm_amp.hidden_bias'], {}), '(self.rbm_amp.hidden_bias)\n', (22637, 22663), False, 'import torch\n'), ((22691, 22731), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_phase.weights'], {}), '(self.rbm_phase.weights)\n', (22707, 22731), False, 'import torch\n'), ((22753, 22798), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_phase.visible_bias'], {}), '(self.rbm_phase.visible_bias)\n', (22769, 22798), False, 'import torch\n'), ((22820, 22864), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_phase.hidden_bias'], {}), '(self.rbm_phase.hidden_bias)\n', (22836, 22864), False, 'import torch\n'), ((29053, 29107), 'torch.zeros', 'torch.zeros', (['(2)'], {'device': 'self.device', 'dtype': 'torch.double'}), '(2, device=self.device, dtype=torch.double)\n', (29064, 29107), False, 'import torch\n'), ((29130, 29168), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_amp.weights'], {}), '(self.rbm_amp.weights)\n', (29146, 29168), False, 'import torch\n'), ((29191, 29234), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_amp.visible_bias'], {}), '(self.rbm_amp.visible_bias)\n', (29207, 29234), False, 'import torch\n'), ((29257, 29299), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_amp.hidden_bias'], {}), '(self.rbm_amp.hidden_bias)\n', (29273, 29299), False, 'import torch\n'), ((29324, 29364), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_phase.weights'], {}), '(self.rbm_phase.weights)\n', (29340, 29364), False, 'import torch\n'), ((29389, 29434), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_phase.visible_bias'], {}), '(self.rbm_phase.visible_bias)\n', (29405, 29434), False, 'import torch\n'), ((29459, 29503), 'torch.zeros_like', 'torch.zeros_like', (['self.rbm_phase.hidden_bias'], {}), '(self.rbm_phase.hidden_bias)\n', (29475, 29503), False, 'import torch\n'), ((29531, 29559), 'torch.zeros_like', 'torch.zeros_like', (['w_grad_amp'], {}), '(w_grad_amp)\n', (29547, 29559), False, 'import torch\n'), ((29588, 29618), 'torch.zeros_like', 
'torch.zeros_like', (['w_grad_phase'], {}), '(w_grad_phase)\n', (29604, 29618), False, 'import torch\n'), ((29642, 29671), 'torch.zeros_like', 'torch.zeros_like', (['vb_grad_amp'], {}), '(vb_grad_amp)\n', (29658, 29671), False, 'import torch\n'), ((29699, 29728), 'torch.zeros_like', 'torch.zeros_like', (['hb_grad_amp'], {}), '(hb_grad_amp)\n', (29715, 29728), False, 'import torch\n'), ((29758, 29789), 'torch.zeros_like', 'torch.zeros_like', (['hb_grad_phase'], {}), '(hb_grad_phase)\n', (29774, 29789), False, 'import torch\n'), ((33761, 33793), 'qucumber.cplx.MS_divide', 'cplx.MS_divide', (['A_weights_amp', 'B'], {}), '(A_weights_amp, B)\n', (33775, 33793), True, 'import qucumber.cplx as cplx\n'), ((33813, 33840), 'qucumber.cplx.VS_divide', 'cplx.VS_divide', (['A_vb_amp', 'B'], {}), '(A_vb_amp, B)\n', (33827, 33840), True, 'import qucumber.cplx as cplx\n'), ((33860, 33887), 'qucumber.cplx.VS_divide', 'cplx.VS_divide', (['A_hb_amp', 'B'], {}), '(A_hb_amp, B)\n', (33874, 33887), True, 'import qucumber.cplx as cplx\n'), ((33915, 33949), 'qucumber.cplx.MS_divide', 'cplx.MS_divide', (['A_weights_phase', 'B'], {}), '(A_weights_phase, B)\n', (33929, 33949), True, 'import qucumber.cplx as cplx\n'), ((33971, 34000), 'qucumber.cplx.VS_divide', 'cplx.VS_divide', (['A_vb_phase', 'B'], {}), '(A_vb_phase, B)\n', (33985, 34000), True, 'import qucumber.cplx as cplx\n'), ((34022, 34051), 'qucumber.cplx.VS_divide', 'cplx.VS_divide', (['A_hb_phase', 'B'], {}), '(A_hb_phase, B)\n', (34036, 34051), True, 'import qucumber.cplx as cplx\n'), ((35404, 35595), 'torch.optim.Adam', 'torch.optim.Adam', (['[self.rbm_amp.weights, self.rbm_amp.visible_bias, self.rbm_amp.hidden_bias,\n self.rbm_phase.weights, self.rbm_phase.visible_bias, self.rbm_phase.\n hidden_bias]'], {'lr': 'lr'}), '([self.rbm_amp.weights, self.rbm_amp.visible_bias, self.\n rbm_amp.hidden_bias, self.rbm_phase.weights, self.rbm_phase.\n visible_bias, self.rbm_phase.hidden_bias], lr=lr)\n', (35420, 35595), False, 'import 
torch\n'), ((46179, 46231), 'torch.zeros', 'torch.zeros', (['(2 ** self.num_visible)', 'self.num_visible'], {}), '(2 ** self.num_visible, self.num_visible)\n', (46190, 46231), False, 'import torch\n'), ((624, 701), 'warnings.warn', 'warnings.warn', (['"""Could not find GPU: will continue with CPU."""', 'ResourceWarning'], {}), "('Could not find GPU: will continue with CPU.', ResourceWarning)\n", (637, 701), False, 'import warnings\n'), ((756, 781), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (779, 781), False, 'import torch\n'), ((951, 971), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (963, 971), False, 'import torch\n'), ((989, 1008), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1001, 1008), False, 'import torch\n'), ((1698, 1767), 'torch.zeros', 'torch.zeros', (['self.num_visible'], {'device': 'self.device', 'dtype': 'torch.double'}), '(self.num_visible, device=self.device, dtype=torch.double)\n', (1709, 1767), False, 'import torch\n'), ((1976, 2044), 'torch.zeros', 'torch.zeros', (['self.num_hidden'], {'device': 'self.device', 'dtype': 'torch.double'}), '(self.num_hidden, device=self.device, dtype=torch.double)\n', (1987, 2044), False, 'import torch\n'), ((4087, 4130), 'torch.nn.functional.linear', 'F.linear', (['v', 'self.weights', 'self.hidden_bias'], {}), '(v, self.weights, self.hidden_bias)\n', (4095, 4130), True, 'from torch.nn import functional as F\n'), ((10549, 10569), 'torch.load', 'torch.load', (['location'], {}), '(location)\n', (10559, 10569), False, 'import torch\n'), ((11398, 11464), 'torch.nn.functional.linear', 'F.linear', (['v0', 'self.rbm_module.weights', 'self.rbm_module.hidden_bias'], {}), '(v0, self.rbm_module.weights, self.rbm_module.hidden_bias)\n', (11406, 11464), True, 'from torch.nn import functional as F\n'), ((11613, 11650), 'torch.einsum', 'torch.einsum', (['"""ij,ik->jk"""', '(prob, v0)'], {}), "('ij,ik->jk', (prob, v0))\n", (11625, 11650), False, 'import 
torch\n'), ((11684, 11712), 'torch.einsum', 'torch.einsum', (['"""ij->j"""', '(v0,)'], {}), "('ij->j', (v0,))\n", (11696, 11712), False, 'import torch\n'), ((11746, 11776), 'torch.einsum', 'torch.einsum', (['"""ij->j"""', '(prob,)'], {}), "('ij->j', (prob,))\n", (11758, 11776), False, 'import torch\n'), ((11811, 11847), 'torch.einsum', 'torch.einsum', (['"""ij,ik->jk"""', '(phk, vk)'], {}), "('ij,ik->jk', (phk, vk))\n", (11823, 11847), False, 'import torch\n'), ((11882, 11910), 'torch.einsum', 'torch.einsum', (['"""ij->j"""', '(vk,)'], {}), "('ij->j', (vk,))\n", (11894, 11910), False, 'import torch\n'), ((11945, 11974), 'torch.einsum', 'torch.einsum', (['"""ij->j"""', '(phk,)'], {}), "('ij->j', (phk,))\n", (11957, 11974), False, 'import torch\n'), ((14345, 14402), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'pos_batch_size', 'shuffle': '(True)'}), '(data, batch_size=pos_batch_size, shuffle=True)\n', (14355, 14402), False, 'from torch.utils.data import DataLoader\n'), ((14715, 14734), 'itertools.chain', 'chain', (['*neg_batches'], {}), '(*neg_batches)\n', (14720, 14734), False, 'from itertools import chain\n'), ((17930, 17988), 'torch.tensor', 'torch.tensor', (['[[1.0, 0.0], [0.0, 0.0]]'], {'dtype': 'torch.double'}), '([[1.0, 0.0], [0.0, 0.0]], dtype=torch.double)\n', (17942, 17988), False, 'import torch\n'), ((18024, 18082), 'torch.tensor', 'torch.tensor', (['[[0.0, 1.0], [0.0, 0.0]]'], {'dtype': 'torch.double'}), '([[0.0, 1.0], [0.0, 0.0]], dtype=torch.double)\n', (18036, 18082), False, 'import torch\n'), ((25692, 25748), 'torch.einsum', 'torch.einsum', (['"""ij,ik->jk"""', '(phk_amp_batch, vk_amp_batch)'], {}), "('ij,ik->jk', (phk_amp_batch, vk_amp_batch))\n", (25704, 25748), False, 'import torch\n'), ((25848, 25886), 'torch.einsum', 'torch.einsum', (['"""ij->j"""', '(vk_amp_batch,)'], {}), "('ij->j', (vk_amp_batch,))\n", (25860, 25886), False, 'import torch\n'), ((25920, 25959), 'torch.einsum', 'torch.einsum', (['"""ij->j"""', 
'(phk_amp_batch,)'], {}), "('ij->j', (phk_amp_batch,))\n", (25932, 25959), False, 'import torch\n'), ((30177, 30226), 'torch.zeros', 'torch.zeros', (['self.num_visible'], {'dtype': 'torch.double'}), '(self.num_visible, dtype=torch.double)\n', (30188, 30226), False, 'import torch\n'), ((30261, 30325), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0]'], {'dtype': 'torch.double', 'device': 'self.device'}), '([1.0, 0.0], dtype=torch.double, device=self.device)\n', (30273, 30325), False, 'import torch\n'), ((31674, 31728), 'torch.einsum', 'torch.einsum', (['"""i,j->ij"""', '(prob_amp, constructed_state)'], {}), "('i,j->ij', (prob_amp, constructed_state))\n", (31686, 31728), False, 'import torch\n'), ((31836, 31892), 'torch.einsum', 'torch.einsum', (['"""i,j->ij"""', '(prob_phase, constructed_state)'], {}), "('i,j->ij', (prob_phase, constructed_state))\n", (31848, 31892), False, 'import torch\n'), ((32312, 32365), 'qucumber.cplx.make_complex_matrix', 'cplx.make_complex_matrix', (['w_grad_amp', 'zeros_for_w_amp'], {}), '(w_grad_amp, zeros_for_w_amp)\n', (32336, 32365), True, 'import qucumber.cplx as cplx\n'), ((32452, 32503), 'qucumber.cplx.make_complex_vector', 'cplx.make_complex_vector', (['vb_grad_amp', 'zeros_for_vb'], {}), '(vb_grad_amp, zeros_for_vb)\n', (32476, 32503), True, 'import qucumber.cplx as cplx\n'), ((32591, 32646), 'qucumber.cplx.make_complex_vector', 'cplx.make_complex_vector', (['hb_grad_amp', 'zeros_for_hb_amp'], {}), '(hb_grad_amp, zeros_for_hb_amp)\n', (32615, 32646), True, 'import qucumber.cplx as cplx\n'), ((32736, 32793), 'qucumber.cplx.make_complex_matrix', 'cplx.make_complex_matrix', (['w_grad_phase', 'zeros_for_w_phase'], {}), '(w_grad_phase, zeros_for_w_phase)\n', (32760, 32793), True, 'import qucumber.cplx as cplx\n'), ((32884, 32937), 'qucumber.cplx.make_complex_vector', 'cplx.make_complex_vector', (['vb_grad_phase', 'zeros_for_vb'], {}), '(vb_grad_phase, zeros_for_vb)\n', (32908, 32937), True, 'import qucumber.cplx as cplx\n'), ((33029, 
33088), 'qucumber.cplx.make_complex_vector', 'cplx.make_complex_vector', (['hb_grad_phase', 'zeros_for_hb_phase'], {}), '(hb_grad_phase, zeros_for_hb_phase)\n', (33053, 33088), True, 'import qucumber.cplx as cplx\n'), ((33319, 33354), 'qucumber.cplx.MS_mult', 'cplx.MS_mult', (['temp', 'temp_w_grad_amp'], {}), '(temp, temp_w_grad_amp)\n', (33331, 33354), True, 'import qucumber.cplx as cplx\n'), ((33379, 33415), 'qucumber.cplx.VS_mult', 'cplx.VS_mult', (['temp', 'temp_vb_grad_amp'], {}), '(temp, temp_vb_grad_amp)\n', (33391, 33415), True, 'import qucumber.cplx as cplx\n'), ((33440, 33476), 'qucumber.cplx.VS_mult', 'cplx.VS_mult', (['temp', 'temp_hb_grad_amp'], {}), '(temp, temp_hb_grad_amp)\n', (33452, 33476), True, 'import qucumber.cplx as cplx\n'), ((33509, 33546), 'qucumber.cplx.MS_mult', 'cplx.MS_mult', (['temp', 'temp_w_grad_phase'], {}), '(temp, temp_w_grad_phase)\n', (33521, 33546), True, 'import qucumber.cplx as cplx\n'), ((33573, 33611), 'qucumber.cplx.VS_mult', 'cplx.VS_mult', (['temp', 'temp_vb_grad_phase'], {}), '(temp, temp_vb_grad_phase)\n', (33585, 33611), True, 'import qucumber.cplx as cplx\n'), ((33638, 33676), 'qucumber.cplx.VS_mult', 'cplx.VS_mult', (['temp', 'temp_hb_grad_phase'], {}), '(temp, temp_hb_grad_phase)\n', (33650, 33676), True, 'import qucumber.cplx as cplx\n'), ((36305, 36334), 'torch.randperm', 'torch.randperm', (['data.shape[0]'], {}), '(data.shape[0])\n', (36319, 36334), False, 'import torch\n'), ((39124, 39189), 'numpy.savetxt', 'np.savetxt', (['csvfile', 'trained_params[0]'], {'fmt': '"""%.5f"""', 'delimiter': '""","""'}), "(csvfile, trained_params[0], fmt='%.5f', delimiter=',')\n", (39134, 39189), True, 'import numpy as np\n'), ((39270, 39335), 'numpy.savetxt', 'np.savetxt', (['csvfile', 'trained_params[1]'], {'fmt': '"""%.5f"""', 'delimiter': '""","""'}), "(csvfile, trained_params[1], fmt='%.5f', delimiter=',')\n", (39280, 39335), True, 'import numpy as np\n'), ((39415, 39480), 'numpy.savetxt', 'np.savetxt', (['csvfile', 
'trained_params[2]'], {'fmt': '"""%.5f"""', 'delimiter': '""","""'}), "(csvfile, trained_params[2], fmt='%.5f', delimiter=',')\n", (39425, 39480), True, 'import numpy as np\n'), ((39558, 39623), 'numpy.savetxt', 'np.savetxt', (['csvfile', 'trained_params[3]'], {'fmt': '"""%.5f"""', 'delimiter': '""","""'}), "(csvfile, trained_params[3], fmt='%.5f', delimiter=',')\n", (39568, 39623), True, 'import numpy as np\n'), ((39706, 39771), 'numpy.savetxt', 'np.savetxt', (['csvfile', 'trained_params[4]'], {'fmt': '"""%.5f"""', 'delimiter': '""","""'}), "(csvfile, trained_params[4], fmt='%.5f', delimiter=',')\n", (39716, 39771), True, 'import numpy as np\n'), ((39853, 39918), 'numpy.savetxt', 'np.savetxt', (['csvfile', 'trained_params[5]'], {'fmt': '"""%.5f"""', 'delimiter': '""","""'}), "(csvfile, trained_params[5], fmt='%.5f', delimiter=',')\n", (39863, 39918), True, 'import numpy as np\n'), ((46541, 46577), 'numpy.array_equal', 'np.array_equal', (['npstates[i]', 'npstate'], {}), '(npstates[i], npstate)\n', (46555, 46577), True, 'import numpy as np\n'), ((585, 610), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (608, 610), False, 'import torch\n'), ((841, 869), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (863, 869), False, 'import torch\n'), ((904, 927), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (921, 927), False, 'import torch\n'), ((1076, 1167), 'torch.zeros', 'torch.zeros', (['self.num_hidden', 'self.num_visible'], {'device': 'self.device', 'dtype': 'torch.double'}), '(self.num_hidden, self.num_visible, device=self.device, dtype=\n torch.double)\n', (1087, 1167), False, 'import torch\n'), ((14538, 14595), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'neg_batch_size', 'shuffle': '(True)'}), '(data, batch_size=neg_batch_size, shuffle=True)\n', (14548, 14595), False, 'from torch.utils.data import DataLoader\n'), ((31111, 31136), 'qucumber.cplx.scalar_mult', 
'cplx.scalar_mult', (['U', 'temp'], {}), '(U, temp)\n', (31127, 31136), True, 'import qucumber.cplx as cplx\n'), ((31283, 31358), 'torch.nn.functional.linear', 'F.linear', (['constructed_state', 'self.rbm_amp.weights', 'self.rbm_amp.hidden_bias'], {}), '(constructed_state, self.rbm_amp.weights, self.rbm_amp.hidden_bias)\n', (31291, 31358), True, 'from torch.nn import functional as F\n'), ((31479, 31558), 'torch.nn.functional.linear', 'F.linear', (['constructed_state', 'self.rbm_phase.weights', 'self.rbm_phase.hidden_bias'], {}), '(constructed_state, self.rbm_phase.weights, self.rbm_phase.hidden_bias)\n', (31487, 31558), True, 'from torch.nn import functional as F\n'), ((35255, 35293), 'torch.tensor', 'torch.tensor', (['data'], {'dtype': 'torch.double'}), '(data, dtype=torch.double)\n', (35267, 35293), False, 'import torch\n'), ((1457, 1548), 'torch.randn', 'torch.randn', (['self.num_hidden', 'self.num_visible'], {'device': 'self.device', 'dtype': 'torch.double'}), '(self.num_hidden, self.num_visible, device=self.device, dtype=\n torch.double)\n', (1468, 1548), False, 'import torch\n'), ((1592, 1617), 'numpy.sqrt', 'np.sqrt', (['self.num_visible'], {}), '(self.num_visible)\n', (1599, 1617), True, 'import numpy as np\n'), ((3111, 3154), 'torch.nn.functional.linear', 'F.linear', (['v', 'self.weights', 'self.hidden_bias'], {}), '(v, self.weights, self.hidden_bias)\n', (3119, 3154), True, 'from torch.nn import functional as F\n'), ((6448, 6491), 'torch.Size', 'torch.Size', (['[num_samples, self.num_visible]'], {}), '([num_samples, self.num_visible])\n', (6458, 6491), False, 'import torch\n'), ((24243, 24303), 'torch.nn.functional.linear', 'F.linear', (['v0', 'self.rbm_amp.weights', 'self.rbm_amp.hidden_bias'], {}), '(v0, self.rbm_amp.weights, self.rbm_amp.hidden_bias)\n', (24251, 24303), True, 'from torch.nn import functional as F\n'), ((24385, 24424), 'torch.einsum', 'torch.einsum', (['"""i,j->ij"""', '(prob_amp, v0)'], {}), "('i,j->ij', (prob_amp, v0))\n", (24397, 
24424), False, 'import torch\n'), ((42406, 42477), 'qucumber.cplx.inner_prod', 'cplx.inner_prod', (['elementof_rotated_true_psi', 'elementof_rotated_true_psi'], {}), '(elementof_rotated_true_psi, elementof_rotated_true_psi)\n', (42421, 42477), True, 'import qucumber.cplx as cplx\n'), ((42541, 42610), 'qucumber.cplx.inner_prod', 'cplx.inner_prod', (['elementof_rotated_RBM_psi', 'elementof_rotated_RBM_psi'], {}), '(elementof_rotated_RBM_psi, elementof_rotated_RBM_psi)\n', (42556, 42610), True, 'import qucumber.cplx as cplx\n'), ((31058, 31079), 'qucumber.cplx.compT_matrix', 'cplx.compT_matrix', (['aa'], {}), '(aa)\n', (31075, 31079), True, 'import qucumber.cplx as cplx\n'), ((42111, 42171), 'torch.tensor', 'torch.tensor', (['[rotated_RBM_psi[0][j], rotated_RBM_psi[1][j]]'], {}), '([rotated_RBM_psi[0][j], rotated_RBM_psi[1][j]])\n', (42123, 42171), False, 'import torch\n'), ((42250, 42312), 'torch.tensor', 'torch.tensor', (['[rotated_true_psi[0][j], rotated_true_psi[1][j]]'], {}), '([rotated_true_psi[0][j], rotated_true_psi[1][j]])\n', (42262, 42312), False, 'import torch\n'), ((42714, 42738), 'torch.log', 'torch.log', (['norm_true_psi'], {}), '(norm_true_psi)\n', (42723, 42738), False, 'import torch\n'), ((42818, 42841), 'torch.log', 'torch.log', (['norm_RBM_psi'], {}), '(norm_RBM_psi)\n', (42827, 42841), False, 'import torch\n')] |
#!/usr/bin/python
# coding: utf-8
import numpy as np
import netCDF4
import math
import sys
import time
import calendar
import datetime
import os
from math import pi
from numpy import cos, sin, arccos, power, sqrt, exp,arctan2
## Entrada
path_wrf = (sys.argv[1])
filename = (sys.argv[2])
lat1 = (sys.argv[3])
lon1 = (sys.argv[4])
gep = (sys.argv[5])
pic1 = (sys.argv[6])
pic2 = (sys.argv[7])
date2 = (sys.argv[8])
date3 = (sys.argv[9])
bk = 0
date0 = datetime.datetime.strptime(date2, '%Y%m%d%H')
date1 = datetime.datetime.strptime(date3, '%Y%m%d%H')
lat0 = float(lat1)
lon0 = float(lon1)
#utc0 = float(utc1)
def tunnel_fast(latvar,lonvar,lat0,lon0):
rad_factor = pi/180.0 # radianos
latvals = latvar[::] * rad_factor # latitude longitude ==> numpy arrays
lonvals = lonvar[::] * rad_factor
ny, nx, nz = latvals.shape
lat0_rad = lat0 * rad_factor
lon0_rad = lon0 * rad_factor
clat, clon = cos(latvals),cos(lonvals)
slat, slon = sin(latvals),sin(lonvals)
delX = cos(lat0_rad) * cos(lon0_rad) - clat * clon
delY = cos(lat0_rad) * sin(lon0_rad) - clat * slon
delZ = sin(lat0_rad) - slat
dist_sq = delX**2 + delY**2 + delZ**2
minindex_1d = dist_sq.argmin() # 1D index do elemento minimo
iz_min,ix_min,iy_min = np.unravel_index( minindex_1d, latvals.shape)
return iz_min,ix_min,iy_min
if (-28 <= lat0 <= -14) and (-54 <= lon0 <= -36):
diferenca = date1 - date0
d2 = date0 + datetime.timedelta(days = diferenca.days )
for i in range(0, diferenca.days):
d1 = date0 + datetime.timedelta(days = i)
data = d1.strftime('%Y%m%d%h')
data2 = d1.strftime('%Y-%m-%d_')
path = path_wrf + "/" + data + "/" + filename + gep + "_" + data2 + '00:00:00'
data_q = data
while os.path.isfile(path) != True:
i += 1
d1 = date0 + datetime.timedelta(days = i)
data = d1.strftime('%Y%m%d%H')
if d1 > d2:
bk = 1
break
data2 = d1.strftime('%Y-%m-%d_')
path = path_wrf + "/" + data + "/" + filename + gep + "_" + data2 + '00:00:00'
if bk == 1:
data_inexistente = "Data nao existe %s \n" % (data_q)
f = open('out/data_inexistente.txt', 'a')
f.write(data_inexistente)
f.close()
exit()
ncfile = netCDF4.Dataset(path, 'r')
# variaveis do netcdf
latvar = ncfile.variables['XLAT'] #latitude e longitude
lonvar = ncfile.variables['XLONG']
cu_chuva = ncfile.variables['RAINC']
scu_chuva = ncfile.variables['RAINNC']
time = ncfile.variables['Times']
tempk = ncfile.variables['T2']
press = ncfile.variables['PSFC']
q2 = ncfile.variables['Q2']
#indices das coordenandas
iz,ix,iy = tunnel_fast(latvar, lonvar, lat0, lon0)
max_i = len(time)
chuva = np.full((max_i, 1),-999.9)
chuva_c = np.full((max_i, 1),-999.9)
chuva_nc= np.full((max_i, 1),-999.9)
tempc = np.full((max_i, 1),-999.9)
pressao = np.full((max_i, 1),-999.9)
q_2 = np.full((max_i, 1),-999.9)
umidade = np.full((max_i, 1),-999.9)
teste1 = np.full((max_i+1, 1),-999.9)
teste2 = np.full((max_i+1, 1),-999.9)
teste3 = np.full((max_i+1, 1),-999.9)
teste4 = np.full((max_i+1, 1),-999.9)
teste5 = np.full((max_i+1, 1),-999.9)
teste6 = np.full((max_i+1, 1),-999.9)
teste7 = np.full((max_i+1, 1),-999.9)
teste8 = np.full((max_i+1, 1),-999.9)
saida = np.full((max_i+1, 1),-999.9)
if max_i < 14:
chu = np.full((14 + 1, 1),-999.9)
tma = np.full((14 + 1, 1),-999.9)
tmi = np.full((14 + 1, 1),-999.9)
uma = np.full((14 + 1, 1),-999.9)
umi = np.full((14 + 1, 1),-999.9)
else:
chu = np.full((max_i + 1, 1),-999.9)
tma = np.full((max_i + 1, 1),-999.9)
tmi = np.full((max_i + 1, 1),-999.9)
uma = np.full((max_i + 1, 1),-999.9)
umi = np.full((max_i + 1, 1),-999.9)
for i in range(0, max_i):
if i == 0:
chuva[i] = cu_chuva[i,ix,iy] + scu_chuva[i,ix,iy]
# chuva_c[i] = cu_chuva[i,ix,iy]
# chuva_nc[i] = scu_chuva[i,ix,iy]
# print '%s %s %4.2f mm lat:%f lon:%f' % (data, pic1, chuva[i], lat0, lon0)
else:
chuva[i] = (cu_chuva[i,ix,iy] + scu_chuva[i,ix,iy]) - (cu_chuva[i-4,ix,iy] - scu_chuva[i-4,ix,iy])
# chuva_c[i] = cu_chuva[i,ix,iy] - cu_chuva[i-1,ix,iy]
# chuva_nc[i] = scu_chuva[i,ix,iy] - scu_chuva[i-1,ix,iy]
tempc[i] = tempk[i,ix,iy] - 273.15
pressao[i] = press[i,ix,iz] / 100
q_2[i] = q2[i,ix,iz]
a5 = 17.2693882 * (tempc[i]) / (tempc[i] + (273.15 - 35.86))
umidade[i] = 100 * (q_2[i] / ((379.90516 /(pressao[i]*100 )) * exp(a5)))
# if chuva[i] > 0:
# print '%s %s %4.2f mm lat:%f lon:%f' % (data, pic1, chuva[i], lat0, lon0)
ncfile.close()
a=0
b=4
for i in range(0, max_i):
if i > 14:
break
if a < max_i:
chu[i] = chuva[a]
um = np.argmin(umidade[a:b])
umi[i] = umidade[um + a]
UM = np.argmax(umidade[a:b])
uma[i] = umidade[UM + a]
tm = np.argmin(tempc[a:b])
tmi[i] = tempc[tm + a]
TM = np.argmax(tempc[a:b])
tma[i] = tempc[TM + a]
teste1[i] = tma[i]
teste2[i] = tmi[i]
teste3[i] = uma[i]
teste4[i] = umi[i]
teste5[i] = pressao[i]
teste6[i] = chu[i]
teste7[i] = tempc[i]
teste8[i] = umidade[i]
# teste8[i] = TM
else:
chu[i] = -999.9
umi[i] = -999.9
uma[i] = -999.9
tmi[i] = -999.9
tma[i] = -999.9
teste1[i] = -999.9
teste2[i] = -999.9
teste3[i] = -999.9
teste4[i] = -999.9
teste5[i] = -999.9
teste6[i] = -999.9
teste7[i] = -999.9
teste8[i] = -999.9
a += 4
b += 4
pasta_de_saida = "/DATA/CROPNET/ENSEMBLE/WRFGEFS/" + data
arquivo_de_saida1 = "/DATA/CROPNET/ENSEMBLE/WRFGEFS/" + data + "/RAIN_" + gep + "_" + pic1 + "_" + pic2 + "_" + data + ".asc"
arquivo_de_saida2 = "/DATA/CROPNET/ENSEMBLE/WRFGEFS/" + data + "/URMI_" + gep + "_" + pic1 + "_" + pic2 + "_" + data + ".asc"
arquivo_de_saida3 = "/DATA/CROPNET/ENSEMBLE/WRFGEFS/" + data + "/URMA_" + gep + "_" + pic1 + "_" + pic2 + "_" + data + ".asc"
arquivo_de_saida4 = "/DATA/CROPNET/ENSEMBLE/WRFGEFS/" + data + "/TMAX_" + gep + "_" + pic1 + "_" + pic2 + "_" + data + ".asc"
arquivo_de_saida5 = "/DATA/CROPNET/ENSEMBLE/WRFGEFS/" + data + "/TMIN_" + gep + "_" + pic1 + "_" + pic2 + "_" + data + ".asc"
if os.path.isdir(pasta_de_saida) == True:
np.savetxt(arquivo_de_saida1, chu[1:15], fmt='%4.2f')
np.savetxt(arquivo_de_saida2, umi[1:15], fmt='%4.2f')
np.savetxt(arquivo_de_saida3, uma[1:15], fmt='%4.2f')
np.savetxt(arquivo_de_saida4, tma[1:15], fmt='%4.2f')
np.savetxt(arquivo_de_saida5, tmi[1:15], fmt='%4.2f')
else:
os.makedirs("/DATA/CROPNET/ENSEMBLE/WRFGEFS/" + data)
np.savetxt(arquivo_de_saida1, chu[1:15], fmt='%4.2f')
np.savetxt(arquivo_de_saida2, umi[1:15], fmt='%4.2f')
np.savetxt(arquivo_de_saida3, uma[1:15], fmt='%4.2f')
np.savetxt(arquivo_de_saida4, tma[1:15], fmt='%4.2f')
np.savetxt(arquivo_de_saida5, tmi[1:15], fmt='%4.2f')
np.savetxt('out/temp_max_wrf.txt', teste1[1:15], fmt='%4.2f')
np.savetxt('out/temp_min_wrf.txt', teste2[1:15], fmt='%4.2f')
np.savetxt('out/umidade_max_wrf.txt', teste3[1:15], fmt='%4.2f')
np.savetxt('out/umidade_min_wrf.txt', teste4[1:15], fmt='%4.2f')
np.savetxt('out/pressao_wrf.txt', teste5[1:15], fmt='%4.2f')
np.savetxt('out/chuva_wrf.txt', teste6[1:15], fmt='%4.2f')
np.savetxt('out/temperatura_wrf.txt', teste7[1:15], fmt='%4.2f')
np.savetxt('out/umidade_wrf.txt', teste8[1:15], fmt='%4.2f')
else:
out_of_range = '%s lat:%f lon:%f Out of Range\n' % (pic1, lat0, lon0)
f = open('out/out_of_range.txt', 'a')
f.write(out_of_range)
f.close()
exit()
| [
"os.makedirs",
"datetime.datetime.strptime",
"netCDF4.Dataset",
"numpy.argmax",
"os.path.isfile",
"numpy.exp",
"os.path.isdir",
"numpy.cos",
"numpy.unravel_index",
"numpy.savetxt",
"numpy.sin",
"numpy.full",
"datetime.timedelta",
"numpy.argmin"
] | [((453, 498), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date2', '"""%Y%m%d%H"""'], {}), "(date2, '%Y%m%d%H')\n", (479, 498), False, 'import datetime\n'), ((507, 552), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date3', '"""%Y%m%d%H"""'], {}), "(date3, '%Y%m%d%H')\n", (533, 552), False, 'import datetime\n'), ((1266, 1310), 'numpy.unravel_index', 'np.unravel_index', (['minindex_1d', 'latvals.shape'], {}), '(minindex_1d, latvals.shape)\n', (1282, 1310), True, 'import numpy as np\n'), ((919, 931), 'numpy.cos', 'cos', (['latvals'], {}), '(latvals)\n', (922, 931), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n'), ((932, 944), 'numpy.cos', 'cos', (['lonvals'], {}), '(lonvals)\n', (935, 944), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n'), ((962, 974), 'numpy.sin', 'sin', (['latvals'], {}), '(latvals)\n', (965, 974), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n'), ((975, 987), 'numpy.sin', 'sin', (['lonvals'], {}), '(lonvals)\n', (978, 987), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n'), ((1109, 1122), 'numpy.sin', 'sin', (['lat0_rad'], {}), '(lat0_rad)\n', (1112, 1122), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n'), ((1436, 1475), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'diferenca.days'}), '(days=diferenca.days)\n', (1454, 1475), False, 'import datetime\n'), ((2185, 2211), 'netCDF4.Dataset', 'netCDF4.Dataset', (['path', '"""r"""'], {}), "(path, 'r')\n", (2200, 2211), False, 'import netCDF4\n'), ((2654, 2681), 'numpy.full', 'np.full', (['(max_i, 1)', '(-999.9)'], {}), '((max_i, 1), -999.9)\n', (2661, 2681), True, 'import numpy as np\n'), ((2693, 2720), 'numpy.full', 'np.full', (['(max_i, 1)', '(-999.9)'], {}), '((max_i, 1), -999.9)\n', (2700, 2720), True, 'import numpy as np\n'), ((2732, 2759), 'numpy.full', 'np.full', (['(max_i, 1)', '(-999.9)'], {}), '((max_i, 1), -999.9)\n', 
(2739, 2759), True, 'import numpy as np\n'), ((2769, 2796), 'numpy.full', 'np.full', (['(max_i, 1)', '(-999.9)'], {}), '((max_i, 1), -999.9)\n', (2776, 2796), True, 'import numpy as np\n'), ((2808, 2835), 'numpy.full', 'np.full', (['(max_i, 1)', '(-999.9)'], {}), '((max_i, 1), -999.9)\n', (2815, 2835), True, 'import numpy as np\n'), ((2843, 2870), 'numpy.full', 'np.full', (['(max_i, 1)', '(-999.9)'], {}), '((max_i, 1), -999.9)\n', (2850, 2870), True, 'import numpy as np\n'), ((2882, 2909), 'numpy.full', 'np.full', (['(max_i, 1)', '(-999.9)'], {}), '((max_i, 1), -999.9)\n', (2889, 2909), True, 'import numpy as np\n'), ((2920, 2951), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (2927, 2951), True, 'import numpy as np\n'), ((2960, 2991), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (2967, 2991), True, 'import numpy as np\n'), ((3000, 3031), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3007, 3031), True, 'import numpy as np\n'), ((3040, 3071), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3047, 3071), True, 'import numpy as np\n'), ((3080, 3111), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3087, 3111), True, 'import numpy as np\n'), ((3120, 3151), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3127, 3151), True, 'import numpy as np\n'), ((3160, 3191), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3167, 3191), True, 'import numpy as np\n'), ((3200, 3231), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3207, 3231), True, 'import numpy as np\n'), ((3239, 3270), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3246, 3270), True, 'import numpy as 
np\n'), ((6783, 6844), 'numpy.savetxt', 'np.savetxt', (['"""out/temp_max_wrf.txt"""', 'teste1[1:15]'], {'fmt': '"""%4.2f"""'}), "('out/temp_max_wrf.txt', teste1[1:15], fmt='%4.2f')\n", (6793, 6844), True, 'import numpy as np\n'), ((6847, 6908), 'numpy.savetxt', 'np.savetxt', (['"""out/temp_min_wrf.txt"""', 'teste2[1:15]'], {'fmt': '"""%4.2f"""'}), "('out/temp_min_wrf.txt', teste2[1:15], fmt='%4.2f')\n", (6857, 6908), True, 'import numpy as np\n'), ((6911, 6975), 'numpy.savetxt', 'np.savetxt', (['"""out/umidade_max_wrf.txt"""', 'teste3[1:15]'], {'fmt': '"""%4.2f"""'}), "('out/umidade_max_wrf.txt', teste3[1:15], fmt='%4.2f')\n", (6921, 6975), True, 'import numpy as np\n'), ((6978, 7042), 'numpy.savetxt', 'np.savetxt', (['"""out/umidade_min_wrf.txt"""', 'teste4[1:15]'], {'fmt': '"""%4.2f"""'}), "('out/umidade_min_wrf.txt', teste4[1:15], fmt='%4.2f')\n", (6988, 7042), True, 'import numpy as np\n'), ((7045, 7105), 'numpy.savetxt', 'np.savetxt', (['"""out/pressao_wrf.txt"""', 'teste5[1:15]'], {'fmt': '"""%4.2f"""'}), "('out/pressao_wrf.txt', teste5[1:15], fmt='%4.2f')\n", (7055, 7105), True, 'import numpy as np\n'), ((7108, 7166), 'numpy.savetxt', 'np.savetxt', (['"""out/chuva_wrf.txt"""', 'teste6[1:15]'], {'fmt': '"""%4.2f"""'}), "('out/chuva_wrf.txt', teste6[1:15], fmt='%4.2f')\n", (7118, 7166), True, 'import numpy as np\n'), ((7169, 7233), 'numpy.savetxt', 'np.savetxt', (['"""out/temperatura_wrf.txt"""', 'teste7[1:15]'], {'fmt': '"""%4.2f"""'}), "('out/temperatura_wrf.txt', teste7[1:15], fmt='%4.2f')\n", (7179, 7233), True, 'import numpy as np\n'), ((7236, 7296), 'numpy.savetxt', 'np.savetxt', (['"""out/umidade_wrf.txt"""', 'teste8[1:15]'], {'fmt': '"""%4.2f"""'}), "('out/umidade_wrf.txt', teste8[1:15], fmt='%4.2f')\n", (7246, 7296), True, 'import numpy as np\n'), ((999, 1012), 'numpy.cos', 'cos', (['lat0_rad'], {}), '(lat0_rad)\n', (1002, 1012), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n'), ((1015, 1028), 'numpy.cos', 'cos', 
(['lon0_rad'], {}), '(lon0_rad)\n', (1018, 1028), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n'), ((1054, 1067), 'numpy.cos', 'cos', (['lat0_rad'], {}), '(lat0_rad)\n', (1057, 1067), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n'), ((1070, 1083), 'numpy.sin', 'sin', (['lon0_rad'], {}), '(lon0_rad)\n', (1073, 1083), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n'), ((1531, 1557), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (1549, 1557), False, 'import datetime\n'), ((1733, 1753), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1747, 1753), False, 'import os\n'), ((3294, 3322), 'numpy.full', 'np.full', (['(14 + 1, 1)', '(-999.9)'], {}), '((14 + 1, 1), -999.9)\n', (3301, 3322), True, 'import numpy as np\n'), ((3331, 3359), 'numpy.full', 'np.full', (['(14 + 1, 1)', '(-999.9)'], {}), '((14 + 1, 1), -999.9)\n', (3338, 3359), True, 'import numpy as np\n'), ((3368, 3396), 'numpy.full', 'np.full', (['(14 + 1, 1)', '(-999.9)'], {}), '((14 + 1, 1), -999.9)\n', (3375, 3396), True, 'import numpy as np\n'), ((3405, 3433), 'numpy.full', 'np.full', (['(14 + 1, 1)', '(-999.9)'], {}), '((14 + 1, 1), -999.9)\n', (3412, 3433), True, 'import numpy as np\n'), ((3442, 3470), 'numpy.full', 'np.full', (['(14 + 1, 1)', '(-999.9)'], {}), '((14 + 1, 1), -999.9)\n', (3449, 3470), True, 'import numpy as np\n'), ((3487, 3518), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3494, 3518), True, 'import numpy as np\n'), ((3527, 3558), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3534, 3558), True, 'import numpy as np\n'), ((3567, 3598), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3574, 3598), True, 'import numpy as np\n'), ((3607, 3638), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', 
(3614, 3638), True, 'import numpy as np\n'), ((3647, 3678), 'numpy.full', 'np.full', (['(max_i + 1, 1)', '(-999.9)'], {}), '((max_i + 1, 1), -999.9)\n', (3654, 3678), True, 'import numpy as np\n'), ((6105, 6134), 'os.path.isdir', 'os.path.isdir', (['pasta_de_saida'], {}), '(pasta_de_saida)\n', (6118, 6134), False, 'import os\n'), ((6147, 6200), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida1', 'chu[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida1, chu[1:15], fmt='%4.2f')\n", (6157, 6200), True, 'import numpy as np\n'), ((6204, 6257), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida2', 'umi[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida2, umi[1:15], fmt='%4.2f')\n", (6214, 6257), True, 'import numpy as np\n'), ((6261, 6314), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida3', 'uma[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida3, uma[1:15], fmt='%4.2f')\n", (6271, 6314), True, 'import numpy as np\n'), ((6318, 6371), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida4', 'tma[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida4, tma[1:15], fmt='%4.2f')\n", (6328, 6371), True, 'import numpy as np\n'), ((6375, 6428), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida5', 'tmi[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida5, tmi[1:15], fmt='%4.2f')\n", (6385, 6428), True, 'import numpy as np\n'), ((6440, 6493), 'os.makedirs', 'os.makedirs', (["('/DATA/CROPNET/ENSEMBLE/WRFGEFS/' + data)"], {}), "('/DATA/CROPNET/ENSEMBLE/WRFGEFS/' + data)\n", (6451, 6493), False, 'import os\n'), ((6498, 6551), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida1', 'chu[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida1, chu[1:15], fmt='%4.2f')\n", (6508, 6551), True, 'import numpy as np\n'), ((6555, 6608), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida2', 'umi[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida2, umi[1:15], fmt='%4.2f')\n", (6565, 6608), True, 'import numpy as np\n'), ((6612, 6665), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida3', 
'uma[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida3, uma[1:15], fmt='%4.2f')\n", (6622, 6665), True, 'import numpy as np\n'), ((6669, 6722), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida4', 'tma[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida4, tma[1:15], fmt='%4.2f')\n", (6679, 6722), True, 'import numpy as np\n'), ((6726, 6779), 'numpy.savetxt', 'np.savetxt', (['arquivo_de_saida5', 'tmi[1:15]'], {'fmt': '"""%4.2f"""'}), "(arquivo_de_saida5, tmi[1:15], fmt='%4.2f')\n", (6736, 6779), True, 'import numpy as np\n'), ((1789, 1815), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (1807, 1815), False, 'import datetime\n'), ((4629, 4652), 'numpy.argmin', 'np.argmin', (['umidade[a:b]'], {}), '(umidade[a:b])\n', (4638, 4652), True, 'import numpy as np\n'), ((4691, 4714), 'numpy.argmax', 'np.argmax', (['umidade[a:b]'], {}), '(umidade[a:b])\n', (4700, 4714), True, 'import numpy as np\n'), ((4753, 4774), 'numpy.argmin', 'np.argmin', (['tempc[a:b]'], {}), '(tempc[a:b])\n', (4762, 4774), True, 'import numpy as np\n'), ((4811, 4832), 'numpy.argmax', 'np.argmax', (['tempc[a:b]'], {}), '(tempc[a:b])\n', (4820, 4832), True, 'import numpy as np\n'), ((4389, 4396), 'numpy.exp', 'exp', (['a5'], {}), '(a5)\n', (4392, 4396), False, 'from numpy import cos, sin, arccos, power, sqrt, exp, arctan2\n')] |
#
# Data generator for training the SELDnet
#
import os
import numpy as np
import cls_feature_class
from IPython import embed
from collections import deque
import random
import parameter
class DataGenerator(object):
def __init__(
self, datagen_mode='train', dataset='resim', ov=1, ov_num=1, split=1, db=30, batch_size=32, seq_len=64,
shuffle=True, nfft=512, classifier_mode='regr', weakness=0, cnn3d=False, xyz_def_zero=False, extra_name='',
azi_only=False
):
self._datagen_mode = datagen_mode
self._classifier_mode = classifier_mode
self._batch_size = batch_size
self._seq_len = seq_len
self._shuffle = shuffle
self._split = split;
self._ov_num = ov_num;
self._feat_cls = cls_feature_class.FeatureClass(dataset=dataset, ov=ov, split=split, db=db, nfft=nfft)
self._label_dir = self._feat_cls.get_label_dir(classifier_mode, weakness, extra_name)
self._feat_dir = self._feat_cls.get_normalized_feat_dir(extra_name)
self._thickness = weakness
self._xyz_def_zero = xyz_def_zero
self._azi_only = azi_only
self._filenames_list = list()
self._nb_frames_file = None # Assuming number of frames in feat files are the same
self._feat_len = None
self._2_nb_ch = 8
self._label_len = None # total length of label - DOA + SED
self._doa_len = None # DOA label length
self._class_dict = self._feat_cls.get_classes()
self._nb_classes = len(self._class_dict.keys())
self._default_azi, self._default_ele = self._feat_cls.get_default_azi_ele_regr()
self._is_cnn3d_model = cnn3d
self._get_label_filenames_sizes()
self._batch_seq_len = self._batch_size*self._seq_len
self._circ_buf_feat = None
self._circ_buf_label = None
self._nb_total_batches = int(np.floor((len(self._filenames_list) * self._nb_frames_file /
float(self._seq_len * self._batch_size))))
print(
'Datagen_mode: {}, nb_files: {}, nb_classes:{}\n'
'nb_frames_file: {}, feat_len: {}, nb_ch: {}, label_len:{}\n'.format(
self._datagen_mode, len(self._filenames_list), self._nb_classes,
self._nb_frames_file, self._feat_len, self._2_nb_ch, self._label_len
)
)
print(
'Dataset: {}, ov: {}, split: {}\n'
'batch_size: {}, seq_len: {}, shuffle: {}\n'
'label_dir: {}\n '
'feat_dir: {}\n'.format(
dataset, ov, split,
self._batch_size, self._seq_len, self._shuffle,
self._label_dir, self._feat_dir
)
)
def get_data_sizes(self):
feat_shape = (self._batch_size, self._2_nb_ch, self._seq_len, self._feat_len)
label_shape = [
(self._batch_size, self._seq_len, self._nb_classes),
(self._batch_size, self._seq_len, self._nb_classes*(2 if self._azi_only else 3))
]
return feat_shape, label_shape
    def get_total_batches_in_data(self):
        """Return the number of full batches one pass over the file list yields."""
        return self._nb_total_batches
def _get_label_filenames_sizes(self):
#for filename in os.listdir(self._label_dir):
# if self._datagen_mode in filename:
# self._filenames_list.append(filename)
#1 stands for default configuration
_params = parameter.get_params('1')
cnt_train = 0
cnt_test = 0
for filename in os.listdir(self._label_dir):
if self._datagen_mode == "train":
for split_n in _params["train_split"]:
if "split"+str(split_n) in filename:
self._filenames_list.append(filename)
print("TRAIN " + str(cnt_train) + ": "+filename)
cnt_train = cnt_train+1
elif self._datagen_mode == "validation":
if "split"+str(self._split) in filename:
self._filenames_list.append(filename)
print("VALID " + str(cnt_test) + ": "+filename)
cnt_test = cnt_test+1
else:
if ("split"+str(self._split) in filename) and ("ov"+str(self._ov_num) in filename):
self._filenames_list.append(filename)
print("TEST " + str(cnt_test) + ": "+filename)
cnt_test = cnt_test+1
temp_feat = np.load(os.path.join(self._feat_dir, self._filenames_list[0]))
self._nb_frames_file = temp_feat.shape[0]
self._feat_len = int(temp_feat.shape[1] / self._2_nb_ch)
temp_label = np.load(os.path.join(self._label_dir, self._filenames_list[0]))
self._label_len = temp_label.shape[-1]
self._doa_len = (self._label_len - self._nb_classes)/self._nb_classes
return
    def generate(self):
        """
        Generates batches of samples

        Endlessly yields ``(feat, label)`` pairs. ``feat`` is the raw feature
        buffer reshaped to (batch, seq_len, feat_len, 2*nb_ch) after sequence
        splitting; ``label`` is a two-element list [SED labels, DOA Cartesian
        labels], with DOA angles converted from degrees to Cartesian coords.
        :return:
        """
        while 1:
            if self._shuffle:
                random.shuffle(self._filenames_list)

            # Ideally this should have been outside the while loop. But while generating the test data we want the data
            # to be the same exactly for all epoch's hence we keep it here.
            self._circ_buf_feat = deque()
            self._circ_buf_label = deque()

            file_cnt = 0
            for i in range(self._nb_total_batches):

                # load feat and label to circular buffer. Always maintain atleast one batch worth feat and label in the
                # circular buffer. If not keep refilling it.
                while len(self._circ_buf_feat) < int(self._batch_seq_len):
                    temp_feat = np.load(os.path.join(self._feat_dir, self._filenames_list[file_cnt]))
                    temp_label = np.load(os.path.join(self._label_dir, self._filenames_list[file_cnt]))

                    # rows are appended frame by frame so the buffer holds frames, not files
                    for row_cnt, row in enumerate(temp_feat):
                        self._circ_buf_feat.append(row)
                        self._circ_buf_label.append(temp_label[row_cnt])
                    file_cnt = file_cnt + 1

                # Read one batch size from the circular buffer
                feat = np.zeros((int(self._batch_seq_len), int(self._feat_len) * int(self._2_nb_ch)))
                label = np.zeros((int(self._batch_seq_len), int(self._label_len)))
                for j in range(self._batch_seq_len):
                    if self._circ_buf_feat:
                        feat[j, :] = self._circ_buf_feat.popleft()
                    if self._circ_buf_label:
                        label[j, :] = self._circ_buf_label.popleft()
                # un-flatten the per-frame channels back into a channel axis
                feat = np.reshape(feat, (int(self._batch_seq_len), int(self._feat_len), int(self._2_nb_ch)))

                # Split to sequences
                feat = self._split_in_seqs(feat)
                #feat = np.transpose(feat, (0, 3, 1, 2))
                label = self._split_in_seqs(label)

                if self._azi_only:
                    # Get Cartesian coordinates from azi/ele (degrees -> radians)
                    azi_rad = label[:, :, self._nb_classes:2 * self._nb_classes] * np.pi / 180
                    x = np.cos(azi_rad)
                    y = np.sin(azi_rad)

                    # Set default Cartesian x,y,z coordinates to 0,0,0
                    # (frames whose elevation equals the "no event" default)
                    if self._xyz_def_zero:
                        no_ele_ind = np.where(label[:, :, 2 * self._nb_classes:] == self._default_ele)
                        x[no_ele_ind] = 0
                        y[no_ele_ind] = 0
                    label = [
                        label[:, :, :self._nb_classes],  # SED labels
                        np.concatenate((x, y), -1)  # DOA Cartesian labels
                    ]
                else:
                    # Get Cartesian coordinates from azi/ele (degrees -> radians)
                    azi_rad = label[:, :, self._nb_classes:2 * self._nb_classes] * np.pi / 180
                    ele_rad = label[:, :, 2 * self._nb_classes:] * np.pi / 180
                    tmp_label = np.cos(ele_rad)

                    x = np.cos(azi_rad) * tmp_label
                    y = np.sin(azi_rad) * tmp_label
                    z = np.sin(ele_rad)

                    # Set default Cartesian x,y,z coordinates to 0,0,0
                    if self._xyz_def_zero:
                        no_ele_ind = np.where(label[:, :, 2 * self._nb_classes:] == self._default_ele)
                        x[no_ele_ind] = 0
                        z[no_ele_ind] = 0
                        y[no_ele_ind] = 0
                    label = [
                        label[:, :, :self._nb_classes],  # SED labels
                        np.concatenate((x, y, z), -1)  # DOA Cartesian labels
                    ]

                yield feat, label
def _split_in_seqs(self, data):
if len(data.shape) == 1:
if data.shape[0] % self._seq_len:
data = data[:-(data.shape[0] % self._seq_len), :]
data = data.reshape((data.shape[0] // self._seq_len, int(self._seq_len), 1))
elif len(data.shape) == 2:
if data.shape[0] % self._seq_len:
data = data[:-(data.shape[0] % self._seq_len), :]
data = data.reshape((data.shape[0] // self._seq_len, int(self._seq_len), data.shape[1]))
elif len(data.shape) == 3:
if data.shape[0] % int(self._seq_len):
data = data[:-(data.shape[0] % self._seq_len), :, :]
data = data.reshape((data.shape[0] // self._seq_len, int(self._seq_len), data.shape[1], data.shape[2]))
else:
print('ERROR: Unknown data dimensions: {}'.format(data.shape))
exit()
return data
@staticmethod
def split_multi_channels(data, num_channels):
tmp = None
in_shape = data.shape
if len(in_shape) == 3:
hop = in_shape[2] / num_channels
tmp = np.zeros((in_shape[0], num_channels, in_shape[1], hop))
for i in range(num_channels):
tmp[:, i, :, :] = data[:, :, i * hop:(i + 1) * hop]
elif len(in_shape) == 4 and num_channels == 1:
tmp = np.zeros((in_shape[0], 1, in_shape[1], in_shape[2], in_shape[3]))
tmp[:, 0, :, :, :] = data
else:
print('ERROR: The input should be a 3D matrix but it seems to have dimensions: {}'.format(in_shape))
exit()
return tmp
    def get_list_index(self, azi, ele):
        """Delegate to the feature class: map an (azimuth, elevation) pair to a list index."""
        return self._feat_cls.get_list_index(azi, ele)
def get_matrix_index(self, ind):
return np.array(self._feat_cls.get_vector_index(ind))
    def get_nb_classes(self):
        """Return the number of sound-event classes."""
        return self._nb_classes
    def nb_frames_1s(self):
        """Return the number of feature frames per second of audio (delegated to the feature class)."""
        return self._feat_cls.nb_frames_1s()
| [
"os.listdir",
"collections.deque",
"random.shuffle",
"numpy.where",
"os.path.join",
"numpy.zeros",
"numpy.cos",
"numpy.concatenate",
"numpy.sin",
"parameter.get_params",
"cls_feature_class.FeatureClass"
] | [((784, 873), 'cls_feature_class.FeatureClass', 'cls_feature_class.FeatureClass', ([], {'dataset': 'dataset', 'ov': 'ov', 'split': 'split', 'db': 'db', 'nfft': 'nfft'}), '(dataset=dataset, ov=ov, split=split, db=db,\n nfft=nfft)\n', (814, 873), False, 'import cls_feature_class\n'), ((3477, 3502), 'parameter.get_params', 'parameter.get_params', (['"""1"""'], {}), "('1')\n", (3497, 3502), False, 'import parameter\n'), ((3571, 3598), 'os.listdir', 'os.listdir', (['self._label_dir'], {}), '(self._label_dir)\n', (3581, 3598), False, 'import os\n'), ((4535, 4588), 'os.path.join', 'os.path.join', (['self._feat_dir', 'self._filenames_list[0]'], {}), '(self._feat_dir, self._filenames_list[0])\n', (4547, 4588), False, 'import os\n'), ((4735, 4789), 'os.path.join', 'os.path.join', (['self._label_dir', 'self._filenames_list[0]'], {}), '(self._label_dir, self._filenames_list[0])\n', (4747, 4789), False, 'import os\n'), ((5367, 5374), 'collections.deque', 'deque', ([], {}), '()\n', (5372, 5374), False, 'from collections import deque\n'), ((5410, 5417), 'collections.deque', 'deque', ([], {}), '()\n', (5415, 5417), False, 'from collections import deque\n'), ((9971, 10026), 'numpy.zeros', 'np.zeros', (['(in_shape[0], num_channels, in_shape[1], hop)'], {}), '((in_shape[0], num_channels, in_shape[1], hop))\n', (9979, 10026), True, 'import numpy as np\n'), ((5099, 5135), 'random.shuffle', 'random.shuffle', (['self._filenames_list'], {}), '(self._filenames_list)\n', (5113, 5135), False, 'import random\n'), ((10210, 10275), 'numpy.zeros', 'np.zeros', (['(in_shape[0], 1, in_shape[1], in_shape[2], in_shape[3])'], {}), '((in_shape[0], 1, in_shape[1], in_shape[2], in_shape[3]))\n', (10218, 10275), True, 'import numpy as np\n'), ((7243, 7258), 'numpy.cos', 'np.cos', (['azi_rad'], {}), '(azi_rad)\n', (7249, 7258), True, 'import numpy as np\n'), ((7283, 7298), 'numpy.sin', 'np.sin', (['azi_rad'], {}), '(azi_rad)\n', (7289, 7298), True, 'import numpy as np\n'), ((8093, 8108), 'numpy.cos', 
'np.cos', (['ele_rad'], {}), '(ele_rad)\n', (8099, 8108), True, 'import numpy as np\n'), ((8238, 8253), 'numpy.sin', 'np.sin', (['ele_rad'], {}), '(ele_rad)\n', (8244, 8253), True, 'import numpy as np\n'), ((5794, 5854), 'os.path.join', 'os.path.join', (['self._feat_dir', 'self._filenames_list[file_cnt]'], {}), '(self._feat_dir, self._filenames_list[file_cnt])\n', (5806, 5854), False, 'import os\n'), ((5897, 5958), 'os.path.join', 'os.path.join', (['self._label_dir', 'self._filenames_list[file_cnt]'], {}), '(self._label_dir, self._filenames_list[file_cnt])\n', (5909, 5958), False, 'import os\n'), ((7451, 7516), 'numpy.where', 'np.where', (['(label[:, :, 2 * self._nb_classes:] == self._default_ele)'], {}), '(label[:, :, 2 * self._nb_classes:] == self._default_ele)\n', (7459, 7516), True, 'import numpy as np\n'), ((7726, 7752), 'numpy.concatenate', 'np.concatenate', (['(x, y)', '(-1)'], {}), '((x, y), -1)\n', (7740, 7752), True, 'import numpy as np\n'), ((8134, 8149), 'numpy.cos', 'np.cos', (['azi_rad'], {}), '(azi_rad)\n', (8140, 8149), True, 'import numpy as np\n'), ((8186, 8201), 'numpy.sin', 'np.sin', (['azi_rad'], {}), '(azi_rad)\n', (8192, 8201), True, 'import numpy as np\n'), ((8406, 8471), 'numpy.where', 'np.where', (['(label[:, :, 2 * self._nb_classes:] == self._default_ele)'], {}), '(label[:, :, 2 * self._nb_classes:] == self._default_ele)\n', (8414, 8471), True, 'import numpy as np\n'), ((8723, 8752), 'numpy.concatenate', 'np.concatenate', (['(x, y, z)', '(-1)'], {}), '((x, y, z), -1)\n', (8737, 8752), True, 'import numpy as np\n')] |
import math
import numpy as np
from gym import spaces
import furuta_env_torque as fet
import common as cm
class FurutaEnvTorquePpo2(fet.FurutaEnvTorque):
    """Furuta pendulum torque environment with a continuous [-1, 1] action space for PPO2."""

    def __init__(self, state, render=False):
        # Single continuous action (normalised torque command) bounded by [-1, 1].
        low = np.array([-1])
        high = np.array([1])
        super(FurutaEnvTorquePpo2, self).__init__(
            state=state, action_space=spaces.Box(low, high), render=render)

    def decodeAction(self, action):
        """The action already is the torque command; pass it through unchanged."""
        return action

    def compute_reward(self, action, done=None):
        """Reward upright pole angles, minus a tiny penalty on action magnitude."""
        angle_term = math.cos(abs(cm.rad2Norm(self.pole_angle_real)))
        action_penalty = abs(self.decodeAction(action)) * 0.0001
        return angle_term - action_penalty
| [
"numpy.array",
"common.rad2Norm"
] | [((290, 304), 'numpy.array', 'np.array', (['[-1]'], {}), '([-1])\n', (298, 304), True, 'import numpy as np\n'), ((306, 319), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (314, 319), True, 'import numpy as np\n'), ((482, 515), 'common.rad2Norm', 'cm.rad2Norm', (['self.pole_angle_real'], {}), '(self.pole_angle_real)\n', (493, 515), True, 'import common as cm\n')] |
from flow.envs.base_env import SumoEnvironment
from flow.core import rewards
from flow.controllers.car_following_models import *
from gym.spaces.box import Box
from gym.spaces.discrete import Discrete
from gym.spaces.tuple_space import Tuple
import numpy as np
class SimpleLaneChangingAccelerationEnvironment(SumoEnvironment):
    """
    Fully functional environment for multi lane closed loop settings. Takes in
    an *acceleration* and *lane-change* as an action. Reward function is
    negative norm of the difference between the velocities of each vehicle, and
    the target velocity. State function is a vector of the velocities and
    absolute positions for each vehicle.
    """

    @property
    def action_space(self):
        """
        See parent class

        Per RL vehicle the action is a pair of:
        - a (continuous) acceleration from max-deacc to max-acc
        - a (continuous) lane-change direction in [-1, 1]
        """
        max_deacc = self.env_params.max_deacc
        max_acc = self.env_params.max_acc

        lower = [-abs(max_deacc), -1] * self.vehicles.num_rl_vehicles
        upper = [max_acc, 1] * self.vehicles.num_rl_vehicles

        return Box(np.array(lower), np.array(upper))

    @property
    def observation_space(self):
        """
        See parent class

        An observation consists of the velocity, absolute position, and lane
        index of each vehicle in the fleet
        """
        num_veh = self.vehicles.num_vehicles
        speed = Box(low=-np.inf, high=np.inf, shape=(num_veh,))
        lane = Box(low=0, high=self.scenario.lanes - 1, shape=(num_veh,))
        absolute_pos = Box(low=0., high=np.inf, shape=(num_veh,))
        return Tuple((speed, absolute_pos, lane))

    def compute_reward(self, state, rl_actions, **kwargs):
        """
        See parent class

        The reward is the system-level velocity performance minus a small
        penalty for every rl vehicle that changed lanes this time step, in
        order to encourage minimizing lane-changing actions.
        """
        # system-level performance of vehicles from a velocity perspective
        reward = rewards.desired_velocity(self, fail=kwargs["fail"])

        # one penalty unit per rl car whose last lane change happened now
        lane_change_penalty = sum(
            1 for veh_id in self.rl_ids
            if self.vehicles.get_state(veh_id, "last_lc") == self.timer)

        return reward - lane_change_penalty

    def get_state(self):
        """
        See parent class

        The state is an array of the velocities, absolute positions, and lane
        numbers for each vehicle.
        """
        state = []
        for veh_id in self.sorted_ids:
            state.append([self.vehicles.get_speed(veh_id),
                          self.vehicles.get_absolute_position(veh_id),
                          self.vehicles.get_lane(veh_id)])
        return np.array(state)

    def apply_rl_actions(self, actions):
        """
        See parent class

        Takes a flat action vector of interleaved (acceleration, direction)
        pairs. Vehicles still inside their lane-change cooldown window have
        their lane-change request zeroed out; accelerations are always applied.
        """
        acceleration = actions[::2]
        direction = np.round(actions[1::2])

        # rl ids ordered to match the mapping in the observation space
        sorted_rl_ids = [veh_id for veh_id in self.sorted_ids
                         if veh_id in self.rl_ids]

        # mask of vehicles still inside their lane-change cooldown window
        in_cooldown = [
            self.timer <= self.lane_change_duration
            + self.vehicles.get_state(veh_id, 'last_lc')
            for veh_id in sorted_rl_ids]

        # vehicles in cooldown have their directions set to 0
        direction[in_cooldown] = np.array([0] * sum(in_cooldown))

        self.apply_acceleration(sorted_rl_ids, acc=acceleration)
        self.apply_lane_change(sorted_rl_ids, direction=direction)
class LaneChangeOnlyEnvironment(SimpleLaneChangingAccelerationEnvironment):
    """
    An extension of SimpleLaneChangingAccelerationEnvironment. Autonomous
    vehicles in this environment can only make lane-changing decisions. Their
    accelerations, on the other hand, are controlled by an human car-following
    model specified under "rl_acc_controller" in the additional_params
    attribute of env_params.
    """

    def __init__(self, env_params, sumo_params, scenario):
        super().__init__(env_params, sumo_params, scenario)

        # acceleration controller used for rl cars
        self.rl_controller = dict()
        for veh_id in self.rl_ids:
            acc_params = env_params.get_additional_param("rl_acc_controller")
            self.rl_controller[veh_id] = \
                acc_params[0](veh_id=veh_id, **acc_params[1])

    @property
    def action_space(self):
        """
        See parent class

        Actions are: a continuous lane-change direction for each rl vehicle
        """
        return Box(low=-1, high=1, shape=(self.vehicles.num_rl_vehicles,))

    def apply_rl_actions(self, actions):
        """
        see parent class

        Actions are applied to rl vehicles as follows:
        - accelerations are derived using the user-specified accel controller
        - lane-change commands are collected from rllab
        """
        direction = actions

        # re-arrange actions according to mapping in observation space
        sorted_rl_ids = \
            [veh_id for veh_id in self.sorted_ids if veh_id in self.rl_ids]

        # represents vehicles that are allowed to change lanes.
        # Fix: use vehicles.get_state(...) for consistency with the parent
        # class; the previous dict-style subscript bypassed the vehicles API.
        non_lane_changing_veh = \
            [self.timer <= self.lane_change_duration
             + self.vehicles.get_state(veh_id, 'last_lc')
             for veh_id in sorted_rl_ids]

        # vehicle that are not allowed to change have their directions set to 0
        direction[non_lane_changing_veh] = np.array([0] * sum(non_lane_changing_veh))

        self.apply_lane_change(sorted_rl_ids, direction=direction)

        # collect the accelerations for the rl vehicles as specified by the
        # human controller
        acceleration = []
        for veh_id in sorted_rl_ids:
            acceleration.append(self.rl_controller[veh_id].get_action(self))

        self.apply_acceleration(sorted_rl_ids, acc=acceleration)
| [
"flow.core.rewards.desired_velocity",
"gym.spaces.box.Box",
"numpy.array",
"gym.spaces.tuple_space.Tuple",
"numpy.round"
] | [((1501, 1567), 'gym.spaces.box.Box', 'Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(self.vehicles.num_vehicles,)'}), '(low=-np.inf, high=np.inf, shape=(self.vehicles.num_vehicles,))\n', (1504, 1567), False, 'from gym.spaces.box import Box\n'), ((1583, 1660), 'gym.spaces.box.Box', 'Box', ([], {'low': '(0)', 'high': '(self.scenario.lanes - 1)', 'shape': '(self.vehicles.num_vehicles,)'}), '(low=0, high=self.scenario.lanes - 1, shape=(self.vehicles.num_vehicles,))\n', (1586, 1660), False, 'from gym.spaces.box import Box\n'), ((1682, 1744), 'gym.spaces.box.Box', 'Box', ([], {'low': '(0.0)', 'high': 'np.inf', 'shape': '(self.vehicles.num_vehicles,)'}), '(low=0.0, high=np.inf, shape=(self.vehicles.num_vehicles,))\n', (1685, 1744), False, 'from gym.spaces.box import Box\n'), ((1759, 1793), 'gym.spaces.tuple_space.Tuple', 'Tuple', (['(speed, absolute_pos, lane)'], {}), '((speed, absolute_pos, lane))\n', (1764, 1793), False, 'from gym.spaces.tuple_space import Tuple\n'), ((2274, 2325), 'flow.core.rewards.desired_velocity', 'rewards.desired_velocity', (['self'], {'fail': "kwargs['fail']"}), "(self, fail=kwargs['fail'])\n", (2298, 2325), False, 'from flow.core import rewards\n'), ((3538, 3561), 'numpy.round', 'np.round', (['actions[1::2]'], {}), '(actions[1::2])\n', (3546, 3561), True, 'import numpy as np\n'), ((5333, 5392), 'gym.spaces.box.Box', 'Box', ([], {'low': '(-1)', 'high': '(1)', 'shape': '(self.vehicles.num_rl_vehicles,)'}), '(low=-1, high=1, shape=(self.vehicles.num_rl_vehicles,))\n', (5336, 5392), False, 'from gym.spaces.box import Box\n'), ((1239, 1251), 'numpy.array', 'np.array', (['lb'], {}), '(lb)\n', (1247, 1251), True, 'import numpy as np\n'), ((1253, 1265), 'numpy.array', 'np.array', (['ub'], {}), '(ub)\n', (1261, 1265), True, 'import numpy as np\n')] |
from PIL import Image
import numpy as np
import flask
import io
import base64
from os import path
import cv2
from prediccion import prediccion
import numpy as np
import json
import pruebita_svm
# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
model = None  # placeholder; the actual classifier lives inside `prediccion`
# category ids emitted by the classifier; each maps to a food name in elegirCategoria
categorias = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16"]
# prediction helper wrapping the trained image classifier
reconocimiento = prediccion()
# breakfast scene names, indexed by the scene SVM's predicted class id
escenas = ["desayuno paisa","desayuno paisa cafetero","desayuno rolo","desayuno americano","desayuno americano ligth"]
def readb64(base64_string):
    """Decode a base64-encoded image string into an OpenCV BGR array."""
    raw_bytes = base64.b64decode(base64_string)
    pil_image = Image.open(io.BytesIO(raw_bytes))
    # PIL delivers RGB; OpenCV expects BGR channel order
    return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
# Food name for each classifier category id. Unknown ids fall through to
# None, matching the original if-chain which returned nothing for them.
_NOMBRES_CATEGORIA = {
    "0": "Huevos",
    "1": "Arepas",
    "2": "Mantequilla",
    "3": "Chocolate",
    "4": "Pan",
    "5": "Cereales",
    "6": "Cafe",
    "7": "Leche",
    "8": "Tocino",
    "9": "Changua",
    "10": "Tamal",
    "11": "Papas",
    "12": "Calentado",
    "13": "Yuca frita",
    "14": "<NAME>",
    "15": "Yogurth",
    "16": "Pollo",
}


def elegirCategoria(categoria):
    """
    Translate a classifier category id into its food name.

    Parameters
    ----------
    categoria : str
        Category id as a string ("0" through "16").

    Returns
    -------
    str or None
        The food name, or None for unknown ids.
    """
    print(categoria)  # kept: the original logged the raw category id
    return _NOMBRES_CATEGORIA.get(categoria)
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS'),
    )
    for header_name, header_value in cors_headers:
        response.headers.add(header_name, header_value)
    return response
@app.route("/analisisEscena",methods=["GET", "POST"])
def analisis():
    """Classify a breakfast scene from the posted list of ingredient categories."""
    data = {"success": False}
    if flask.request.method == "GET":
        return "No implementado"
    elif flask.request.method == "POST":
        payload = json.loads(flask.request.data)
        materiales = payload['materiales']
        # feature vector of category ids, padded with "-1" up to 6 slots
        vector = [str(item['idCategoria']) for item in materiales]
        vector.extend("-1" for _ in range(6 - len(vector)))
        prediccion_svm = pruebita_svm.clf.predict([vector])
        escena = escenas[prediccion_svm[0]]
        data = {
            "idPrueba": 0,
            "analisis_escena": escena,
            "probabilidades": []
        }
    return flask.jsonify(data)
def _resultado_prediccion(image, id_imagen):
    """Run the classifier on a grayscale image and build the response payload."""
    indiceCategoria, predicciones = reconocimiento.predecir(image)
    print(indiceCategoria)
    print(predicciones)
    # raw scores -> plain floats rounded to 2 decimals for JSON
    probabilidades = [round(float(p), 2) for p in predicciones]
    return {
        "idImagen": id_imagen,
        "prediccion": elegirCategoria(categorias[indiceCategoria]).lower(),
        "probabilidades": probabilidades
    }


@app.route("/predecir", methods=["GET", "POST"])
def predict():
    """
    Predict the food category of an image.

    GET  : expects an ``idPrueba`` query arg naming a test image on disk.
    POST : expects a base64-encoded image in the ``imagen`` form field.
    The duplicated prediction/rounding/payload code of the two branches was
    factored into ``_resultado_prediccion``.
    """
    data = {"success": False}
    if flask.request.method == "GET":
        if flask.request.args.get("idPrueba"):
            idImagen = flask.request.args.get("idPrueba")
            image_path = "test/" + idImagen.split("_")[0] + "/" + idImagen + ".jpg"
            base_path = path.dirname(__file__)
            file_path = base_path + "/" + image_path
            print(file_path)
            image = cv2.imread(file_path, 0)  # flag 0: read directly as grayscale
            data = _resultado_prediccion(image, idImagen)
            print(data)
    elif flask.request.method == "POST":
        if flask.request.form.get("imagen"):
            image = readb64(flask.request.form["imagen"])
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            data = _resultado_prediccion(image, 0)
    return flask.jsonify(data)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
    #load_model()
    # NOTE(review): threaded=False presumably because the underlying model
    # session is not safe to share across Flask worker threads — confirm.
    app.run(debug=False, threaded=False)
| [
"prediccion.prediccion",
"flask.request.args.get",
"PIL.Image.open",
"json.loads",
"flask.Flask",
"io.BytesIO",
"base64.b64decode",
"pruebita_svm.clf.predict",
"flask.request.form.get",
"numpy.array",
"os.path.dirname",
"cv2.cvtColor",
"cv2.imread",
"flask.jsonify"
] | [((258, 279), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (269, 279), False, 'import flask\n'), ((416, 428), 'prediccion.prediccion', 'prediccion', ([], {}), '()\n', (426, 428), False, 'from prediccion import prediccion\n'), ((589, 601), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (599, 601), False, 'import io\n'), ((661, 677), 'PIL.Image.open', 'Image.open', (['sbuf'], {}), '(sbuf)\n', (671, 677), False, 'from PIL import Image\n'), ((4573, 4592), 'flask.jsonify', 'flask.jsonify', (['data'], {}), '(data)\n', (4586, 4592), False, 'import flask\n'), ((617, 648), 'base64.b64decode', 'base64.b64decode', (['base64_string'], {}), '(base64_string)\n', (633, 648), False, 'import base64\n'), ((702, 716), 'numpy.array', 'np.array', (['pimg'], {}), '(pimg)\n', (710, 716), True, 'import numpy as np\n'), ((2843, 2877), 'flask.request.args.get', 'flask.request.args.get', (['"""idPrueba"""'], {}), "('idPrueba')\n", (2865, 2877), False, 'import flask\n'), ((2167, 2197), 'json.loads', 'json.loads', (['flask.request.data'], {}), '(flask.request.data)\n', (2177, 2197), False, 'import json\n'), ((2461, 2495), 'pruebita_svm.clf.predict', 'pruebita_svm.clf.predict', (['[vector]'], {}), '([vector])\n', (2485, 2495), False, 'import pruebita_svm\n'), ((2679, 2698), 'flask.jsonify', 'flask.jsonify', (['data'], {}), '(data)\n', (2692, 2698), False, 'import flask\n'), ((2902, 2936), 'flask.request.args.get', 'flask.request.args.get', (['"""idPrueba"""'], {}), "('idPrueba')\n", (2924, 2936), False, 'import flask\n'), ((3037, 3059), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (3049, 3059), False, 'from os import path\n'), ((3162, 3186), 'cv2.imread', 'cv2.imread', (['file_path', '(0)'], {}), '(file_path, 0)\n', (3172, 3186), False, 'import cv2\n'), ((3799, 3831), 'flask.request.form.get', 'flask.request.form.get', (['"""imagen"""'], {}), "('imagen')\n", (3821, 3831), False, 'import flask\n'), ((3937, 3976), 'cv2.cvtColor', 'cv2.cvtColor', 
(['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3949, 3976), False, 'import cv2\n')] |
"""
Version 1.1.2
"""
from .Tokenizer import DDTokenizer
from sklearn import preprocessing
from .DDModelExceptions import *
from tensorflow.keras import backend
from .Models import Models
from .Parser import Parser
import tensorflow as tf
import pandas as pd
import numpy as np
import keras
import time
import os
import warnings
# silence deprecation/user warnings emitted during the TF/Keras imports above
warnings.filterwarnings('ignore')
class DDModel(Models):
"""
A class responsible for creating, storing, and working with our deep docking models
"""
def __init__(self, mode, input_shape, hyperparameters, metrics=None, loss='binary_crossentropy', regression=False,
name="model"):
"""
Parameters
----------
mode : str
A string indicating which model to use
input_shape : tuple or list
The input shape for the model
hyperparameters : dict
A dictionary containing the hyperparameters for the DDModel's model
metrics : list
The metric(s) used by keras
loss : str
The loss function used by keras
regression : bool
Set to true if the model is performing regression
"""
if metrics is None:
self.metrics = ['accuracy']
else:
self.metrics = metrics
# Use regression or use binary classification
output_activation = 'linear' if regression else 'sigmoid'
# choose the loss function
self.loss_func = loss
if regression and loss == 'binary_crossentropy':
self.loss_func = 'mean_squared_error'
hyperparameters["loss_func"] = self.loss_func
if mode == "loaded_model":
super().__init__(hyperparameters={'bin_array': [],
'dropout_rate': 0,
'learning_rate': 0,
'num_units': 0,
'epsilon': 0},
output_activation=output_activation, name=name)
self.mode = ""
self.input_shape = ()
self.history = keras.callbacks.History()
self.time = {"training_time": -1, "prediction_time": -1}
else:
# Create a model
super().__init__(hyperparameters=hyperparameters,
output_activation=output_activation, name=name)
self.mode = mode
self.input_shape = input_shape
self.history = keras.callbacks.History()
self.time = {'training_time': -1, "prediction_time": -1}
self.model = self._create_model()
self._compile()
def fit(self, train_x, train_y, epochs, batch_size, shuffle, class_weight, verbose, validation_data, callbacks):
"""
Reshapes the input data and fits the model
Parameters
----------
train_x : ndarray
Training data
train_y : ndarray
Training labels
epochs : int
Number of epochs to train on
batch_size : int
The batch size
shuffle : bool
Whether to shuffle the data
class_weight : dict
The class weights
verbose : int
The verbose
validation_data : list
The validation data and labels
callbacks : list
Keras callbacks
"""
# First reshape the data to fit the chosen model
# Here we form the shape
shape_train_x = [train_x.shape[0]]
shape_valid_x = [validation_data[0].shape[0]]
for val in self.model.input_shape[1:]:
shape_train_x.append(val)
shape_valid_x.append(val)
# Here we reshape the data
# Format: shape = (size of data, input_shape[0], ..., input_shape[n]
train_x = np.reshape(train_x, shape_train_x)
validation_data_x = np.reshape(validation_data[0], shape_valid_x)
validation_data_y = validation_data[1]
validation_data = (validation_data_x, validation_data_y)
# Track the training time
training_time = time.time()
# If we are in regression mode, ignore the class weights
if self.output_activation == 'linear':
class_weight = None
# Train the model and store the history
self.history = self.model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, shuffle=shuffle,
class_weight=class_weight, verbose=verbose, validation_data=validation_data,
callbacks=callbacks)
# Store the training time
training_time = time.time() - training_time
self.time['training_time'] = training_time
print("Training time:", training_time)
def predict(self, x_test, verbose=0):
"""
Reshapes the input data and returns the models predictions
Parameters
----------
x_test : ndarray
The test data
verbose : int
The verbose of the model's prediction
Returns
-------
predictions : ndarray
The model's predictions
"""
# We must reshape the test data to fit our model
shape = [x_test.shape[0]]
for val in list(self.model.input_shape)[1:]:
shape.append(val)
x_test = np.reshape(x_test, newshape=shape)
# Predict and return the predictions
prediction_time = time.time() # Keep track of how long prediction took
predictions = self.model.predict(x_test, verbose=verbose) # Predict
prediction_time = time.time() - prediction_time # Update prediction time
self.time['prediction_time'] = prediction_time # Store the prediction time
return predictions
def save(self, path, json=False):
self._write_stats_to_file(path)
if json:
json_model = self.model.to_json()
with open(path + ".json", 'w') as json_file:
json_file.write(json_model)
else:
try:
self.model.save(path, save_format='h5')
except:
print("Could not save as h5 file. This is probably due to tensorflow version.")
print("If the model is saved a directory, it will cause issues.")
print("Trying to save again...")
self.model.save(path)
def load_stats(self, path):
"""
Load the stats from a .ddss file into the current DDModel
Parameters
----------
path : str
"""
info = Parser.parse_ddss(path)
for key in info.keys():
try:
self.__dict__[key] = info[key]
except KeyError:
print(key, 'is not an attribute of this class.')
self.input_shape = "Loaded Model -> Input shape will be inferred"
if self.time == {}:
self.time = {"training_time": 'Could Not Be Loaded', "prediction_time": 'Could Not Be Loaded'}
def _write_stats_to_file(self, path="", return_string=False):
info = "* {}'s Stats * \n".format(self.name)
info += "- Model mode: " + self.mode + " \n"
info += "\n"
# Write the timings
if isinstance(self.time['training_time'], str) == False and self.time['training_time'] > -1:
if isinstance(self.history, dict):
num_eps = self.history['total_epochs']
else:
num_eps = len(self.history.history['loss'])
info += "- Model Time: \n"
info += " - training_time: {train_time}".format(train_time=self.time['training_time']) + " \n"
info += " - time_per_epoch: {epoch_time}".format(epoch_time=(self.time['training_time'] / num_eps)) + " \n"
info += " - prediction_time: {pred_time}".format(pred_time=self.time['prediction_time']) + " \n"
else:
info += "- Model Time: \n"
info += " - Model has not been trained yet. \n"
info += "\n"
# Write the history
try:
info += "- History Stats: \n"
if isinstance(self.history, dict):
hist = self.history
else:
hist = self.history.history
# Get all the history values and keys stores
for key in hist:
try:
info += " - {key}: {val} \n".format(key=key, val=hist[key][-1])
except TypeError:
info += " - {key}: {val} \n".format(key=key, val=hist[key])
try:
try:
info += " - total_epochs: {epochs}".format(epochs=len(hist['loss']))
except TypeError:
pass
info += "\n"
except KeyError:
info += " - Model has not been trained yet. \n"
except AttributeError or KeyError:
# Get all the history values and keys stores
info += " - Model has not been trained yet. \n"
info += "\n"
# Write the hyperparameters
info += "- Hyperparameter Stats: \n"
for key in self.hyperparameters.keys():
if key != 'bin_array' or len(self.hyperparameters[key]) > 0:
info += " - {key}: {val} \n".format(key=key, val=self.hyperparameters[key])
info += "\n"
# Write stats about the model architecture
info += "- Model Architecture Stats: \n"
try:
trainable_count = int(
np.sum([backend.count_params(p) for p in set(self.model.trainable_weights)]))
non_trainable_count = int(
np.sum([backend.count_params(p) for p in set(self.model.non_trainable_weights)]))
info += ' - total_params: {:,} \n'.format(trainable_count + non_trainable_count)
info += ' - trainable_params: {:,} \n'.format(trainable_count)
info += ' - non_trainable_params: {:,} \n'.format(non_trainable_count)
info += "\n"
except TypeError or AttributeError:
info += ' - total_params: Cannot be determined \n'
info += ' - trainable_params: Cannot be determined \n'
info += ' - non_trainable_params: Cannot be determined \n'
info += "\n"
# Create a layer display
display_string = ""
for i, layer in enumerate(self.model.layers):
if i == 0:
display_string += "Input: \n"
display_string += " [ {name} ] \n".format(name=layer.name)
info += display_string
if not return_string:
with open(path + '.ddss', 'w') as stat_file:
stat_file.write(info)
else:
return info
def _create_model(self):
"""Creates and returns a model
Raises
------
IncorrectModelModeError
If a mode was passed that does not exists this error will be raised
"""
# Try creating the model and if failed raise exception
try:
model = getattr(self, self.mode, None)(self.input_shape)
except TypeError:
raise IncorrectModelModeError(self.mode, Models.get_available_modes())
return model
def _compile(self):
"""Compiles the DDModel object's model"""
if 'epsilon' not in self.hyperparameters.keys():
self.hyperparameters['epsilon'] = 1e-06
adam_opt = tf.keras.optimizers.Adam(learning_rate=self.hyperparameters['learning_rate'],
epsilon=self.hyperparameters['epsilon'])
self.model.compile(optimizer=adam_opt, loss=self.loss_func, metrics=self.metrics)
@staticmethod
def load(model, **kwargs):
pre_compiled = True
# Can be a path to a model or a model instance
if type(model) is str:
dd_model = DDModel(mode="loaded_model", input_shape=[], hyperparameters={})
# If we would like to load from a json, we can do that as well.
if '.json' in model:
dd_model.model = tf.keras.models.model_from_json(open(model).read(),
custom_objects=Models.get_custom_objects())
model = model.replace('.json', "")
pre_compiled = False
else:
dd_model.model = tf.keras.models.load_model(model, custom_objects=Models.get_custom_objects())
else:
dd_model = DDModel(mode="loaded_model", input_shape=[], hyperparameters={})
dd_model.model = model
if 'kt_hyperparameters' in kwargs.keys():
hyp = kwargs['kt_hyperparameters'].get_config()['values']
for key in hyp.keys():
try:
dd_model.__dict__['hyperparameters'][key] = hyp[key]
if key == 'kernel_reg':
dd_model.__dict__['hyperparameters'][key] = ['None', 'Lasso', 'l1', 'l2'][int(hyp[key])]
except KeyError:
print(key, 'is not an attribute of this class.')
else:
# Try to load a stats file
try:
dd_model.load_stats(model + ".ddss")
except TypeError or FileNotFoundError:
print("Could not find a stats file...")
if 'metrics' in kwargs.keys():
dd_model.metrics = kwargs['metrics']
else:
dd_model.metrics = ['accuracy']
if not pre_compiled:
dd_model._compile()
if 'name' in kwargs.keys():
dd_model.name = kwargs['name']
dd_model.mode = 'loaded_model'
return dd_model
@staticmethod
def process_smiles(smiles, vocab_size=100, fit_range=1000, normalize=True, use_padding=True, padding_size=None, one_hot=False):
# Create the tokenizer
tokenizer = DDTokenizer(vocab_size)
# Fit the tokenizer
tokenizer.fit(smiles[0:fit_range])
# Encode the smiles
encoded_smiles = tokenizer.encode(data=smiles, use_padding=use_padding,
padding_size=padding_size, normalize=normalize)
if one_hot:
encoded_smiles = DDModel.one_hot_encode(encoded_smiles, len(tokenizer.word_index))
return encoded_smiles
@staticmethod
def one_hot_encode(encoded_smiles, unique_category_count):
one_hot = keras.backend.one_hot(encoded_smiles, unique_category_count)
return one_hot
@staticmethod
def normalize(values: pd.Series):
assert type(values) is pd.Series, "Type Error -> Expected pandas.Series"
# Extract the indices and name
indices = values.index
name = values.index.name
# Normalizes values
normalized_values = preprocessing.minmax_scale(values, (0, 1))
# Create a pandas series to return
values = pd.Series(index=indices, data=normalized_values, name=name)
return values
def __repr__(self):
return self._write_stats_to_file(return_string=True)
| [
"pandas.Series",
"keras.backend.one_hot",
"numpy.reshape",
"keras.callbacks.History",
"tensorflow.keras.backend.count_params",
"tensorflow.keras.optimizers.Adam",
"sklearn.preprocessing.minmax_scale",
"time.time",
"warnings.filterwarnings"
] | [((330, 363), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (353, 363), False, 'import warnings\n'), ((3905, 3939), 'numpy.reshape', 'np.reshape', (['train_x', 'shape_train_x'], {}), '(train_x, shape_train_x)\n', (3915, 3939), True, 'import numpy as np\n'), ((3968, 4013), 'numpy.reshape', 'np.reshape', (['validation_data[0]', 'shape_valid_x'], {}), '(validation_data[0], shape_valid_x)\n', (3978, 4013), True, 'import numpy as np\n'), ((4185, 4196), 'time.time', 'time.time', ([], {}), '()\n', (4194, 4196), False, 'import time\n'), ((5450, 5484), 'numpy.reshape', 'np.reshape', (['x_test'], {'newshape': 'shape'}), '(x_test, newshape=shape)\n', (5460, 5484), True, 'import numpy as np\n'), ((5557, 5568), 'time.time', 'time.time', ([], {}), '()\n', (5566, 5568), False, 'import time\n'), ((11596, 11719), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': "self.hyperparameters['learning_rate']", 'epsilon': "self.hyperparameters['epsilon']"}), "(learning_rate=self.hyperparameters['learning_rate'\n ], epsilon=self.hyperparameters['epsilon'])\n", (11620, 11719), True, 'import tensorflow as tf\n'), ((14594, 14654), 'keras.backend.one_hot', 'keras.backend.one_hot', (['encoded_smiles', 'unique_category_count'], {}), '(encoded_smiles, unique_category_count)\n', (14615, 14654), False, 'import keras\n'), ((14976, 15018), 'sklearn.preprocessing.minmax_scale', 'preprocessing.minmax_scale', (['values', '(0, 1)'], {}), '(values, (0, 1))\n', (15002, 15018), False, 'from sklearn import preprocessing\n'), ((15080, 15139), 'pandas.Series', 'pd.Series', ([], {'index': 'indices', 'data': 'normalized_values', 'name': 'name'}), '(index=indices, data=normalized_values, name=name)\n', (15089, 15139), True, 'import pandas as pd\n'), ((2167, 2192), 'keras.callbacks.History', 'keras.callbacks.History', ([], {}), '()\n', (2190, 2192), False, 'import keras\n'), ((2546, 2571), 'keras.callbacks.History', 
'keras.callbacks.History', ([], {}), '()\n', (2569, 2571), False, 'import keras\n'), ((4735, 4746), 'time.time', 'time.time', ([], {}), '()\n', (4744, 4746), False, 'import time\n'), ((5714, 5725), 'time.time', 'time.time', ([], {}), '()\n', (5723, 5725), False, 'import time\n'), ((9668, 9691), 'tensorflow.keras.backend.count_params', 'backend.count_params', (['p'], {}), '(p)\n', (9688, 9691), False, 'from tensorflow.keras import backend\n'), ((9801, 9824), 'tensorflow.keras.backend.count_params', 'backend.count_params', (['p'], {}), '(p)\n', (9821, 9824), False, 'from tensorflow.keras import backend\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.