code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Authored by <NAME> and <NAME>, 2020
"""
A collection of functions for generating specific envelope shapes.
"""
import numpy as np
from scipy.stats import norm
def sin2(nr_samples):
    """Squared-sine envelope: rises smoothly from 0 to 1 and back to 0."""
    grid = np.linspace(0.0, 1.0, nr_samples)
    return np.square(np.sin(np.pi * grid))
def sin_p(p, nr_samples):
    """Sine envelope raised to an arbitrary power ``p``."""
    grid = np.linspace(0.0, 1.0, nr_samples)
    return np.power(np.sin(np.pi * grid), p)
def sinc(lim, nr_samples):
    """Normalised sinc envelope sampled on ``[-lim, lim]``."""
    return np.sinc(np.linspace(-lim, lim, nr_samples))
def triangle(nr_samples):
    """Linear ramp from 0 up to 1 and back down over ``nr_samples`` points."""
    half = nr_samples // 2
    if nr_samples % 2:
        # Odd count: a single shared apex sample of exactly 1 in the middle.
        up = np.linspace(0.0, 1.0, half, endpoint=False)
        return np.concatenate((up, [1], np.flip(up)))
    # Even count: both halves contain the apex value 1.0.
    up = np.linspace(0.0, 1.0, half)
    down = np.linspace(1.0, 0.0, half)
    return np.concatenate((up, down))
def cool(nr_samples):
    """Triangle-windowed fast sine: two full sine cycles under a triangle."""
    grid = np.linspace(0.0, 1.0, nr_samples)
    return triangle(nr_samples) * np.sin(4 * np.pi * grid)
def gaussian(nr_samples, trunc):
    """Standard-normal bell curve on ``[-trunc, trunc]``, rescaled to peak at 1."""
    grid = np.linspace(-trunc, trunc, nr_samples)
    density = norm.pdf(grid, 0, 1)
    # Normalise so the largest sampled value is exactly 1.
    return density / density.max()
| [
"numpy.flip",
"scipy.stats.norm.pdf",
"numpy.sinc",
"numpy.sin",
"numpy.linspace",
"numpy.concatenate"
] | [((195, 228), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'nr_samples'], {}), '(0.0, 1.0, nr_samples)\n', (206, 228), True, 'import numpy as np\n'), ((297, 330), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'nr_samples'], {}), '(0.0, 1.0, nr_samples)\n', (308, 330), True, 'import numpy as np\n'), ((400, 434), 'numpy.linspace', 'np.linspace', (['(-lim)', 'lim', 'nr_samples'], {}), '(-lim, lim, nr_samples)\n', (411, 434), True, 'import numpy as np\n'), ((446, 456), 'numpy.sinc', 'np.sinc', (['x'], {}), '(x)\n', (453, 456), True, 'import numpy as np\n'), ((837, 870), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'nr_samples'], {}), '(0.0, 1.0, nr_samples)\n', (848, 870), True, 'import numpy as np\n'), ((879, 900), 'numpy.sin', 'np.sin', (['(4 * np.pi * x)'], {}), '(4 * np.pi * x)\n', (885, 900), True, 'import numpy as np\n'), ((990, 1028), 'numpy.linspace', 'np.linspace', (['(-trunc)', 'trunc', 'nr_samples'], {}), '(-trunc, trunc, nr_samples)\n', (1001, 1028), True, 'import numpy as np\n'), ((1037, 1054), 'scipy.stats.norm.pdf', 'norm.pdf', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (1045, 1054), False, 'from scipy.stats import norm\n'), ((240, 257), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (246, 257), True, 'import numpy as np\n'), ((342, 359), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (348, 359), True, 'import numpy as np\n'), ((526, 564), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(nr_samples // 2)'], {}), '(0.0, 1.0, nr_samples // 2)\n', (537, 564), True, 'import numpy as np\n'), ((578, 616), 'numpy.linspace', 'np.linspace', (['(1.0)', '(0.0)', '(nr_samples // 2)'], {}), '(1.0, 0.0, nr_samples // 2)\n', (589, 616), True, 'import numpy as np\n'), ((632, 656), 'numpy.concatenate', 'np.concatenate', (['(t1, t2)'], {}), '((t1, t2))\n', (646, 656), True, 'import numpy as np\n'), ((680, 734), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(nr_samples // 2)'], {'endpoint': 
'(False)'}), '(0.0, 1.0, nr_samples // 2, endpoint=False)\n', (691, 734), True, 'import numpy as np\n'), ((748, 759), 'numpy.flip', 'np.flip', (['t1'], {}), '(t1)\n', (755, 759), True, 'import numpy as np\n'), ((775, 804), 'numpy.concatenate', 'np.concatenate', (['(t1, [1], t2)'], {}), '((t1, [1], t2))\n', (789, 804), True, 'import numpy as np\n')] |
import os
import argparse
import glob
import numpy as np
import scipy.interpolate
import scipy.io.wavfile
import python_speech_features
import utils
import timit
# This is based on Table I and Section II of
# <NAME> and <NAME>: Speaker-Independent Phone Recognition Using Hidden Markov Models.
# IEEE Transactions on Acoustics, Speech, and Signal Processing. 1989.
FOLDS = {
'ae': 'ae', 'ah': 'ah', 'ax': 'ah', 'ax-h': 'ah', 'ao': 'ao',
'aa': 'ao', 'aw': 'aw', 'ay': 'ay', 'b': 'b', 'ch': 'ch',
'd': 'd', 'dh': 'dh', 'dx': 'dx', 'eh': 'eh', 'el': 'el',
'l': 'el', 'en': 'en', 'n': 'en', 'nx': 'en', 'er': 'er',
'axr': 'er', 'ey': 'ey', 'f': 'f', 'g': 'g', 'h#': 'h#',
'pcl': 'h#', 'tcl': 'h#', 'kcl': 'h#', 'bcl': 'h#', 'dcl': 'h#',
'gcl': 'h#', 'epi': 'h#', 'pau': 'h#', 'hh': 'hh', 'hv': 'hh',
'ih': 'ih', 'ix': 'ih', 'iy': 'iy', 'jh': 'jh', 'k': 'k',
'm': 'm', 'em': 'm', 'ng': 'ng', 'eng': 'ng', 'ow': 'ow',
'oy': 'oy', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's',
'sh': 'sh', 'zh': 'sh', 't': 't', 'th': 'th', 'uh': 'uh',
'uw': 'uw', 'ux': 'uw', 'v': 'v', 'w': 'w', 'y': 'y',
'z': 'z'
}
TOKEN_VOCAB = sorted(list(set(FOLDS.values())))
assert len(TOKEN_VOCAB) == 40
DEFAULT_DATA_DIR = timit.DEFAULT_DATA_DIR
def _audio_and_labels(prefix):
    """ Load and align a TIMIT wav file with its folded phonemes.

    Args:
      prefix: A string. Path to one TIMIT utterance without the extension;
        both `prefix + '.wav'` and `prefix + '.phn'` must exist.

    Returns:
      A tuple, `(audio, labels)`. A 1-D float array and a 1-D int array, both
      with the same shape.

    Raises:
      RuntimeError: If the wav sampling rate is unexpected, or if some sample
        in the annotated span ends up without a phoneme label.
    """
    rate, audio = scipy.io.wavfile.read(prefix + '.wav')
    if rate != timit.SAMPLE_RATE:
        raise RuntimeError('Encountered an unexpected sampling rate of %d in %s' % (rate, prefix))
    # `np.float` and `np.object` were deprecated aliases of the builtins and
    # were removed in NumPy 1.24; use the builtin types directly.
    audio = np.asarray(audio, dtype=float)
    phoneme_data = np.loadtxt(prefix + '.phn', dtype=object, comments=None, delimiter=' ',
                              converters={0: int, 1: int, 2: lambda x: str(x, encoding='ascii')})
    n = np.arange(audio.size)
    # Start with -1 everywhere so unlabeled gaps can be detected below.
    labels = -1 * np.ones([audio.size], dtype=np.int8)
    for start, end, phoneme in phoneme_data:
        phoneme = FOLDS[phoneme]
        labels[(n >= start) & (n < end)], = utils.tokens_to_ids([phoneme], TOKEN_VOCAB)
    # Trim audio and labels to the span covered by the phoneme annotations.
    audio_start = np.min(phoneme_data[:, 0])
    audio_end = np.max(phoneme_data[:, 1])
    audio = audio[audio_start:audio_end]
    labels = labels[audio_start:audio_end]
    if any(labels == -1):
        raise RuntimeError('Encountered incomplete labeling in %s' % prefix)
    return audio, labels
def _mfcc_and_labels(audio, labels):
    """ Convert to MFCC features and corresponding (interpolated) labels.

    Returns:
      A tuple, `(mfcc_features, mfcc_labels)`. A 1-D float array and a 1-D int
      array, both with the same shape.
    """
    mfcc_sample_rate = 100.0
    mfcc_features = python_speech_features.mfcc(
        audio, samplerate=timit.SAMPLE_RATE, winlen=0.025,
        winstep=1.0 / mfcc_sample_rate, lowfreq=85.0,
        highfreq=timit.SAMPLE_RATE / 2, winfunc=lambda m: np.hamming(m))
    # Carry each sample-rate label over to the (much coarser) MFCC frame
    # times via nearest-neighbor interpolation.
    t_audio = np.linspace(0.0, audio.shape[0] * 1.0 / timit.SAMPLE_RATE,
                          audio.size, endpoint=False)
    t_mfcc = np.linspace(0.0, mfcc_features.shape[0] * 1.0 / mfcc_sample_rate,
                         mfcc_features.shape[0], endpoint=False)
    interp_func = scipy.interpolate.interp1d(t_audio, labels, kind='nearest')
    return mfcc_features, interp_func(t_mfcc)
def load(data_dir=DEFAULT_DATA_DIR, mfcc=True):
    """ Load all standardized TIMIT data with folded phoneme labels.

    Args:
      data_dir: A string. The data directory.
      mfcc: A boolean. If True, return MFCC sequences and their corresponding
        label sequences. Otherwise, return raw audio sequences in their
        associated label sequences.

    Returns:
      A tuple with 6 elements: train inputs, train labels, val inputs,
      val labels, test inputs, test labels. Each entry is a list of sequences.
      All input sequences are 2-D float arrays with shape
      `[length, values_per_step]` and all label sequences are 1-D int8 arrays
      with shape `[length]`.

    Raises:
      ValueError: If any expected `.npy` file is missing from `data_dir`.
    """
    kinds = ['mfcc', 'mfcc_labels'] if mfcc else ['audio', 'labels']
    ret = []
    for name in ['train', 'val', 'test']:
        for kind in kinds:  # renamed from `type`, which shadowed the builtin
            path = os.path.join(data_dir, name + '_' + kind + '.npy')
            if not os.path.exists(path):
                raise ValueError('Data not found in %s. Run timit.py and timitphonemerec.py.' % data_dir)
            # The files hold object arrays of variable-length sequences;
            # NumPy >= 1.16.3 defaults allow_pickle to False and would raise
            # on them, so opt in explicitly.
            data = np.load(path, allow_pickle=True)
            if kind == 'audio':
                # Raw audio is 1-D; add a trailing axis so every input
                # sequence has shape `[length, values_per_step]`.
                data = [seq[:, np.newaxis] for seq in data]
            ret.append(data)
    return tuple(ret)
def load_split(data_dir=DEFAULT_DATA_DIR, val=True, mfcc=True, normalize=True):
    """ Load a standardized-TIMIT train, test split.

    Args:
      data_dir: A string. The data directory.
      val: A boolean. If True, return the validation set as the test set.
      mfcc: A boolean. If True, return MFCC sequences and their corresponding
        label sequences; otherwise raw audio sequences and their labels.
      normalize: A boolean. If True, center and scale each input sequence
        individually (per-feature mean 0, std 1).

    Returns:
      A tuple, `(train_inputs, train_labels, test_inputs, test_labels)`. Each
      is a list of sequences. All inputs are 2-D float arrays with shape
      `[length, values_per_step]` and all labels are 1-D int8 arrays with
      shape `[length]`.
    """
    train_inputs, train_labels, val_inputs, val_labels, test_inputs, test_labels = \
        load(data_dir=data_dir, mfcc=mfcc)
    if val:
        test_inputs, test_labels = val_inputs, val_labels
    if normalize:
        def _standardize(seqs):
            # Center each sequence, then scale by its (shift-invariant) std.
            centered = [s - np.mean(s, axis=0, keepdims=True) for s in seqs]
            return [s / np.std(s, axis=0, keepdims=True) for s in centered]
        train_inputs = _standardize(train_inputs)
        test_inputs = _standardize(test_inputs)
    return train_inputs, train_labels, test_inputs, test_labels
def main():
    """ Further process and simplify standardized TIMIT. """
    parser = argparse.ArgumentParser(
        description=main.__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--data_dir', type=str, default=DEFAULT_DATA_DIR,
                        help='''The standardized-TIMIT data directory.''')
    args = parser.parse_args()
    if not os.path.exists(args.data_dir):
        raise ValueError('%s does not exist. Did you run timit.py?' % args.data_dir)
    for name in ['train', 'val', 'test']:
        print('Processing and saving the %s set..' % name)
        wav_pattern = os.path.join(args.data_dir, name, '*', '*.wav')
        # Strip the '.wav' suffix to get per-utterance prefixes.
        prefixes = [p[:-4] for p in sorted(glob.glob(wav_pattern))]
        audio_label_pairs = [_audio_and_labels(p) for p in prefixes]
        mfcc_label_pairs = [_mfcc_and_labels(*pair) for pair in audio_label_pairs]
        # Persist the four parallel sequence collections as .npy files.
        audio_seqs, label_seqs = zip(*audio_label_pairs)
        np.save(os.path.join(args.data_dir, name + '_audio.npy'), audio_seqs)
        np.save(os.path.join(args.data_dir, name + '_labels.npy'), label_seqs)
        mfcc_seqs, mfcc_label_seqs = zip(*mfcc_label_pairs)
        np.save(os.path.join(args.data_dir, name + '_mfcc.npy'), mfcc_seqs)
        np.save(os.path.join(args.data_dir, name + '_mfcc_labels.npy'), mfcc_label_seqs)
# Script entry point: regenerate the simplified TIMIT .npy files.
if __name__ == '__main__':
    main()
| [
"numpy.load",
"argparse.ArgumentParser",
"numpy.hamming",
"numpy.std",
"numpy.asarray",
"os.path.exists",
"numpy.ones",
"numpy.max",
"numpy.min",
"numpy.arange",
"numpy.mean",
"numpy.linspace",
"glob.glob",
"python_speech_features.mfcc",
"os.path.join",
"utils.tokens_to_ids"
] | [((1930, 1963), 'numpy.asarray', 'np.asarray', (['audio'], {'dtype': 'np.float'}), '(audio, dtype=np.float)\n', (1940, 1963), True, 'import numpy as np\n'), ((2159, 2180), 'numpy.arange', 'np.arange', (['audio.size'], {}), '(audio.size)\n', (2168, 2180), True, 'import numpy as np\n'), ((2406, 2432), 'numpy.min', 'np.min', (['phoneme_data[:, 0]'], {}), '(phoneme_data[:, 0])\n', (2412, 2432), True, 'import numpy as np\n'), ((2447, 2473), 'numpy.max', 'np.max', (['phoneme_data[:, 1]'], {}), '(phoneme_data[:, 1])\n', (2453, 2473), True, 'import numpy as np\n'), ((3001, 3184), 'python_speech_features.mfcc', 'python_speech_features.mfcc', (['audio'], {'samplerate': 'timit.SAMPLE_RATE', 'winlen': '(0.025)', 'winstep': '(1.0 / mfcc_sample_rate)', 'lowfreq': '(85.0)', 'highfreq': '(timit.SAMPLE_RATE / 2)', 'winfunc': 'winfunc'}), '(audio, samplerate=timit.SAMPLE_RATE, winlen=\n 0.025, winstep=1.0 / mfcc_sample_rate, lowfreq=85.0, highfreq=timit.\n SAMPLE_RATE / 2, winfunc=winfunc)\n', (3028, 3184), False, 'import python_speech_features\n'), ((3275, 3365), 'numpy.linspace', 'np.linspace', (['(0.0)', '(audio.shape[0] * 1.0 / timit.SAMPLE_RATE)', 'audio.size'], {'endpoint': '(False)'}), '(0.0, audio.shape[0] * 1.0 / timit.SAMPLE_RATE, audio.size,\n endpoint=False)\n', (3286, 3365), True, 'import numpy as np\n'), ((3373, 3482), 'numpy.linspace', 'np.linspace', (['(0.0)', '(mfcc_features.shape[0] * 1.0 / mfcc_sample_rate)', 'mfcc_features.shape[0]'], {'endpoint': '(False)'}), '(0.0, mfcc_features.shape[0] * 1.0 / mfcc_sample_rate,\n mfcc_features.shape[0], endpoint=False)\n', (3384, 3482), True, 'import numpy as np\n'), ((6385, 6471), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'formatter_class': 'formatter_class'}), '(description=description, formatter_class=\n formatter_class)\n', (6408, 6471), False, 'import argparse\n'), ((2197, 2233), 'numpy.ones', 'np.ones', (['[audio.size]'], {'dtype': 'np.int8'}), '([audio.size], 
dtype=np.int8)\n', (2204, 2233), True, 'import numpy as np\n'), ((2346, 2389), 'utils.tokens_to_ids', 'utils.tokens_to_ids', (['[phoneme]', 'TOKEN_VOCAB'], {}), '([phoneme], TOKEN_VOCAB)\n', (2365, 2389), False, 'import utils\n'), ((2969, 2982), 'numpy.hamming', 'np.hamming', (['x'], {}), '(x)\n', (2979, 2982), True, 'import numpy as np\n'), ((6651, 6680), 'os.path.exists', 'os.path.exists', (['args.data_dir'], {}), '(args.data_dir)\n', (6665, 6680), False, 'import os\n'), ((6875, 6922), 'os.path.join', 'os.path.join', (['args.data_dir', 'name', '"""*"""', '"""*.wav"""'], {}), "(args.data_dir, name, '*', '*.wav')\n", (6887, 6922), False, 'import os\n'), ((4454, 4504), 'os.path.join', 'os.path.join', (['data_dir', "(name + '_' + type + '.npy')"], {}), "(data_dir, name + '_' + type + '.npy')\n", (4466, 4504), False, 'import os\n'), ((4651, 4664), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (4658, 4664), True, 'import numpy as np\n'), ((7209, 7257), 'os.path.join', 'os.path.join', (['args.data_dir', "(name + '_audio.npy')"], {}), "(args.data_dir, name + '_audio.npy')\n", (7221, 7257), False, 'import os\n'), ((7283, 7332), 'os.path.join', 'os.path.join', (['args.data_dir', "(name + '_labels.npy')"], {}), "(args.data_dir, name + '_labels.npy')\n", (7295, 7332), False, 'import os\n'), ((7415, 7462), 'os.path.join', 'os.path.join', (['args.data_dir', "(name + '_mfcc.npy')"], {}), "(args.data_dir, name + '_mfcc.npy')\n", (7427, 7462), False, 'import os\n'), ((7487, 7541), 'os.path.join', 'os.path.join', (['args.data_dir', "(name + '_mfcc_labels.npy')"], {}), "(args.data_dir, name + '_mfcc_labels.npy')\n", (7499, 7541), False, 'import os\n'), ((4518, 4538), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4532, 4538), False, 'import os\n'), ((5834, 5869), 'numpy.mean', 'np.mean', (['seq'], {'axis': '(0)', 'keepdims': '(True)'}), '(seq, axis=0, keepdims=True)\n', (5841, 5869), True, 'import numpy as np\n'), ((5921, 5955), 'numpy.std', 'np.std', 
(['seq'], {'axis': '(0)', 'keepdims': '(True)'}), '(seq, axis=0, keepdims=True)\n', (5927, 5955), True, 'import numpy as np\n'), ((6006, 6041), 'numpy.mean', 'np.mean', (['seq'], {'axis': '(0)', 'keepdims': '(True)'}), '(seq, axis=0, keepdims=True)\n', (6013, 6041), True, 'import numpy as np\n'), ((6091, 6125), 'numpy.std', 'np.std', (['seq'], {'axis': '(0)', 'keepdims': '(True)'}), '(seq, axis=0, keepdims=True)\n', (6097, 6125), True, 'import numpy as np\n'), ((6968, 6986), 'glob.glob', 'glob.glob', (['pattern'], {}), '(pattern)\n', (6977, 6986), False, 'import glob\n')] |
import glob
import os
import numpy as np
import random
def make_dummy_annotations_file(imgdir, filename):
    """Write a CSV of 10 random fake point annotations per jpg in *imgdir*.

    Each data row is `Name, Row, Column, Label` with random coordinates in
    [10, 500) and one of two placeholder labels chosen at random.
    """
    row_fmt = '{}, {}, {}, {}\n'
    with open(filename, 'w') as out:
        out.write(row_fmt.format('Name', 'Row', 'Column', 'Label'))
        for image in glob.glob(os.path.join(imgdir, '*.jpg')):
            base = os.path.basename(image)
            for _ in range(10):
                label = 'sdf' if random.random() < .5 else 'sdfddd'
                out.write(row_fmt.format(base,
                                         np.random.randint(10, 500),
                                         np.random.randint(10, 500),
                                         label))
| [
"random.random",
"numpy.random.randint",
"os.path.join",
"os.path.basename"
] | [((234, 263), 'os.path.join', 'os.path.join', (['imgdir', '"""*.jpg"""'], {}), "(imgdir, '*.jpg')\n", (246, 263), False, 'import os\n'), ((296, 311), 'random.random', 'random.random', ([], {}), '()\n', (309, 311), False, 'import random\n'), ((357, 380), 'os.path.basename', 'os.path.basename', (['image'], {}), '(image)\n', (373, 380), False, 'import os\n'), ((382, 408), 'numpy.random.randint', 'np.random.randint', (['(10)', '(500)'], {}), '(10, 500)\n', (399, 408), True, 'import numpy as np\n'), ((410, 436), 'numpy.random.randint', 'np.random.randint', (['(10)', '(500)'], {}), '(10, 500)\n', (427, 436), True, 'import numpy as np\n'), ((495, 518), 'os.path.basename', 'os.path.basename', (['image'], {}), '(image)\n', (511, 518), False, 'import os\n'), ((520, 546), 'numpy.random.randint', 'np.random.randint', (['(10)', '(500)'], {}), '(10, 500)\n', (537, 546), True, 'import numpy as np\n'), ((548, 574), 'numpy.random.randint', 'np.random.randint', (['(10)', '(500)'], {}), '(10, 500)\n', (565, 574), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
from keras.models import Sequential
from keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D
from keras.activations import relu,softmax
import keras
import os
# Load the MNIST digit arrays from a local .npz archive.
mnist=np.load("mnist.npz")
x_train,y_train=mnist["x_train"],mnist["y_train"]
x_test,y_test=mnist["x_test"],mnist["y_test"]
n_train=x_train.shape[0]
n_test=x_test.shape[0]
# The reshape below appends the channel axis last, so the Keras backend must
# not be configured as "channels_first".
assert(keras.backend.image_data_format()!="channels_first")
x_train=x_train.reshape(n_train,28,28,1).astype(np.float64)
x_test=x_test.reshape(n_test,28,28,1).astype(np.float64)
# Normalization: pixel values 0..255 scaled into [0, 1].
x_train/=0xFF
x_test/=0xFF
print("Found %d samples in training set and %d in testing set" \
        % (n_train,n_test))
# to_categorical helps convert type 3 to [0 0 0 1 ... 0]
y_train=keras.utils.to_categorical(y_train,10)
y_test=keras.utils.to_categorical(y_test,10)
fnn=None
# Reuse a previously trained model from disk when available; otherwise build,
# train, and save a small CNN (despite the "fnn" name).
if os.path.exists("mnist_fnn.h5"):
    fnn=keras.models.load_model("mnist_fnn.h5")
else: # If no config exists, then train a new network
    fnn=Sequential()
    fnn.add(Conv2D(32,kernel_size=(3,3),activation="relu",
        input_shape=(28,28,1)))
    fnn.add(Conv2D(64,(3,3),activation="relu"))
    fnn.add(MaxPooling2D(pool_size=(2,2)))
    fnn.add(Dropout(0.25))
    fnn.add(Flatten())
    fnn.add(Dense(128,activation="relu"))
    fnn.add(Dropout(0.25))
    fnn.add(Dense(10,activation="softmax"))
    fnn.compile(loss=keras.losses.categorical_crossentropy,
            optimizer=keras.optimizers.Adadelta(),
            metrics=["accuracy"])
    fnn.fit(x_train,y_train,batch_size=128,epochs=12,verbose=1,
            validation_data=(x_test,y_test))
    fnn.save("mnist_fnn.h5")
score=fnn.evaluate(x_test,y_test,verbose=1)
# NOTE(review): "%.f" prints the accuracy with zero decimal places — looks
# like "%.2f" (or plain "%f") was intended; confirm before changing output.
print("Test result: loss=%f, accuracy=%.f" % (score[0],score[1]))
| [
"keras.models.load_model",
"keras.optimizers.Adadelta",
"numpy.load",
"keras.backend.image_data_format",
"keras.layers.Dropout",
"os.path.exists",
"keras.layers.Flatten",
"keras.layers.Dense",
"keras.layers.Conv2D",
"keras.models.Sequential",
"keras.layers.MaxPooling2D",
"keras.utils.to_catego... | [((218, 238), 'numpy.load', 'np.load', (['"""mnist.npz"""'], {}), "('mnist.npz')\n", (225, 238), True, 'import numpy as np\n'), ((763, 802), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', '(10)'], {}), '(y_train, 10)\n', (789, 802), False, 'import keras\n'), ((809, 847), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', '(10)'], {}), '(y_test, 10)\n', (835, 847), False, 'import keras\n'), ((861, 891), 'os.path.exists', 'os.path.exists', (['"""mnist_fnn.h5"""'], {}), "('mnist_fnn.h5')\n", (875, 891), False, 'import os\n'), ((392, 425), 'keras.backend.image_data_format', 'keras.backend.image_data_format', ([], {}), '()\n', (423, 425), False, 'import keras\n'), ((901, 940), 'keras.models.load_model', 'keras.models.load_model', (['"""mnist_fnn.h5"""'], {}), "('mnist_fnn.h5')\n", (924, 940), False, 'import keras\n'), ((1005, 1017), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1015, 1017), False, 'from keras.models import Sequential\n'), ((1030, 1104), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': '(28, 28, 1)'}), "(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1))\n", (1036, 1104), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1121, 1158), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (1127, 1158), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1169, 1199), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1181, 1199), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1212, 1225), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1219, 1225), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1239, 1248), 
'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1246, 1248), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1262, 1291), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1267, 1291), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1304, 1317), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1311, 1317), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1331, 1362), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (1336, 1362), False, 'from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1441, 1468), 'keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (1466, 1468), False, 'import keras\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 24 08:50:57 2019
@author: Gary
This code is used to create new versions of the xlate files that include
any NEW codes that were not in the previous data set. These files
can then be hand curated to fully process the new dataset.
"""
import pandas as pd
import numpy as np
import csv
import core.Categorize_records as cat_rec
import core.CAS_tools as ct
tmpdir = './tmp/'
indir = './sources/'
valid_carrier_fn = indir+'valid_carrier.csv'
out_carrier_fn = tmpdir+'valid_carrierWORK.csv'
single_xlate_set = [] # currently NO one-to-one xlate files in use
# in this shared_xlate, the key is the name of the xlate table and the
# items in the list are the sources for this hybrid xlate list.
shared_xlate = {'company':[('supplier','Supplier'), # (tableName,fieldName)
('operator','OperatorName')]}
def check_for_new_cas(tab_manager=None):
    """Write syntactically valid, unrecognized CAS numbers to a work file.

    The output (tmp/cas_to_check.csv) is meant to be run through SciFinder.
    """
    candidates = pd.DataFrame({
        'not_perf': cat_rec.Categorize_CAS(tab_manager=tab_manager).get_corrected_not_perf()})
    # Keep only strings that pass the CAS checksum/format test.
    candidates['keep'] = candidates.not_perf.map(lambda code: ct.is_valid_CAS_code(code))
    candidates[candidates.keep].to_csv(tmpdir + 'cas_to_check.csv', index=False)
def gen_new_files(tab_manager=None):
    """NOTE: THIS MAY CLOBBER ANY FILES NAMED ABOVE AND REPLACE THEM
    WITH CLEAN COPIES. PAY ATTENTION!

    Rebuilds the shared translation (xlate) work files, appending any newly
    seen raw values with status 'new' for hand curation, and refreshes the
    CASNumber label file with current record counts.
    """
    # One-to-one xlate files (single_xlate_set) are currently unused, so the
    # commented-out handling for them has been removed.
    # Shared translation tables: several source fields feed one xlate file.
    for shared in shared_xlate.keys():
        xlate = pd.read_csv(indir + shared + '_xlate.csv',
                            quotechar='$')
        original = list(xlate.original)
        masterset = set()
        for tableName, fieldName in shared_xlate[shared]:
            df = tab_manager.tables[tableName].get_df()
            masterset.update(df[fieldName].str.lower().str.strip().unique())
        new_rows = []
        for new in masterset:
            # Fixed log message: the old one appended `fieldName`, which at
            # this point is just the last field iterated, not the source of
            # `new` (the values from all fields are pooled in `masterset`).
            if new not in original:
                print(f'adding <{new}> to {shared}')
                new_rows.append({'primary': '?',
                                 'original': new,
                                 'status': 'new'})
        if new_rows:
            # DataFrame.append was removed in pandas 2.0; concat instead.
            # sort=True matches the old append(sort=True) column ordering.
            xlate = pd.concat([xlate, pd.DataFrame(new_rows)], sort=True)
        xlate.to_csv(tmpdir + shared + '_xlateNEW.csv',
                     quotechar='$', quoting=csv.QUOTE_ALL, index=False)
    # Now update the CASNumber file cas_labels with current usage counts.
    cas_old = pd.read_csv(indir + 'cas_labels.csv', quotechar='"')
    cas_old.drop_duplicates(subset='clean', keep='first', inplace=True)
    df = tab_manager.tables['cas'].get_df(['CASNumber', 'iCASNumber'])
    df['cas_strip'] = df.CASNumber.str.lower().str.strip()
    df = pd.merge(tab_manager.tables['allrec'].get_df(fields=['iCASNumber', 'iUploadKey']),
                  df, on='iCASNumber', how='left')
    # Count records per cleaned CAS string.
    gb = df.groupby('cas_strip', as_index=False)['iUploadKey'].count()
    gb.columns = ['clean', 'new_cnt']
    new_df = pd.merge(cas_old, gb,
                      on=['clean'], how='outer', validate='m:1')
    new_df.to_csv(tmpdir + 'cas_labelsNEW.csv', index=False)
def truncStr(s):
    """Shorten *s* to 20 characters plus an ellipsis when it is longer."""
    return s if len(s) <= 20 else s[:20] + '...'
def process_carrier_fields(tab_manager=None):
    """check for new combinations of Purpose,CASNumber,IngredientName
    for records that are candidates as carriers. Add those combinations
    to the xlate_carrier.csv file to be curated."""
    # Previously curated carrier combinations.
    val_car = pd.read_csv(valid_carrier_fn) #,quotechar='$')
    # NOTE(review): `existing` is assembled here but never used below —
    # apparently dead code; confirm before removing.
    existing = []
    for i,row in val_car.iterrows():
        existing.append((row.Purpose,row.CASNumber,row.IngredientName))
    df_cas = tab_manager.get_df_cas(keepcodes='',removecodes='',event_fields=[])
    df_cas = df_cas[['Purpose','iPurpose','bgCAS','CASNumber','PercentHFJob',
                     'TotalBaseWaterVolume','record_flags','IngredientName',
                     'is_carrier','UploadKey','date','APINumber']]
    print('Starting carrier analysis')
    # Carrier candidates: >= 50% of the job, a reported base water volume,
    # and a record flagged as carrying percentage data ('%').
    has_TBWV = df_cas.TotalBaseWaterVolume>0
    wi_perc = df_cas.record_flags.str.contains('%')
    cond1 = df_cas.PercentHFJob>= 50
    cond2 = df_cas.bgCAS=='7732-18-5'  # 7732-18-5 is the CAS number for water
    df_cas['water50'] = np.where(cond1&cond2&has_TBWV&wi_perc,True,False)
    df_cas['notwater50'] = np.where(cond1&~(cond2)&has_TBWV&wi_perc,True,False)
    # Diagnostic dump of the non-water candidates joined with curated ones.
    t = df_cas[df_cas.notwater50].copy()
    t = pd.merge(t,val_car,on=['Purpose','CASNumber','IngredientName'],
                 how='outer',indicator=True)
    t.to_csv('./tmp/temp.csv')
    # Collect the distinct (Purpose, CASNumber, IngredientName) triples among
    # the non-water, >= 50% candidate records.
    allcomb = set()
    for i,row in df_cas[df_cas.notwater50].iterrows():
        tup = (row.Purpose,
               row.CASNumber,
               row.IngredientName)
        allcomb.add(tup)
    print(len(allcomb))
    alldic = {}
    for i,item in enumerate(allcomb):
        alldic[i] = {'Purpose':item[0],
                     'CASNumber':item[1],
                     'IngredientName':item[2],
                     'truncPurpose':truncStr(item[0])}
    out=pd.DataFrame.from_dict(alldic, orient='index')
    # Right merge keeps every observed combination and carries over curation
    # columns for those already present in the valid-carrier file.
    out = pd.merge(val_car,out,on=['Purpose','CASNumber','IngredientName','truncPurpose'],
                 how='right')
    out.to_csv(out_carrier_fn)
| [
"pandas.DataFrame",
"pandas.DataFrame.from_dict",
"pandas.read_csv",
"core.Categorize_records.Categorize_CAS",
"pandas.merge",
"core.CAS_tools.is_valid_CAS_code",
"numpy.where"
] | [((3591, 3643), 'pandas.read_csv', 'pd.read_csv', (["(indir + 'cas_labels.csv')"], {'quotechar': '"""\\""""'}), '(indir + \'cas_labels.csv\', quotechar=\'"\')\n', (3602, 3643), True, 'import pandas as pd\n'), ((4171, 4235), 'pandas.merge', 'pd.merge', (['cas_old', 'gb'], {'on': "['clean']", 'how': '"""outer"""', 'validate': '"""m:1"""'}), "(cas_old, gb, on=['clean'], how='outer', validate='m:1')\n", (4179, 4235), True, 'import pandas as pd\n'), ((4651, 4680), 'pandas.read_csv', 'pd.read_csv', (['valid_carrier_fn'], {}), '(valid_carrier_fn)\n', (4662, 4680), True, 'import pandas as pd\n'), ((5371, 5428), 'numpy.where', 'np.where', (['(cond1 & cond2 & has_TBWV & wi_perc)', '(True)', '(False)'], {}), '(cond1 & cond2 & has_TBWV & wi_perc, True, False)\n', (5379, 5428), True, 'import numpy as np\n'), ((5448, 5506), 'numpy.where', 'np.where', (['(cond1 & ~cond2 & has_TBWV & wi_perc)', '(True)', '(False)'], {}), '(cond1 & ~cond2 & has_TBWV & wi_perc, True, False)\n', (5456, 5506), True, 'import numpy as np\n'), ((5551, 5652), 'pandas.merge', 'pd.merge', (['t', 'val_car'], {'on': "['Purpose', 'CASNumber', 'IngredientName']", 'how': '"""outer"""', 'indicator': '(True)'}), "(t, val_car, on=['Purpose', 'CASNumber', 'IngredientName'], how=\n 'outer', indicator=True)\n", (5559, 5652), True, 'import pandas as pd\n'), ((6169, 6215), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['alldic'], {'orient': '"""index"""'}), "(alldic, orient='index')\n", (6191, 6215), True, 'import pandas as pd\n'), ((6229, 6331), 'pandas.merge', 'pd.merge', (['val_car', 'out'], {'on': "['Purpose', 'CASNumber', 'IngredientName', 'truncPurpose']", 'how': '"""right"""'}), "(val_car, out, on=['Purpose', 'CASNumber', 'IngredientName',\n 'truncPurpose'], how='right')\n", (6237, 6331), True, 'import pandas as pd\n'), ((2556, 2613), 'pandas.read_csv', 'pd.read_csv', (["(indir + shared + '_xlate.csv')"], {'quotechar': '"""$"""'}), "(indir + shared + '_xlate.csv', quotechar='$')\n", (2567, 2613), 
True, 'import pandas as pd\n'), ((1133, 1156), 'core.CAS_tools.is_valid_CAS_code', 'ct.is_valid_CAS_code', (['x'], {}), '(x)\n', (1153, 1156), True, 'import core.CAS_tools as ct\n'), ((3151, 3221), 'pandas.DataFrame', 'pd.DataFrame', (["{'primary': ['?'], 'original': [new], 'status': ['new']}"], {}), "({'primary': ['?'], 'original': [new], 'status': ['new']})\n", (3163, 3221), True, 'import pandas as pd\n'), ((1012, 1059), 'core.Categorize_records.Categorize_CAS', 'cat_rec.Categorize_CAS', ([], {'tab_manager': 'tab_manager'}), '(tab_manager=tab_manager)\n', (1034, 1059), True, 'import core.Categorize_records as cat_rec\n'), ((3328, 3345), 'pandas.DataFrame', 'pd.DataFrame', (['df2'], {}), '(df2)\n', (3340, 3345), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
@file
@brief Helpers to process data from logs.
"""
import re
from datetime import datetime
import hashlib
import numpy
import pandas
import ujson
def _duration(seq):
dt = None
t1 = None
for t, e in seq:
if e == 'enter':
t1 = t
elif e == 'leave':
if t1 is None:
# raise RuntimeError("Wrong logging {0}".format(seq))
return datetime(2018, 1, 2) - datetime(2018, 1, 1)
if dt is None:
dt = t - t1
else:
dt += t - t1
t1 = None
return dt
def _enumerate_processed_row(rows, data, cache, last_key, set_expected_answers=None):
    """
    Converts one parsed log record into zero or more flat observation
    dictionaries.

    @param      rows                    previous rows (kept for context, not read here)
    @param      data                    one log record as a dictionary
    @param      cache                   mutable mapping
                                        ``(person_id, alias, game, qn) -> [(time, 'enter'|'leave'), ...]``
                                        used to measure per-question durations
    @param      last_key                one-element list holding the last seen cache key
                                        (mutated in place so state survives across calls)
    @param      set_expected_answers    set of expected answers,
                                        adds a ``*-good`` field if one is found
    @return                             iterator on clean rows

    .. note::
        The default ``set_expected_answers=None`` would raise a *TypeError*
        on the ``key in set_expected_answers`` test below — callers are
        expected to always pass a real set (``enumerate_qcmlog`` does).
    """
    def comma_semi(st):
        # Parses "a:1,b,c:2" into {'a': '1', 'b': True, 'c': '2'}.
        if st is None:
            return {}
        res = {}
        for val in st.split(','):
            spl = val.split(':')
            if len(spl) == 1:
                res[spl[0]] = True
            elif len(spl) == 2:
                res[spl[0]] = spl[1]
            else:
                raise ValueError( # pragma: no cover
                    "Unable to parse value '{0}'".format(st))
        return res
    def hash4alias(st):
        # Stable pseudonymous identifier: truncated SHA-256 of alias+ip.
        by = st.encode("utf-8")
        m = hashlib.sha256()
        m.update(by)
        res = m.hexdigest()
        return res[:20] if len(res) > 20 else res
    session = data.get('session', None)
    ipadd = data.get('client', ['NN.NN.NN.NN'])[0]
    if ipadd is None:
        raise ValueError( # pragma: no cover
            "Unable to extract an ip address from {0}".format(data))
    # Keys copied verbatim into the output row; everything else is prefixed.
    keys = {'qn', 'game', 'next', 'events'}
    if session is not None: # pylint: disable=R1702
        alias = session['alias']
        person_id = hash4alias(alias + ipadd)
        res = dict(person_id=person_id, alias=alias, time=data['time'])
        event = data.get('msg', None)
        if event == 'qcm':
            # A question is displayed: record an 'enter' event and close
            # the previously open question (if any) with a 'leave'.
            res['qtime'] = 'begin'
            key = person_id, alias, data['game'], data['qn']
            if key not in cache:
                cache[key] = []
            cache[key].append((data['time'], 'enter'))
            if len(last_key) > 0:
                cache[last_key[0]].append((data['time'], 'leave'))
                last_key.clear()
            last_key.append(key)
            yield res
            events = data.get('events', None)
            res0 = res.copy()
            res0['qtime'] = 'event'
            if events is not None:
                if not isinstance(events, list):
                    events = [events]
                # NOTE(review): the same `res` object is updated and
                # re-yielded on every loop turn — consumers that keep a
                # reference will observe later mutations.
                res = res0.copy()
                for event in events:
                    ev = comma_semi(event)
                    res.update(ev)
                    yield res
        elif event == "answer":
            # An answer is submitted: prefix its fields with "<game>-<qn>-",
            # flag expected answers and compute the visit duration.
            res["qtime"] = 'end'
            q = data.get('data', None)
            good = {}
            if q is not None:
                qn = q['qn']
                game = q['game']
                q2 = {}
                for k, v in q.items():
                    if k in keys:
                        q2[k] = v
                    else:
                        key = "{0}-{1}-{2}".format(game, qn, k)
                        q2[key] = v
                        key_short = "{0}-{1}".format(game, qn)
                        if key in set_expected_answers:
                            good[key_short] = 1
                        elif key_short not in good:
                            good[key_short] = 0
                res.update(q2)
                key = person_id, alias, q['game'], q['qn']
                if key not in cache:
                    cache[key] = []
                cache[key].append((data['time'], 'leave'))
                duration = _duration(cache[key])
                # Each visit contributes one 'enter' and one 'leave' pair.
                res["{0}-{1}-{2}".format(
                    game, qn, 'nbvisit')] = len(cache[key]) * 0.5
                res["{0}-{1}-{2}".format(game, qn, 'duration')] = duration
                for k, v in good.items():
                    res[k + '-good'] = v
            last_key.clear()
            yield res
            events = data.get('events', None)
            res0 = res.copy()
            res0['qtime'] = 'event'
            if events is not None:
                if not isinstance(events, list):
                    events = [events]
                # Same single-object re-yield caveat as in the 'qcm' branch.
                res = res0.copy()
                for event in events:
                    ev = comma_semi(event)
                    res.update(ev)
                    yield res
def _parse_qcm_payload(sdata):
    """Parse the JSON payload of one ``[DATA]`` log line, repairing known quirks.

    Tries plain parsing first, then a single-quote → double-quote
    substitution, then the ad-hoc repair needed for ``"msg": "finish"``
    lines whose payload contains a Python ``repr`` of ``QueryParams``.

    :param sdata: raw payload string
    :return: parsed dictionary
    :raises ValueError: chained to the original parsing error when the
        payload cannot be repaired
    """
    try:
        return ujson.loads(sdata)  # pylint: disable=E1101
    except ValueError as exc:
        if '"' not in sdata and "'" in sdata:
            sdata2 = sdata.replace("'", '"')
            try:
                return ujson.loads(sdata2)  # pylint: disable=E1101
            except ValueError as exc2:
                if '"msg": "finish"' in sdata2:
                    # Fix the code somewhere else: the finish event logs a
                    # tuple and a QueryParams repr instead of valid JSON.
                    sdata3 = sdata2.replace(
                        '"client": ("', '"client": ["')
                    sdata3 = sdata3.replace(
                        '), "data": QueryParams', '], "data": QueryParams')
                    sdata3 = re.sub(
                        'QueryParams\\(\\"game=([a-z_]+)\\"\\)', '{"game":"\\1"}', sdata3)
                    try:
                        return ujson.loads(sdata3)  # pylint: disable=E1101
                    except ValueError as exc3:
                        raise ValueError(
                            "Unable to process line\n{}\n{}\n{}".format(
                                sdata, sdata2, sdata3)) from exc3
                # BUG FIX: this branch previously did ``raise ... from e``
                # with an unbound name ``e`` (NameError at runtime).
                raise ValueError(
                    "Unable to process line\n{}\n{}".format(
                        sdata, sdata2)) from exc2
        # BUG FIX: previously the exception was swallowed here and the
        # caller silently reused the *previous* line's parsed data.
        raise ValueError(
            "Unable to process line\n{}".format(sdata)) from exc


def enumerate_qcmlog(files, expected_answers=None):
    """
    Processes many files of logs produced by application
    @see cl QCMApp.

    :param files: list of filenames
    :param expected_answers: expected answers
    :return: iterator on observations as dictionary

    Example of data it processes::

        2018-12-12 17:56:42,833,INFO,[DATA],{"msg":"event","session":{"alias":"xavierd"},"events":["game:simple_french_qcm,qn:2"]}
        2018-12-12 17:56:44,458,INFO,[DATA],{"msg":"qcm","session":{"alias":"xavierd"},"game":"simple_french_qcm","qn":"3"}
        2018-12-12 17:56:49,427,INFO,[DATA],{"msg":"event","session":{"alias":"xavierd"},"events":["game:simple_french_qcm,qn:3"]}
    """
    # Flatten the expected answers into one membership set.
    set_expected_answers = set()
    if expected_answers is not None:
        for a in expected_answers:
            for _ in a:
                set_expected_answers.add(_)
    rows = []
    cache = {}
    last_key = []
    for name in files:
        # Bound the raw-row history kept in memory.
        if len(rows) > 1000:
            rows = rows[-1000:]
        with open(name, "r", encoding="utf-8") as f:
            for line in f.readlines():
                if "[DATA]" not in line:
                    continue
                line = line.strip("\n\r")
                # Split "timestamp,INFO,[DATA],payload"; re-join in case the
                # payload itself contains the separator.
                spl = line.split(",INFO,[DATA],")
                ti = spl[0]
                sdata = ",INFO,[DATA],".join(spl[1:])
                data = _parse_qcm_payload(sdata)
                tid = datetime.strptime(ti, '%Y-%m-%d %H:%M:%S,%f')
                data['time'] = tid
                obss = _enumerate_processed_row(
                    rows, data, cache, last_key, set_expected_answers)
                for obs in obss:
                    yield obs
                rows.append(data)
def _aggnotnan_serie(values):
res = []
for v in values:
if isinstance(v, float) and numpy.isnan(v):
continue
if pandas.isnull(v):
continue
if v in ('ok', 'on'):
v = 1
elif v == 'skip':
v = 1000
res.append(v)
if len(res) > 0:
if isinstance(res[0], str):
r = ",".join(str(_) for _ in res)
else:
if len(res) == 1:
r = res[0]
else:
try:
r = sum(res)
except TypeError:
r = 0
else:
r = numpy.nan
return r
def _aggnotnan(values):
    # Aggregation callable for ``groupby(...).agg``: *values* may be a
    # single Series (scalar result) or a whole DataFrame (one aggregate
    # per column), depending on how pandas dispatches it.
    if isinstance(values, pandas.core.series.Series):
        r = _aggnotnan_serie(values)
        return r
    res = []
    for col in values.columns:
        val = list(values[col])
        res.append(_aggnotnan_serie(val))
    # NOTE(review): the second positional argument of ``DataFrame`` is the
    # *index*, so this builds a one-column frame indexed by the source
    # column names — presumably a ``Series(res, index=values.columns)``
    # was intended; confirm against the ``groupby.agg`` call site.
    df = pandas.DataFrame(res, values.columns)
    return df
def enumerate_qcmlogdf(files, expected_answers=None):
    """
    Processes many files of logs produced by application
    @see cl QCMApp and aggregates them into dataframes.

    :param files: list of filenames
    :param expected_answers: expected answers
    :return: iterator on dataframes, one per player, with one aggregated
        answer row per *person_id*

    Example of data it processes::

        2018-12-12 17:56:44,458,INFO,[DATA],{"msg":"qcm","session":{"alias":"xavierd"},"client":["N.N.N.N",N]","game":"sfq","qn":"3"}
        2018-12-12 17:56:49,427,INFO,[DATA],{"msg":"event","session":{"alias":"xavierd"},"client":["N.N.N.N",N]","events":["game:sfq,qn:3"]}
    """
    def select_name(col):
        # Answer columns follow the "<game>-<qn>-<field>" naming pattern.
        return "-" in col
    def prepare_df(rows):
        # Aggregates one player's raw rows: keep only 'end' (answer) rows,
        # restrict to person_id + answer columns, then aggregate per player.
        df = pandas.DataFrame(rows)
        df2 = df[df.qtime == 'end']
        cols = ['person_id']
        cols2 = [c for c in df2.columns if select_name(c)]
        cols2.sort()
        df_question = df2[cols + cols2]
        gr_ans = df_question.groupby("person_id").agg(_aggnotnan)
        return gr_ans
    # stack: rows buffered per player; index: last row number seen per player.
    stack = {}
    index = {}
    for i, row in enumerate(enumerate_qcmlog(files, expected_answers)):
        person_id = row.get('person_id', None)
        if person_id is None:
            continue
        index[person_id] = i
        if person_id not in stack:
            stack[person_id] = []
        stack[person_id].append(row)
        # Flush players that have been idle for more than 500 rows to
        # keep memory usage bounded.
        rem = []
        for k, ind in index.items():
            if i - ind > 500:
                rem.append(k)
        for k in rem:
            yield prepare_df(stack[k])
            del stack[k]
            del index[k]
    # Flush everything still buffered once the input is exhausted.
    for k, rows in stack.items():
        yield prepare_df(rows)
| [
"pandas.DataFrame",
"ujson.loads",
"numpy.isnan",
"pandas.isnull",
"datetime.datetime",
"hashlib.sha256",
"datetime.datetime.strptime",
"re.sub"
] | [((10141, 10178), 'pandas.DataFrame', 'pandas.DataFrame', (['res', 'values.columns'], {}), '(res, values.columns)\n', (10157, 10178), False, 'import pandas\n'), ((1725, 1741), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (1739, 1741), False, 'import hashlib\n'), ((9373, 9389), 'pandas.isnull', 'pandas.isnull', (['v'], {}), '(v)\n', (9386, 9389), False, 'import pandas\n'), ((12163, 12185), 'pandas.DataFrame', 'pandas.DataFrame', (['rows'], {}), '(rows)\n', (12179, 12185), False, 'import pandas\n'), ((9325, 9339), 'numpy.isnan', 'numpy.isnan', (['v'], {}), '(v)\n', (9336, 9339), False, 'import numpy\n'), ((8925, 8970), 'datetime.datetime.strptime', 'datetime.strptime', (['ti', '"""%Y-%m-%d %H:%M:%S,%f"""'], {}), "(ti, '%Y-%m-%d %H:%M:%S,%f')\n", (8942, 8970), False, 'from datetime import datetime\n'), ((7351, 7369), 'ujson.loads', 'ujson.loads', (['sdata'], {}), '(sdata)\n', (7362, 7369), False, 'import ujson\n'), ((437, 457), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(2)'], {}), '(2018, 1, 2)\n', (445, 457), False, 'from datetime import datetime\n'), ((460, 480), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {}), '(2018, 1, 1)\n', (468, 480), False, 'from datetime import datetime\n'), ((7609, 7628), 'ujson.loads', 'ujson.loads', (['sdata2'], {}), '(sdata2)\n', (7620, 7628), False, 'import ujson\n'), ((8131, 8204), 're.sub', 're.sub', (['"""QueryParams\\\\(\\\\"game=([a-z_]+)\\\\"\\\\)"""', '"""{"game":"\\\\1"}"""', 'sdata3'], {}), '(\'QueryParams\\\\(\\\\"game=([a-z_]+)\\\\"\\\\)\', \'{"game":"\\\\1"}\', sdata3)\n', (8137, 8204), False, 'import re\n'), ((8322, 8341), 'ujson.loads', 'ujson.loads', (['sdata3'], {}), '(sdata3)\n', (8333, 8341), False, 'import ujson\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import sys
import textwrap
from io import StringIO
from typing import List, Optional, Dict, Any, Tuple, Union
import numpy as np
import gym
from gym.utils import colorize
import textworld
import textworld.text_utils
from textworld import EnvInfos
from textworld.envs.wrappers import Filter
from textworld.gym.spaces import text_spaces
from textworld.gym.envs.utils import shuffled_cycle
class TextworldGamesEnv(gym.Env):
    # Gym environment cycling through a pool of TextWorld games: every call
    # to `reset()` starts the next game of the (shuffled) pool.
    metadata = {'render.modes': ['human', 'ansi', 'text']}
    def __init__(self, game_files: List[str],
                 request_infos: Optional[EnvInfos] = None,
                 action_space: Optional[gym.Space] = None,
                 observation_space: Optional[gym.Space] = None) -> None:
        """ Environment for playing text-based games.
        Each time `TextworldGamesEnv.reset()` is called, a new game from the
        pool starts. Each game of the pool is guaranteed to be played exactly
        once before a same game is played for a second time.
        Arguments:
            game_files:
                Paths of every game composing the pool (`*.ulx` + `*.json`, `*.z[1-8]`).
            request_infos:
                For customizing the information returned by this environment
                (see
                :py:class:`textworld.EnvInfos <textworld.envs.wrappers.filter.EnvInfos>`
                for the list of available information).
                .. warning:: This is only supported for `*.ulx` games generated with TextWorld.
            action_space:
                The action space of this TextWorld environment. By default, a
                :py:class:`textworld.gym.spaces.Word <textworld.gym.spaces.text_spaces.Word>`
                instance is used with a `max_length` of 8 and a vocabulary
                extracted from the TextWorld game.
            observation_space:
                The observation space of this TextWorld environment. By default, a
                :py:class:`textworld.gym.spaces.Word <textworld.gym.spaces.text_spaces.Word>`
                instance is used with a `max_length` of 200 and a vocabulary
                extracted from the TextWorld game.
        """
        self.gamefiles = game_files
        self.request_infos = request_infos or EnvInfos()
        self.ob = None
        self.last_command = None
        self.textworld_env = None
        self.current_gamefile = None
        # Default seed; `seed()` also builds the game iterator.
        self.seed(1234)
        if action_space is None or observation_space is None:
            # Extract vocabulary from all games.
            vocab = sorted(textworld.text_utils.extract_vocab_from_gamefiles(self.gamefiles))
        self.action_space = action_space or text_spaces.Word(max_length=8, vocab=vocab)
        self.observation_space = observation_space or text_spaces.Word(max_length=200, vocab=vocab)
    def seed(self, seed: Optional[int] = None) -> List[int]:
        """ Set the seed for this environment's random generator(s).
        This environment use a random generator to shuffle the order in which
        the games are played.
        Arguments:
            seed: Number that will be used to seed the random generators.
        Returns:
            All the seeds used to set this environment's random generator(s).
        """
        # We shuffle the order in which the game will be seen.
        rng = np.random.RandomState(seed)
        gamefiles = list(self.gamefiles)  # Soft copy to avoid shuffling original list.
        rng.shuffle(gamefiles)
        # Prepare iterator used for looping through the games.
        self._gamefiles_iterator = shuffled_cycle(gamefiles, rng=rng)
        return [seed]
    def reset(self) -> Tuple[str, Dict[str, Any]]:
        """ Resets the text-based environment.
        Resetting this environment means starting the next game in the pool.
        Returns:
            A tuple (observation, info) where
            * observation: text observed in the initial state;
            * infos: additional information as requested.
        """
        self.current_gamefile = next(self._gamefiles_iterator)
        if self.textworld_env is None:
            # First reset: start the interpreter and wrap it with Filter.
            env = textworld.start(self.current_gamefile, self.request_infos)
            self.textworld_env = Filter(env)
        else:
            # Later resets: reuse the interpreter, just load the next game.
            self.textworld_env.load(self.current_gamefile)
        self.ob, infos = self.textworld_env.reset()
        return self.ob, infos
    def skip(self, nb_games: int = 1) -> None:
        """ Skip games.
        Arguments:
            nb_games: Number of games to skip.
        """
        for _ in range(nb_games):
            next(self._gamefiles_iterator)
    def step(self, command: str) -> Tuple[str, float, bool, Dict[str, Any]]:
        """ Runs a command in the text-based environment.
        Arguments:
            command: Text command to send to the game interpreter.
        Returns:
            A tuple (observation, score, done, info) where
            * observation: text observed in the new state;
            * score: total number of points accumulated so far;
            * done: whether the game is finished or not;
            * infos: additional information as requested.
        """
        self.last_command = command
        self.ob, score, done, infos = self.textworld_env.step(self.last_command)
        return self.ob, score, done, infos
    def close(self) -> None:
        """ Close this environment. """
        if self.textworld_env is not None:
            self.textworld_env.close()
        self.textworld_env = None
    def render(self, mode: str = 'human') -> Optional[Union[StringIO, str]]:
        """ Renders the current state of this environment.
        The rendering is composed of the previous text command (if there's one) and
        the text describing the current observation.
        Arguments:
            mode:
                Controls where and how the text is rendered. Supported modes are:
                * human: Display text to the current display or terminal and
                  return nothing.
                * ansi: Return a `StringIO` containing a terminal-style
                  text representation. The text can include newlines and ANSI
                  escape sequences (e.g. for colors).
                * text: Return a string (`str`) containing the text without
                  any ANSI escape sequences.
        Returns:
            Depending on the `mode`, this method returns either nothing, a
            string, or a `StringIO` object.
        """
        # 'human' writes directly to stdout; the other modes buffer the text.
        outfile = StringIO() if mode in ['ansi', "text"] else sys.stdout
        msg = self.ob.rstrip() + "\n"
        if self.last_command is not None:
            command = "> " + self.last_command
            if mode in ["ansi", "human"]:
                command = colorize(command, "yellow", highlight=False)
            msg = command + "\n" + msg
        if mode == "human":
            # Wrap each paragraph at 80 characters.
            paragraphs = msg.split("\n")
            paragraphs = ["\n".join(textwrap.wrap(paragraph, width=80)) for paragraph in paragraphs]
            msg = "\n".join(paragraphs)
        outfile.write(msg + "\n")
        if mode == "text":
            outfile.seek(0)
            return outfile.read()
        if mode == 'ansi':
            return outfile
        # Any other mode (including 'human') implicitly returns None.
| [
"textworld.gym.spaces.text_spaces.Word",
"io.StringIO",
"textworld.text_utils.extract_vocab_from_gamefiles",
"textworld.gym.envs.utils.shuffled_cycle",
"textworld.envs.wrappers.Filter",
"textwrap.wrap",
"numpy.random.RandomState",
"gym.utils.colorize",
"textworld.EnvInfos",
"textworld.start"
] | [((3420, 3447), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3441, 3447), True, 'import numpy as np\n'), ((3666, 3700), 'textworld.gym.envs.utils.shuffled_cycle', 'shuffled_cycle', (['gamefiles'], {'rng': 'rng'}), '(gamefiles, rng=rng)\n', (3680, 3700), False, 'from textworld.gym.envs.utils import shuffled_cycle\n'), ((2344, 2354), 'textworld.EnvInfos', 'EnvInfos', ([], {}), '()\n', (2352, 2354), False, 'from textworld import EnvInfos\n'), ((2757, 2800), 'textworld.gym.spaces.text_spaces.Word', 'text_spaces.Word', ([], {'max_length': '(8)', 'vocab': 'vocab'}), '(max_length=8, vocab=vocab)\n', (2773, 2800), False, 'from textworld.gym.spaces import text_spaces\n'), ((2855, 2900), 'textworld.gym.spaces.text_spaces.Word', 'text_spaces.Word', ([], {'max_length': '(200)', 'vocab': 'vocab'}), '(max_length=200, vocab=vocab)\n', (2871, 2900), False, 'from textworld.gym.spaces import text_spaces\n'), ((4219, 4277), 'textworld.start', 'textworld.start', (['self.current_gamefile', 'self.request_infos'], {}), '(self.current_gamefile, self.request_infos)\n', (4234, 4277), False, 'import textworld\n'), ((4311, 4322), 'textworld.envs.wrappers.Filter', 'Filter', (['env'], {}), '(env)\n', (4317, 4322), False, 'from textworld.envs.wrappers import Filter\n'), ((6615, 6625), 'io.StringIO', 'StringIO', ([], {}), '()\n', (6623, 6625), False, 'from io import StringIO\n'), ((2645, 2710), 'textworld.text_utils.extract_vocab_from_gamefiles', 'textworld.text_utils.extract_vocab_from_gamefiles', (['self.gamefiles'], {}), '(self.gamefiles)\n', (2694, 2710), False, 'import textworld\n'), ((6866, 6910), 'gym.utils.colorize', 'colorize', (['command', '"""yellow"""'], {'highlight': '(False)'}), "(command, 'yellow', highlight=False)\n", (6874, 6910), False, 'from gym.utils import colorize\n'), ((7109, 7143), 'textwrap.wrap', 'textwrap.wrap', (['paragraph'], {'width': '(80)'}), '(paragraph, width=80)\n', (7122, 7143), False, 'import textwrap\n')] |
import copy
import pytest
import numpy as np
import pandas as pd
from hyperactive import Hyperactive
# One-dimensional search space: the integer grid -100 .. 99 inclusive.
search_space = {
    "x1": [value for value in np.arange(-100, 100, 1)],
}
def test_callback_0():
    """Both 'before' callbacks must have run before the objective is called."""

    def set_first(access):
        access.stuff1 = 1

    def set_second(access):
        access.stuff2 = 2

    def objective_function(access):
        assert access.stuff1 == 1
        assert access.stuff2 == 2
        return 0

    hooks = {"before": [set_first, set_second]}
    hyper = Hyperactive()
    hyper.add_search(
        objective_function, search_space, n_iter=100, callbacks=hooks
    )
    hyper.run()
def test_callback_1():
    """'before' runs ahead of the objective; 'after' runs following it."""

    def before_hook(access):
        access.stuff1 = 1

    def after_hook(access):
        access.stuff1 = 2

    def objective_function(access):
        # The 'after' overwrite must not be visible inside the objective.
        assert access.stuff1 == 1
        return 0

    hyper = Hyperactive()
    hyper.add_search(
        objective_function,
        search_space,
        n_iter=100,
        callbacks={"before": [before_hook], "after": [after_hook]},
    )
    hyper.run()
def test_callback_2():
    """A 'before' callback can overwrite a pass_through value."""

    def overwrite(access):
        access.pass_through["stuff1"] = 1

    def objective_function(access):
        assert access.pass_through["stuff1"] == 1
        return 0

    hyper = Hyperactive()
    hyper.add_search(
        objective_function,
        search_space,
        n_iter=100,
        callbacks={"before": [overwrite]},
        pass_through={"stuff1": 0},
    )
    hyper.run()
def test_callback_3():
    """An 'after' mutation becomes visible from the second iteration on."""

    def overwrite(access):
        access.pass_through["stuff1"] = 1

    def objective_function(access):
        expected = 0 if access.nth_iter == 0 else 1
        assert access.pass_through["stuff1"] == expected
        return 0

    hyper = Hyperactive()
    hyper.add_search(
        objective_function,
        search_space,
        n_iter=100,
        callbacks={"after": [overwrite]},
        pass_through={"stuff1": 0},
    )
    hyper.run()
| [
"hyperactive.Hyperactive",
"numpy.arange"
] | [((434, 447), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (445, 447), False, 'from hyperactive import Hyperactive\n'), ((854, 867), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (865, 867), False, 'from hyperactive import Hyperactive\n'), ((1262, 1275), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (1273, 1275), False, 'from hyperactive import Hyperactive\n'), ((1788, 1801), 'hyperactive.Hyperactive', 'Hyperactive', ([], {}), '()\n', (1799, 1801), False, 'from hyperactive import Hyperactive\n'), ((136, 159), 'numpy.arange', 'np.arange', (['(-100)', '(100)', '(1)'], {}), '(-100, 100, 1)\n', (145, 159), True, 'import numpy as np\n')] |
from policy import PolicyWithMu
import os
from evaluator import Evaluator
import gym
# from utils.em_brake_4test import EmergencyBraking
import numpy as np
from matplotlib.colors import ListedColormap
from dynamics.models import EmBrakeModel, UpperTriangleModel, Air3dModel
def hj_baseline(timet=5.0):
    """Solve the HJ backward-reachable tube for a double integrator.

    :param timet: horizon length; the solver integrates backwards to -timet
    :return: ``(grid, target_values)`` — the state-space grid and the
        value function on it after convergence
    """
    import jax
    import jax.numpy as jnp
    import hj_reachability as hj

    dynamics = hj.systems.DoubleInt()
    box = hj.sets.Box(lo=np.array([-5., -5.]), hi=np.array([5., 5.]))
    grid = hj.Grid.from_grid_definition_and_initial_values(box, (50, 50))
    # Initial values: positive inside the |x|_inf < 5 square.
    values = -jnp.linalg.norm(grid.states, axis=-1, ord=jnp.inf) + 5
    solver_settings = hj.SolverSettings.with_accuracy(
        "very_high",
        hamiltonian_postprocessor=hj.solver.backwards_reachable_tube,
    )
    target_values = hj.step(
        solver_settings, dynamics, grid, 0., values, -timet
    ).block_until_ready()
    return grid, target_values
def static_region(test_dir, iteration,
                  bound=(-5., 5., -5., 5.),
                  baseline=False):
    # Visualizes the learned feasible region of a trained policy on a 2-D
    # grid of the state space, optionally overlaying the HJ-reachability
    # boundary computed by `hj_baseline`.
    #
    # :param test_dir: training-run directory containing config.json and models/
    # :param iteration: checkpoint iteration whose weights are loaded
    # :param bound: (x1_min, x1_max, x2_min, x2_max) of the evaluated grid
    # :param baseline: when True, also solve and draw the HJ baseline contour
    import json
    import argparse
    import datetime
    from policy import PolicyWithMu
    # Rebuild the training-time configuration and override it for testing;
    # argparse is (ab)used to turn the dict into an attribute namespace.
    params = json.loads(open(test_dir + '/config.json').read())
    time_now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    test_log_dir = params['log_dir'] + '/tester/test-region-{}'.format(time_now)
    params.update(dict(mode='testing',
                       test_dir=test_dir,
                       test_log_dir=test_log_dir,))
    parser = argparse.ArgumentParser()
    for key, val in params.items():
        parser.add_argument("-" + key, default=val)
    args = parser.parse_args()
    evaluator = Evaluator(PolicyWithMu, args.env_id, args)
    evaluator.load_weights(os.path.join(test_dir, 'models'), iteration)
    brake_model = EmBrakeModel()
    double_intergrator_model = UpperTriangleModel()
    air3d_model = Air3dModel()
    model_dict = {"UpperTriangle": double_intergrator_model,
                  "Air3d": air3d_model}
    # NOTE(review): `model` and `brake_model` are unused below — the rollout
    # code that consumed them is commented out.
    model = model_dict.get(args.env_id.split("-")[0])
    # generate batch obses
    d = np.linspace(bound[0], bound[1], 400)
    v = np.linspace(bound[2], bound[3], 400)
    # cmaplist = ['springgreen'] * 3 + ['crimson'] * 87
    # cmap1 = ListedColormap(cmaplist)
    D, V = np.meshgrid(d, v)
    flatten_d = np.reshape(D, [-1, ])
    flatten_v = np.reshape(V, [-1, ])
    env_name = args.env_id.split("-")[0]
    if env_name == 'Air3d':
        # Air3d has a third state; evaluate the 2-D slice at x3 = pi.
        x3 = np.pi * np.ones_like(flatten_d)
        init_obses = np.stack([flatten_d, flatten_v, x3], 1)
    else:
        init_obses = np.stack([flatten_d, flatten_v], 1)
    # define rollout
    # def reduced_model_rollout_for_update(obses):
    #     model.reset(obses)
    #     constraints_list = []
    #     for step in range(args.num_rollout_list_for_policy_update[0]):
    #         processed_obses = evaluator.preprocessor.tf_process_obses(obses)
    #         actions, _ = evaluator.policy_with_value.compute_action(processed_obses)
    #         obses, rewards, constraints = model.rollout_out(actions)
    #         constraints = evaluator.tf.expand_dims(constraints, 1) if len(constraints.shape) == 1 else constraints
    #         constraints_list.append(constraints)
    #     flattern_cstr = evaluator.tf.concat(constraints_list, 1).numpy()
    #     return flattern_cstr
    # flatten_cstr = reduced_model_rollout_for_update(init_obses)
    # Evaluate the learned multiplier and constraint critic on the grid.
    preprocess_obs = evaluator.preprocessor.np_process_obses(init_obses)
    flatten_mu = evaluator.policy_with_value.compute_lam(preprocess_obs).numpy()
    processed_obses = evaluator.preprocessor.tf_process_obses(init_obses)
    actions, _ = evaluator.policy_with_value.compute_action(processed_obses)
    flatten_cost_q = evaluator.policy_with_value.compute_QC1(processed_obses, actions).numpy()
    flatten_fea_v = flatten_cost_q
    # Element-wise product of the feasibility value and the multiplier.
    flatten_cs = np.multiply(flatten_fea_v, flatten_mu)
    import matplotlib.pyplot as plt
    plt.rcParams.update({'font.size': 16})
    from mpl_toolkits.mplot3d import Axes3D
    plot_items = ['cstr']
    data_dict = {'cs': flatten_cs, 'mu': flatten_mu, 'cstr': flatten_fea_v}
    if baseline:
        grid, target_values = hj_baseline()
        # NOTE(review): the 10-second solution below is computed but never drawn.
        grid1, target_values1 = hj_baseline(timet=10.0)
    def plot_region(data_reshape, name):
        fig = plt.figure(figsize=[5,6])
        ax = plt.axes([0.1,0.2,0.8,0.75])
        # Offset every non-zero entry by 0.15 so the zero level set stands out.
        data_reshape += 0.15 * np.where(data_reshape == 0,
                                         np.zeros_like(data_reshape),
                                         np.ones_like(data_reshape))
        ct1 = ax.contourf(D, V, data_reshape, cmap='Accent') # 50
        plt.colorbar(ct1)
        ct1.collections[0].set_label('Learned Boundary')
        ax.contour(D, V, data_reshape, levels=0,
                   colors="green",
                   linewidths=3)
        if baseline:
            ct2 = ax.contour(grid.coordinate_vectors[0],
                             grid.coordinate_vectors[1],
                             target_values.T,
                             levels=0,
                             colors="grey",
                             linewidths=3)
        # data = np.load('/home/mahaitong/PycharmProjects/toyota_exp_train (copy)/baseline/init_feasible_f1.npy')
        # data2 = np.load('/home/mahaitong/PycharmProjects/toyota_exp_train (copy)/baseline/init_feasible_f0.4.npy')
        # ds = np.linspace(bound[0], bound[1], 100)
        # vs = np.linspace(bound[2], bound[3], 100)
        # Ds, Vs = np.meshgrid(ds, vs)
        # ct3 = ax.contour(Ds,
        #                  Vs,
        #                  data.T,
        #                  levels=0,
        #                  colors="cornflowerblue",
        #                  linewidths=3)
        # ct2 = ax.contour(Ds,
        #                  Vs,
        #                  data2.T,
        #                  levels=0,
        #                  colors="orange",
        #                  linewidths=3)
        # ct2.collections[0].set_label('HJ-Reachability Boundary')
        name_2d = name + '_' + str(iteration) + '_2d.jpg'
        ax.set_xlabel(r'$x_1$')
        ax.set_ylabel(r'$x_2$')
        # Proxy artists for the legend (legend call currently commented out).
        rect1 = plt.Rectangle((0, 0), 1, 1, fc=ct1.collections[0].get_facecolor()[0], ec='green', linewidth=3)
        rect2 = plt.Rectangle((0, 0), 1, 1, fill=False, ec='grey', linewidth=3)
        rect3 = plt.Rectangle((0, 0), 1, 1, fill=False, ec='orange', linewidth=3)
        rect4 = plt.Rectangle((0, 0), 1, 1, fill=False, ec='cornflowerblue', linewidth=3)
        ax = plt.axes([0.05, 0.02, 0.9, 0.16])
        plt.axis('off')
        # ax.legend((rect1,rect2, rect3, rect4), ('Feasible region', 'HJ avoid set', 'Energy-based','MPC-feasiblity')
        #           , loc='lower center',ncol=2, fontsize=15)
        # plt.title('Feasible Region of Double Integrator')
        plt.tight_layout(pad=0.5)
        plt.show()
        # plt.savefig(os.path.join(evaluator.log_dir, name_2d))
        # figure = plt.figure()
        # ax = Axes3D(figure)
        # ax.plot_surface(D, V, data_reshape, rstride=1, cstride=1, cmap='rainbow')
        # name_3d = name + '_' + str(iteration) + '_3d.jpg'
        # plt.savefig(os.path.join(evaluator.log_dir, name_3d))
    for plot_item in plot_items:
        data = data_dict.get(plot_item)
        # for k in range(data.shape[1]):
        #     data_k = data[:, k]
        #     data_reshape = data_k.reshape(D.shape)
        #     plot_region(data_reshape, plot_item + '_' + str(k))
        data_reshape = data.reshape(D.shape)
        plot_region(data_reshape, plot_item + '_sum')
if __name__ == '__main__':
    # Entry point: plot the learned feasible region for one trained
    # checkpoint.  The commented calls below are alternative runs kept
    # for reference.
    # static_region('./results/toyota3lane/LMAMPC-v2-2021-11-21-23-04-21', 300000)
    # static_region('./results/Air3d/LMAMPC-vector-2021-12-02-01-41-12', 300000,
    #               bound=(-6., 20., -10., 10.),
    #               baseline=True) #
    # LMAMPC - vector - 2021 - 11 - 29 - 21 - 22 - 40
    static_region('../results/model-free/UpperTriangle-2021-12-20-22-06-18', 300000,
                  bound=(-5., 5., -5., 5.),
                  baseline=False) #
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.axes",
"dynamics.models.UpperTriangleModel",
"hj_reachability.systems.DoubleInt",
"matplotlib.pyplot.figure",
"evaluator.Evaluator",
"os.path.join",
"matplotlib.pyplot.tight_layout",
"hj_reachability.step",
"numpy.meshgrid",
"numpy.multiply",
"nump... | [((446, 468), 'hj_reachability.systems.DoubleInt', 'hj.systems.DoubleInt', ([], {}), '()\n', (466, 468), True, 'import hj_reachability as hj\n'), ((764, 875), 'hj_reachability.SolverSettings.with_accuracy', 'hj.SolverSettings.with_accuracy', (['"""very_high"""'], {'hamiltonian_postprocessor': 'hj.solver.backwards_reachable_tube'}), "('very_high', hamiltonian_postprocessor=hj.\n solver.backwards_reachable_tube)\n", (795, 875), True, 'import hj_reachability as hj\n'), ((1674, 1699), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1697, 1699), False, 'import argparse\n'), ((1835, 1877), 'evaluator.Evaluator', 'Evaluator', (['PolicyWithMu', 'args.env_id', 'args'], {}), '(PolicyWithMu, args.env_id, args)\n', (1844, 1877), False, 'from evaluator import Evaluator\n'), ((1968, 1982), 'dynamics.models.EmBrakeModel', 'EmBrakeModel', ([], {}), '()\n', (1980, 1982), False, 'from dynamics.models import EmBrakeModel, UpperTriangleModel, Air3dModel\n'), ((2014, 2034), 'dynamics.models.UpperTriangleModel', 'UpperTriangleModel', ([], {}), '()\n', (2032, 2034), False, 'from dynamics.models import EmBrakeModel, UpperTriangleModel, Air3dModel\n'), ((2053, 2065), 'dynamics.models.Air3dModel', 'Air3dModel', ([], {}), '()\n', (2063, 2065), False, 'from dynamics.models import EmBrakeModel, UpperTriangleModel, Air3dModel\n'), ((2258, 2294), 'numpy.linspace', 'np.linspace', (['bound[0]', 'bound[1]', '(400)'], {}), '(bound[0], bound[1], 400)\n', (2269, 2294), True, 'import numpy as np\n'), ((2303, 2339), 'numpy.linspace', 'np.linspace', (['bound[2]', 'bound[3]', '(400)'], {}), '(bound[2], bound[3], 400)\n', (2314, 2339), True, 'import numpy as np\n'), ((2446, 2463), 'numpy.meshgrid', 'np.meshgrid', (['d', 'v'], {}), '(d, v)\n', (2457, 2463), True, 'import numpy as np\n'), ((2480, 2499), 'numpy.reshape', 'np.reshape', (['D', '[-1]'], {}), '(D, [-1])\n', (2490, 2499), True, 'import numpy as np\n'), ((2518, 2537), 'numpy.reshape', 'np.reshape', (['V', 
'[-1]'], {}), '(V, [-1])\n', (2528, 2537), True, 'import numpy as np\n'), ((4021, 4059), 'numpy.multiply', 'np.multiply', (['flatten_fea_v', 'flatten_mu'], {}), '(flatten_fea_v, flatten_mu)\n', (4032, 4059), True, 'import numpy as np\n'), ((4102, 4140), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (4121, 4140), True, 'import matplotlib.pyplot as plt\n'), ((1905, 1937), 'os.path.join', 'os.path.join', (['test_dir', '"""models"""'], {}), "(test_dir, 'models')\n", (1917, 1937), False, 'import os\n'), ((2675, 2714), 'numpy.stack', 'np.stack', (['[flatten_d, flatten_v, x3]', '(1)'], {}), '([flatten_d, flatten_v, x3], 1)\n', (2683, 2714), True, 'import numpy as np\n'), ((2746, 2781), 'numpy.stack', 'np.stack', (['[flatten_d, flatten_v]', '(1)'], {}), '([flatten_d, flatten_v], 1)\n', (2754, 2781), True, 'import numpy as np\n'), ((4461, 4487), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[5, 6]'}), '(figsize=[5, 6])\n', (4471, 4487), True, 'import matplotlib.pyplot as plt\n'), ((4500, 4531), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.1, 0.2, 0.8, 0.75]'], {}), '([0.1, 0.2, 0.8, 0.75])\n', (4508, 4531), True, 'import matplotlib.pyplot as plt\n'), ((4800, 4817), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ct1'], {}), '(ct1)\n', (4812, 4817), True, 'import matplotlib.pyplot as plt\n'), ((6479, 6542), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fill': '(False)', 'ec': '"""grey"""', 'linewidth': '(3)'}), "((0, 0), 1, 1, fill=False, ec='grey', linewidth=3)\n", (6492, 6542), True, 'import matplotlib.pyplot as plt\n'), ((6559, 6624), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0, 0)', '(1)', '(1)'], {'fill': '(False)', 'ec': '"""orange"""', 'linewidth': '(3)'}), "((0, 0), 1, 1, fill=False, ec='orange', linewidth=3)\n", (6572, 6624), True, 'import matplotlib.pyplot as plt\n'), ((6641, 6714), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0, 0)', 
'(1)', '(1)'], {'fill': '(False)', 'ec': '"""cornflowerblue"""', 'linewidth': '(3)'}), "((0, 0), 1, 1, fill=False, ec='cornflowerblue', linewidth=3)\n", (6654, 6714), True, 'import matplotlib.pyplot as plt\n'), ((6728, 6761), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.05, 0.02, 0.9, 0.16]'], {}), '([0.05, 0.02, 0.9, 0.16])\n', (6736, 6761), True, 'import matplotlib.pyplot as plt\n'), ((6770, 6785), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6778, 6785), True, 'import matplotlib.pyplot as plt\n'), ((7035, 7060), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)'}), '(pad=0.5)\n', (7051, 7060), True, 'import matplotlib.pyplot as plt\n'), ((7069, 7079), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7077, 7079), True, 'import matplotlib.pyplot as plt\n'), ((686, 736), 'jax.numpy.linalg.norm', 'jnp.linalg.norm', (['grid.states'], {'axis': '(-1)', 'ord': 'jnp.inf'}), '(grid.states, axis=-1, ord=jnp.inf)\n', (701, 736), True, 'import jax.numpy as jnp\n'), ((984, 1051), 'hj_reachability.step', 'hj.step', (['solver_settings', 'dynamics', 'grid', 'time', 'values', 'target_time'], {}), '(solver_settings, dynamics, grid, time, values, target_time)\n', (991, 1051), True, 'import hj_reachability as hj\n'), ((1393, 1416), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1414, 1416), False, 'import datetime\n'), ((2630, 2653), 'numpy.ones_like', 'np.ones_like', (['flatten_d'], {}), '(flatten_d)\n', (2642, 2653), True, 'import numpy as np\n'), ((544, 566), 'numpy.array', 'np.array', (['[-5.0, -5.0]'], {}), '([-5.0, -5.0])\n', (552, 566), True, 'import numpy as np\n'), ((640, 660), 'numpy.array', 'np.array', (['[5.0, 5.0]'], {}), '([5.0, 5.0])\n', (648, 660), True, 'import numpy as np\n'), ((4628, 4655), 'numpy.zeros_like', 'np.zeros_like', (['data_reshape'], {}), '(data_reshape)\n', (4641, 4655), True, 'import numpy as np\n'), ((4697, 4723), 'numpy.ones_like', 'np.ones_like', (['data_reshape'], 
{}), '(data_reshape)\n', (4709, 4723), True, 'import numpy as np\n')] |
import collections
import numpy as np
from sympy import Point3D, Line3D
class Ray(object):
    """A line of sight from an effectively infinitely distant point source
    to a detector's mount point, with helpers for plotting and for
    intersection tests via a sympy Line3D."""

    # radius at which the "distant" ray origin is placed
    _R = 1e10  # cm
    # parametric position used to pick a nearby plotting origin
    _scale = 3e-8

    def __init__(self, detector, point_source, color="#29FC5C", probability=None):
        """Build the ray joining point_source (a coordinate with a
        .spherical lat/lon) to the given detector."""
        self._detector = detector
        self._probability = probability
        self._color = color
        self._point_source = point_source
        self._calculate_ray_origin()

    def _calculate_ray_origin(self):
        """Convert the source direction to Cartesian coordinates at radius
        _R and cache the sympy line plus a nearby plotting origin."""
        lat = self._point_source.spherical.lat.value
        lon = self._point_source.spherical.lon.value
        theta = np.deg2rad(90.0 - lat)
        phi = np.deg2rad(lon)

        # spherical -> Cartesian at the large radius _R
        xyz = np.array([
            Ray._R * np.cos(phi) * np.sin(theta),
            Ray._R * np.sin(phi) * np.sin(theta),
            Ray._R * np.cos(theta),
        ])

        # this is the "distant origin of the ray"
        self._origin = xyz

        self._sympy_line = Line3D(
            Point3D(self._detector.mount_point), Point3D(self._origin)
        )

        self._plot_origin = self.point_on_ray(Ray._scale)

    def plot(self, ax):
        """Draw the ray segment from its plotting origin to the detector
        on the given 3D axes."""
        start = self._plot_origin
        stop = self.detector_origin
        ax.plot(
            [start[0], stop[0]],
            [start[1], stop[1]],
            [start[2], stop[2]],
            color=self._color,
            alpha=0.8,
        )

    @property
    def detector_name(self):
        """Name of the detector this ray targets."""
        return self._detector.name

    @property
    def probability(self):
        """Optional probability associated with this ray."""
        return self._probability

    @property
    def detector_origin(self):
        """Mount point of the detector (ray endpoint)."""
        return self._detector.mount_point

    @property
    def ray_origin(self):
        """Distant Cartesian origin of the ray."""
        return self._origin

    @property
    def sympy_line(self):
        """sympy Line3D through the detector and the distant origin."""
        return self._sympy_line

    def point_on_ray(self, t=0.5):
        """
        get a parametrized point on the ray

        :param t: point between 0 and 1
        :return:
        """

        assert 0.0 <= t <= 1.0, "t must be between 0 and 1"

        return self.detector_origin + (self._origin - self.detector_origin) * t
| [
"numpy.deg2rad",
"numpy.sin",
"numpy.array",
"numpy.cos",
"sympy.Point3D"
] | [((490, 547), 'numpy.deg2rad', 'np.deg2rad', (['(90.0 - self._point_source.spherical.lat.value)'], {}), '(90.0 - self._point_source.spherical.lat.value)\n', (500, 547), True, 'import numpy as np\n'), ((562, 612), 'numpy.deg2rad', 'np.deg2rad', (['self._point_source.spherical.lon.value'], {}), '(self._point_source.spherical.lon.value)\n', (572, 612), True, 'import numpy as np\n'), ((820, 839), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (828, 839), True, 'import numpy as np\n'), ((649, 662), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (655, 662), True, 'import numpy as np\n'), ((698, 711), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (704, 711), True, 'import numpy as np\n'), ((733, 746), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (739, 746), True, 'import numpy as np\n'), ((888, 923), 'sympy.Point3D', 'Point3D', (['self._detector.mount_point'], {}), '(self._detector.mount_point)\n', (895, 923), False, 'from sympy import Point3D, Line3D\n'), ((925, 946), 'sympy.Point3D', 'Point3D', (['self._origin'], {}), '(self._origin)\n', (932, 946), False, 'from sympy import Point3D, Line3D\n'), ((635, 646), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (641, 646), True, 'import numpy as np\n'), ((684, 695), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (690, 695), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import os
import numpy as np
def show_res_for_this_run(best_fitnesses_each_iter, average_fitnesses_each_iter, num_of_features_selected_by_best_ant_each_iter, feature_num):
    """Display a 3-panel summary figure for one optimization run.

    Panel 221: best fitness (accuracy) per iteration.
    Panel 222: average fitness per iteration.
    Panel 223: bar chart of how many features the best ant selected each
    iteration, labeled with the count above each bar.

    Parameters:
        best_fitnesses_each_iter: sequence of best fitness values, one per iteration.
        average_fitnesses_each_iter: sequence of mean fitness values, one per iteration.
        num_of_features_selected_by_best_ant_each_iter: sequence of feature counts.
        feature_num: total number of features (upper limit of the bar chart's y-axis).

    Blocks until the figure window is closed (plt.show()).
    """
    # 1-based iteration indices for the x-axes
    iterations = np.arange(1,len(best_fitnesses_each_iter)+1, dtype="int64")
    # Spacing between each line
    intervals = 1
    loc = plticker.MultipleLocator(base=intervals)
    # ax.xaxis.set_major_locator(loc)
    ##################################
    # panel 1: best accuracy per iteration (dots connected by a line)
    fig, ax1 = plt.subplots(figsize=(10,8))
    plt.subplot(221)
    xx1 = np.array(iterations)
    yy1 = np.array(best_fitnesses_each_iter)
    plt.plot(xx1, yy1, 'bo', xx1, yy1, 'k')
    plt.xlabel('iteration num')
    plt.ylabel('accuracy (fitness)')
    plt.title('Visualization of Accuracy over each Iteration')
    ax1 = fig.gca()
    ax1.xaxis.set_major_locator(loc)
    plt.grid(True)
    ##################################
    # panel 2: average accuracy per iteration
    plt.subplot(222)
    xx2 = np.array(iterations)
    yy2 = np.array(average_fitnesses_each_iter)
    plt.plot(xx2, yy2, 'bo', xx2, yy2, 'k')
    plt.xlabel('iteration num')
    plt.ylabel('average accuracy')
    plt.title('Visualization of Average of Accuracy over each Iteration')
    ax2 = fig.gca()
    ax2.xaxis.set_major_locator(loc)
    ##################################
    # panel 3: bar chart of selected-feature counts
    plt.subplot(223)
    N = len(num_of_features_selected_by_best_ant_each_iter)
    ind = np.arange(N)  # the x locations for the groups
    width = 0.25  # the width of the bars
    ax3 = fig.gca()
    rects = ax3.bar(ind, num_of_features_selected_by_best_ant_each_iter, width, color='c')
    ax3.set_ylabel('num of selected features (by best ant)')
    ax3.set_title('selected features over each iteration')
    ax3.set_xticks(ind + width / 2)
    ax3.set_xticklabels(np.arange(1, N+1))
    ax3.set_ylim([0, feature_num])
    def autolabel(rects):
        # write each bar's integer height slightly above the bar
        for rect in rects:
            height = rect.get_height()
            ax3.text(rect.get_x() + rect.get_width()/2., 1.05*height,
                    '%d' % int(height),
                    ha='center', va='bottom')
    autolabel(rects)
    plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.arange",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.grid"
] | [((389, 429), 'matplotlib.ticker.MultipleLocator', 'plticker.MultipleLocator', ([], {'base': 'intervals'}), '(base=intervals)\n', (413, 429), True, 'import matplotlib.ticker as plticker\n'), ((524, 553), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (536, 553), True, 'import matplotlib.pyplot as plt\n'), ((558, 574), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (569, 574), True, 'import matplotlib.pyplot as plt\n'), ((587, 607), 'numpy.array', 'np.array', (['iterations'], {}), '(iterations)\n', (595, 607), True, 'import numpy as np\n'), ((618, 652), 'numpy.array', 'np.array', (['best_fitnesses_each_iter'], {}), '(best_fitnesses_each_iter)\n', (626, 652), True, 'import numpy as np\n'), ((658, 697), 'matplotlib.pyplot.plot', 'plt.plot', (['xx1', 'yy1', '"""bo"""', 'xx1', 'yy1', '"""k"""'], {}), "(xx1, yy1, 'bo', xx1, yy1, 'k')\n", (666, 697), True, 'import matplotlib.pyplot as plt\n'), ((703, 730), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration num"""'], {}), "('iteration num')\n", (713, 730), True, 'import matplotlib.pyplot as plt\n'), ((735, 767), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy (fitness)"""'], {}), "('accuracy (fitness)')\n", (745, 767), True, 'import matplotlib.pyplot as plt\n'), ((772, 830), 'matplotlib.pyplot.title', 'plt.title', (['"""Visualization of Accuracy over each Iteration"""'], {}), "('Visualization of Accuracy over each Iteration')\n", (781, 830), True, 'import matplotlib.pyplot as plt\n'), ((893, 907), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (901, 907), True, 'import matplotlib.pyplot as plt\n'), ((954, 970), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (965, 970), True, 'import matplotlib.pyplot as plt\n'), ((982, 1002), 'numpy.array', 'np.array', (['iterations'], {}), '(iterations)\n', (990, 1002), True, 'import numpy as np\n'), ((1013, 1050), 'numpy.array', 'np.array', 
(['average_fitnesses_each_iter'], {}), '(average_fitnesses_each_iter)\n', (1021, 1050), True, 'import numpy as np\n'), ((1056, 1095), 'matplotlib.pyplot.plot', 'plt.plot', (['xx2', 'yy2', '"""bo"""', 'xx2', 'yy2', '"""k"""'], {}), "(xx2, yy2, 'bo', xx2, yy2, 'k')\n", (1064, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1101, 1128), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iteration num"""'], {}), "('iteration num')\n", (1111, 1128), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1163), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""average accuracy"""'], {}), "('average accuracy')\n", (1143, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1168, 1237), 'matplotlib.pyplot.title', 'plt.title', (['"""Visualization of Average of Accuracy over each Iteration"""'], {}), "('Visualization of Average of Accuracy over each Iteration')\n", (1177, 1237), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1359), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (1354, 1359), True, 'import matplotlib.pyplot as plt\n'), ((1432, 1444), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1441, 1444), True, 'import numpy as np\n'), ((2150, 2160), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2158, 2160), True, 'import matplotlib.pyplot as plt\n'), ((1820, 1839), 'numpy.arange', 'np.arange', (['(1)', '(N + 1)'], {}), '(1, N + 1)\n', (1829, 1839), True, 'import numpy as np\n')] |
"""Test brillouinzone.py module."""
import numpy as np
import pytest
from morpho.brillouinzone import BrillouinZonePath as BZPath
from morpho.brillouinzone import SymmetryPoint as SPoint
def test_symmetrypoint_constructor_3d():
    """A 3D SymmetryPoint keeps its label and stores a 3-vector."""
    point = SPoint((0.4, 0.5, 0.3), "X")
    assert point.name == "X"
    assert point.point.shape == (3,)
def test_symmetrypoint_constructor_2d():
    """A 2D SymmetryPoint keeps its label and stores a 2-vector."""
    point = SPoint((0.5, 0.3), "X")
    assert point.name == "X"
    assert point.point.shape == (2,)
def test_symmetrypoint_constructor_1d():
    """A 1D SymmetryPoint keeps its label and stores a 1-vector."""
    point = SPoint((0.5,), "X")
    assert point.name == "X"
    assert point.point.shape == (1,)
def test_brillouin_zone_path_3d():
    """A 3D path exposes the reciprocal basis vectors and the beta grid."""
    lattice = 1
    n_points = 11
    t1, t2, t3 = (lattice, 0, 0), (0, lattice, 0), (0, 0, lattice)
    gamma = SPoint((0, 0, 0), "Γ")
    z_point = SPoint((0, 0, 1 / 2), "Z")
    x_point = SPoint((1 / 2, 0, 0), "X")
    path = BZPath(t1, t2, t3, [gamma, z_point, x_point], n_points)
    b = 2 * np.pi / lattice
    assert path.b1 == pytest.approx(np.array([b, 0, 0]))
    assert path.b2 == pytest.approx(np.array([0, b, 0]))
    assert path.b3 == pytest.approx(np.array([0, 0, b]))
    assert path.betas.values.shape == (3, n_points)
def test_brillouin_zone_path_2d():
    """A 2D path exposes the reciprocal basis vectors and the beta grid."""
    lattice = 1
    n_points = 11
    t1, t2 = (lattice, 0), (0, lattice)
    gamma = SPoint((0, 0), "Γ")
    x_point = SPoint((1, 0), "X")
    y_point = SPoint((0, 1), "Y")
    path = BZPath(t1, t2, [gamma, x_point, y_point], n_points=n_points)
    b = 2 * np.pi / lattice
    assert path.b1 == pytest.approx(np.array([b, 0]))
    assert path.b2 == pytest.approx(np.array([0, b]))
    assert path.betas.values.shape == (2, n_points)
def test_brillouin_zone_path_1d():
    """A 1D path exposes the reciprocal basis vector and the beta grid."""
    lattice = 1
    n_points = 11
    t1 = (lattice,)
    gamma = SPoint((0,), "Γ")
    x_point = SPoint((1,), "X")
    path = BZPath(t1, [gamma, x_point], n_points=n_points)
    assert path.b1 == pytest.approx(np.array([2 * np.pi / lattice]))
    assert path.betas.values.shape == (1, n_points)
| [
"numpy.array",
"morpho.brillouinzone.BrillouinZonePath",
"morpho.brillouinzone.SymmetryPoint"
] | [((283, 311), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(0.4, 0.5, 0.3)', '"""X"""'], {}), "((0.4, 0.5, 0.3), 'X')\n", (289, 311), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((466, 489), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(0.5, 0.3)', '"""X"""'], {}), "((0.5, 0.3), 'X')\n", (472, 489), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((644, 663), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(0.5,)', '"""X"""'], {}), "((0.5,), 'X')\n", (650, 663), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((881, 903), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(0, 0, 0)', '"""Γ"""'], {}), "((0, 0, 0), 'Γ')\n", (887, 903), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((912, 938), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(0, 0, 1 / 2)', '"""Z"""'], {}), "((0, 0, 1 / 2), 'Z')\n", (918, 938), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((947, 973), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(1 / 2, 0, 0)', '"""X"""'], {}), "((1 / 2, 0, 0), 'X')\n", (953, 973), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((986, 1025), 'morpho.brillouinzone.BrillouinZonePath', 'BZPath', (['t1', 't2', 't3', '[G, Z, X]', 'n_points'], {}), '(t1, t2, t3, [G, Z, X], n_points)\n', (992, 1025), True, 'from morpho.brillouinzone import BrillouinZonePath as BZPath\n'), ((1423, 1442), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(0, 0)', '"""Γ"""'], {}), "((0, 0), 'Γ')\n", (1429, 1442), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((1451, 1470), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(1, 0)', '"""X"""'], {}), "((1, 0), 'X')\n", (1457, 1470), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((1479, 1498), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(0, 1)', '"""Y"""'], {}), "((0, 1), 'Y')\n", (1485, 1498), True, 'from morpho.brillouinzone import 
SymmetryPoint as SPoint\n'), ((1511, 1555), 'morpho.brillouinzone.BrillouinZonePath', 'BZPath', (['t1', 't2', '[G, X, Y]'], {'n_points': 'n_points'}), '(t1, t2, [G, X, Y], n_points=n_points)\n', (1517, 1555), True, 'from morpho.brillouinzone import BrillouinZonePath as BZPath\n'), ((1864, 1881), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(0,)', '"""Γ"""'], {}), "((0,), 'Γ')\n", (1870, 1881), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((1890, 1907), 'morpho.brillouinzone.SymmetryPoint', 'SPoint', (['(1,)', '"""X"""'], {}), "((1,), 'X')\n", (1896, 1907), True, 'from morpho.brillouinzone import SymmetryPoint as SPoint\n'), ((1920, 1957), 'morpho.brillouinzone.BrillouinZonePath', 'BZPath', (['t1', '[G, X]'], {'n_points': 'n_points'}), '(t1, [G, X], n_points=n_points)\n', (1926, 1957), True, 'from morpho.brillouinzone import BrillouinZonePath as BZPath\n'), ((1062, 1093), 'numpy.array', 'np.array', (['[2 * np.pi / a, 0, 0]'], {}), '([2 * np.pi / a, 0, 0])\n', (1070, 1093), True, 'import numpy as np\n'), ((1131, 1162), 'numpy.array', 'np.array', (['[0, 2 * np.pi / a, 0]'], {}), '([0, 2 * np.pi / a, 0])\n', (1139, 1162), True, 'import numpy as np\n'), ((1200, 1231), 'numpy.array', 'np.array', (['[0, 0, 2 * np.pi / a]'], {}), '([0, 0, 2 * np.pi / a])\n', (1208, 1231), True, 'import numpy as np\n'), ((1592, 1620), 'numpy.array', 'np.array', (['[2 * np.pi / a, 0]'], {}), '([2 * np.pi / a, 0])\n', (1600, 1620), True, 'import numpy as np\n'), ((1658, 1686), 'numpy.array', 'np.array', (['[0, 2 * np.pi / a]'], {}), '([0, 2 * np.pi / a])\n', (1666, 1686), True, 'import numpy as np\n'), ((1994, 2019), 'numpy.array', 'np.array', (['[2 * np.pi / a]'], {}), '([2 * np.pi / a])\n', (2002, 2019), True, 'import numpy as np\n')] |
# Copyright 2013, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
# Computes a coordinate representation using Multidimensional Scaling
# on an alpha-sum of distance matrices.
#
# <NAME>
# 1/6/2015
# Now set up to handle landmarks (3/1/2021). To preserve compatibility with older
# models, landmarks should be set to the entire dataset in the case when full
# (square) distance matrices are available. In this case the exact behavior is
# preserved. If landmarks are a subset of the full dataset, there is a slightly
# different behavior when analyzing subsets (namely a subset does not trigger
# a full re-calculation of the coordinates, since that would not be possible
# without full distance matrices).
"""Computes a coordinate representation using Multidimensional Scaling
on an alpha-sum of distance matrices. The distance matrices are stored
in a list of 2d numpy arrays and the alpha values are stored in a 1d numpy
array."""
import numpy as np
import scipy.linalg
import scipy.optimize
from scipy import spatial
# cmdscale translation from Matlab by <NAME>
def cmdscale(D, full=False):
    """
    Classical multidimensional scaling (MDS)

    Parameters
    ----------
    D : (n, n) array
        Symmetric distance matrix.
    full: Boolean
        Use to compute all eigenvalues/vectors; otherwise only the two
        dominant eigenpairs are computed.

    Returns
    -------
    Y : (n, 2) array
        Configuration matrix.  Each column represents a dimension.  Only the
        p dimensions corresponding to positive eigenvalues of B are returned.
        Note that each dimension is only determined up to an overall sign,
        corresponding to a reflection.  Only returns 2d coordinates.
    Yinv : multiply (dx - d_mean) times Yinv to get 2d projected coordinates,
           where dx is a row vector of distances to points in Y and d_mean
           is the average of the distance in Y.
    """

    # number of points
    n = len(D)

    # for multiple points, solve eigenvalue problem
    if n > 1:

        # Centering matrix
        H = np.eye(n) - np.ones((n, n)) / n

        # YY^T (double-centered Gram matrix)
        B = -H.dot(D ** 2).dot(H) / 2

        # diagonalize
        if full:

            # keep all eigenvalues/vectors
            evals, evecs = np.linalg.eigh(B)

        else:

            # keep only largest two eigenvalues/vectors.
            # FIX: the 'eigvals' keyword of scipy.linalg.eigh was deprecated
            # in SciPy 1.5 and subsequently removed; 'subset_by_index' is the
            # supported replacement with identical semantics.
            evals, evecs = scipy.linalg.eigh(B, subset_by_index=[n - 2, n - 1])

        # Sort by eigenvalue in descending order
        idx = np.argsort(evals)[::-1]
        evals = evals[idx]
        evecs = evecs[:, idx]

        # Compute the coordinates using positive-eigenvalued components only
        w, = np.where(evals > 0)
        L = np.diag(np.sqrt(evals[w]))
        V = evecs[:, w]
        Y = V.dot(L)

        # compute inverse for projecting out-of-sample points
        Linv = np.diag(np.reciprocal(np.sqrt(evals[w])))
        Yinv = -V.dot(Linv) / 2.0

        # if no coordinates then use two columns of zeros for Y and Yinv
        if len(w) == 0:
            Y = np.zeros((n, 2))
            Yinv = np.zeros((n, 2))

        # if only one coordinate then add one column of zeros to Y and Yinv
        if len(w) == 1:
            Y = np.append(np.reshape(Y, (Y.shape[0], 1)),
                          np.zeros((Y.shape[0], 1)), axis=1)
            Yinv = np.append(np.reshape(Yinv, (Yinv.shape[0], 1)),
                             np.zeros((Yinv.shape[0], 1)), axis=1)

    # for one point set coordinates to center of screen
    else:
        Y = np.array([0, 0])
        Yinv = np.array([0, 0])

    return Y, Yinv
# this is the legacy, non-landmark behavior, preserved
# in case of models with full pairwise distance matrices
def compute_coords_subset (dist_mats, alpha_values, old_coords, subset, proj=None):
    """
    Classical MDS on the alpha-weighted combination of distance matrices,
    restricted to a subset (legacy code path for full square matrices).

    INPUTS:  - dist_mats is a list of numpy arrays containing square
               matrices (n,n) representing distances,
             - alpha_values is a numpy array containing a vector of
               alpha values between 0 and 1.
             - old_coords holds previous (n,2) coordinates, reused (and
               modified in place) for points outside the working set.
             - subset is a vector of length n with 1 = in subset, 0 = not in subset.
             - proj is an optional vector similar to subset, defaults to vector
               of all 1.
    OUTPUTS: Y is a numpy array of coordinates (n,2).
    """

    # default projection: every point participates
    num_tests = dist_mats[0].shape[0]
    proj = np.ones(num_tests) if proj is None else np.asarray(proj)

    num_proj = int(np.sum(proj))
    num_subset = int(np.sum(subset))

    # with a full subset we run MDS on the projection mask and project the
    # rest afterwards; otherwise we run MDS directly on the subset
    compute_proj = num_subset == num_tests
    cmd_subset = proj if compute_proj else subset

    # alpha-weighted sum of squared distances over the working set
    num_cmd_subset = int(np.sum(cmd_subset))
    subset_inds = np.where(cmd_subset)[0]
    combined_sq = np.zeros((num_cmd_subset, num_cmd_subset))
    for alpha, dist in zip(alpha_values, dist_mats):
        combined_sq += alpha ** 2 * dist[subset_inds[:, None], subset_inds] ** 2

    # classical MDS on the working set
    mds_subset_coords, proj_inv = cmdscale(np.sqrt(combined_sq))

    # keep previous coordinates for points outside the working set
    # (note: old_coords is updated in place and returned)
    mds_coords = old_coords
    mds_coords[subset_inds, :] = mds_subset_coords

    # project the remaining points when the projection is a strict subset
    if compute_proj and (num_proj < num_tests):

        proj_inds = np.where(cmd_subset == 0)[0]

        # mean squared distance of the embedded points
        mean_dist = np.mean(combined_sq, axis=1)

        # alpha-weighted squared distances from each projected point
        proj_sq = np.zeros((len(proj_inds), num_proj))
        for alpha, dist in zip(alpha_values, dist_mats):
            proj_sq += alpha ** 2 * dist[proj_inds[:, None], subset_inds] ** 2

        # place projected coordinates into the result
        mds_coords[proj_inds, :] = (proj_sq - mean_dist).dot(proj_inv)

    return mds_coords
def compute_coords (dist_mats, alpha_values, old_coords, subset,
                    proj=None, landmarks=None):
    """
    Computes sum alpha_i^2 dist_mat_i.^2 then calls cmdscale to compute
    classical multidimensional scaling.

    INPUTS:  -- dist_mats is a list of numpy arrays containing square
                matrices (n,n) representing distances (or (n,k) when
                k landmarks are in use),
             -- alpha_values is a numpy array containing a vector of
                alpha values between 0 and 1.
             -- old_coords is a numpy array containing the previous coordinates.
                NOTE: it is modified in place and returned; rows outside the
                landmark/projection set keep their previous values.
             -- subset is a vector of length n with 1 = in subset, 0 = not in subset.
             -- proj is an optional vector similar to subset, defaults to vector
                of all 1.
             -- landmarks is an optional vector which specifies landmark indices to use
                in the MDS calculation using mask. if the dist_mats are not square it
                is required and the matrices are assumed to be size (n,k), where k is
                the number of landmarks
    OUTPUTS: Y is a numpy array of coordinates (n,2) and
    """

    # set landmark default, vector of all ones, indicating that
    # everything is a landmark and distance matrices are square
    num_tests = dist_mats[0].shape[0]
    if landmarks is None:
        landmarks = np.ones(num_tests)

    # make sure landmarks is an array
    else:
        landmarks = np.asarray(landmarks)

    # use legacy behavior if we have full pairwise distance matrices
    # (preserves exact results for older models without landmarks)
    num_landmarks = int(np.sum(landmarks))
    if num_landmarks == num_tests:
        return compute_coords_subset (dist_mats, alpha_values,
                                      old_coords, subset, proj=proj)

    # set projection default (vector of all ones -- everything in base calculation)
    if proj is None:
        proj = np.ones(num_tests)

    # make sure projection is an array
    else:
        proj = np.asarray(proj)

    # remove projected points from landmarks so they are positioned by
    # projection rather than by the base MDS calculation
    landmarks = np.multiply(landmarks, proj)

    # get sizes of projection, subset
    num_proj = int(np.sum(proj))
    num_subset = int(np.sum(subset))

    # always use landmarks to compute basic coordinates
    num_landmarks = int(np.sum(landmarks))
    full_dist_mat = np.zeros((num_landmarks,num_landmarks))

    # compute alpha-sum of squared distance matrices on landmarks
    # (rows index landmark points, columns index landmark columns of the
    # rectangular (n,k) distance matrices)
    landmark_rows = np.where(landmarks)[0]
    landmark_cols = np.arange(num_landmarks)
    for i in range(len(dist_mats)):
        full_dist_mat = full_dist_mat + alpha_values[i]**2 * \
                        dist_mats[i][landmark_rows[:,None], landmark_cols]**2

    # compute mds coordinates on landmarks
    mds_landmark_coords, proj_inv = cmdscale(np.sqrt(full_dist_mat))

    # if not in landmarks, assign old coordinates (in-place update)
    mds_coords = old_coords
    mds_coords[landmark_rows,:] = mds_landmark_coords

    # now project everything else onto the landmark embedding
    if num_landmarks < num_tests:

        # get points to project (subset or proj except landmarks)
        if num_subset == num_tests:
            proj_inds = np.where(landmarks==0)[0]
        else:
            proj_inds = np.where(np.logical_and(landmarks==0, subset))[0]

        # compute mean distance squared for points in projection
        mean_dist = np.mean(full_dist_mat, axis=1)

        # compute distance squared for each point to be projected
        num_proj_inds = len(proj_inds)
        proj_dist_mat = np.zeros((num_proj_inds, num_landmarks))
        for i in range(len(dist_mats)):
            proj_dist_mat = proj_dist_mat + alpha_values[i] ** 2 * \
                            dist_mats[i][proj_inds[:, None], landmark_cols] ** 2

        # compute projected coords
        proj_coords = (proj_dist_mat - mean_dist).dot(proj_inv)

        # put projected coords into mds coords
        mds_coords[proj_inds,:] = proj_coords

    return mds_coords
def scale_coords (coords, full_coords, subset, center):
    """
    Rotate coords (Kabsch algorithm) so they best align with full_coords
    on the subset, then rescale the subset independently per axis into the
    unit box [0,1]^2.  Points outside the subset are pushed past the box
    edge along their direction from the subset center.

    INPUTS:  -- coords is numpy matrix (n,2) of coords to scale,
             -- full_coords is numpy matrix (n,2) to align
                as a reference for the coords vector.
             -- subset is a vector of length n with 1 = in subset, 0 = not in subset.
             -- center is the subset center
    OUTPUTS: a numpy matrix (n,2) of adjusted coordinates.
    """

    # mean-center the working coordinates and the reference on the subset
    in_subset = np.where(subset)[0]
    work = coords[in_subset, :]
    work = work - np.mean(work, axis=0)
    ref = full_coords[in_subset, :]
    ref = ref - np.mean(ref, axis=0)

    # Kabsch alignment: rotation taking work onto ref
    u, s, v = np.linalg.svd(np.dot(work.transpose(), ref))
    rot_mat = np.dot(v, u.transpose())
    rotated = np.dot(work, rot_mat.transpose())

    # per-axis scale factors, guarding against division by zero
    span_x = np.amax(np.absolute(rotated[:, 0]))
    span_y = np.amax(np.absolute(rotated[:, 1]))
    if span_x < np.finfo(float).eps:
        span_x = 1.0
    if span_y < np.finfo(float).eps:
        span_y = 1.0

    # map each axis into [0,1]
    rotated[:, 0] = rotated[:, 0] / (2.0 * span_x) + 0.5
    rotated[:, 1] = rotated[:, 1] / (2.0 * span_y) + 0.5

    # points outside the subset land beyond [0,1]^2, in the same
    # direction from the center as before
    num_coords = coords.shape[0]
    new_coords = np.zeros((num_coords, 2))
    for i in range(num_coords):
        if subset[i] == 0:
            offset = coords[i, :] - center
            length = np.linalg.norm(offset)
            if length == 0:
                new_coords[i, :] = np.array([-1, -1])
            else:
                new_coords[i, :] = center + 2.0 * offset / length

    # place the newly scaled subset coordinates
    new_coords[in_subset, :] = rotated
    return new_coords
def init_coords (var_dist, proj=None, landmarks=None):
    """
    Compute the initial MDS layout with every alpha weight set to 1.0.

    INPUTS:  - var_dist is a list of distance matrices.  NOTE: each matrix
               is rescaled in place to maximum 1 (unless its maximum is ~0).
             - proj is a vector mask of projected points (optional)
             - landmarks is a vector of indices of landmark points (optional)
    OUTPUTS: mds_coords are the initial scaled MDS coordinates
             full_mds_coords are the unscaled version of the same coordinates
    """

    num_vars = len(var_dist)

    # all alpha values start at one
    alpha_values = np.ones(num_vars)

    # normalize each distance matrix by its maximum, skipping all-zero matrices
    for i in range(num_vars):
        peak = np.amax(var_dist[i])
        if peak < np.finfo(float).eps:
            peak = 1.0
        var_dist[i] = var_dist[i] / peak

    # MDS with alpha = 1 on the full subset and full view
    num_points = var_dist[0].shape[0]
    subset_mask = np.ones(num_points)
    full_mds_coords = compute_coords(var_dist, alpha_values,
                                     np.zeros((num_points, 2)), subset_mask,
                                     proj=proj, landmarks=landmarks)

    # scale into the unit box, using the full coordinates as their own reference
    mds_coords = scale_coords(full_mds_coords, full_mds_coords,
                              subset_mask, np.array([.5, .5]))

    return mds_coords, full_mds_coords
def compute_alpha_clusters (var_dist, meta_columns, meta_column_types, landmarks=None):
    """
    Computes the alpha cluster values: for each numeric or string metadata
    column, a non-negative least squares fit expresses that column's pairwise
    distances as a weighted sum of the variable distance matrices; the square
    roots of the fitted weights (normalized to maximum 1) are the alphas.

    INPUTS:  -- var_dist is a list of distance matrices
             -- meta_columns is a list of meta data arrays
             -- meta_column_types is a list of the meta data array types
             -- landmarks is a mask indicating landmarks
    OUTPUTS: alpha_cluster_mat is a matrix containing all the alpha
             values for clustering each meta data array (one row per
             metadata column; all-zero rows for unsupported column types)
    """

    # if landmarks are not given, assume everything is a landmark
    num_tests = var_dist[0].shape[0]
    if landmarks is None:
        landmarks = np.ones(num_tests)

    # make sure landmarks is an array
    else:
        landmarks = np.asarray(landmarks)

    # get landmarks locations in distance matrices
    num_landmarks = int(np.sum(landmarks))
    landmark_rows = np.where(landmarks)[0]
    landmark_cols = np.arange(num_landmarks)

    # compute alpha cluster values using landmarks only
    num_vars = len(var_dist)
    num_time_series = num_landmarks

    # form a matrix with each distance matrix flattened into a column
    # (this is the U matrix of the NNLS problem)
    all_dist_mat = np.zeros((num_time_series * num_time_series, num_vars))
    for i in range(num_vars):
        all_dist_mat[:, i] = np.squeeze(np.reshape(var_dist[i][landmark_rows[:,None], landmark_cols],
                                           (num_time_series * num_time_series, 1)))

    # for each quantitative meta variable, compute distances as columns (V matrices)
    prop_dist_mats = []    # store as a list of numpy columns
    num_meta_cols = len(meta_column_types)
    for i in range(num_meta_cols):

        if meta_column_types[i] == "float64":

            # compute pairwise distance matrix vector for property i
            landmark_data_i = np.asarray(meta_columns[i])[landmark_rows]
            prop_dist_mats.append(compute_prop_dist_vec(landmark_data_i, num_time_series))

        elif meta_column_types[i] == "string":

            # compute pairwise distance matrix for property i
            # using strings (sorted alphabetically and assigned
            # values starting at 0)

            # sort potential values in string metadata
            uniq_sorted_columns = sorted(set(meta_columns[i]))

            # use alphabetical order to make a vector of numbers
            meta_column_num = np.asarray([uniq_sorted_columns.index(str_meta)
                              for str_meta in meta_columns[i]])[landmark_rows]
            prop_dist_mats.append(compute_prop_dist_vec(meta_column_num, num_time_series))

        else:

            # unsupported column type: placeholder keeps indices aligned
            prop_dist_mats.append(0)

    # compute NNLS cluster button alpha values, if more than one data point
    alpha_cluster_mat = np.zeros((num_meta_cols, num_vars))
    if num_time_series > 1:
        for i in range(num_meta_cols):
            if (meta_column_types[i] == "float64") or \
               (meta_column_types[i] == "string"):

                # non-negative least squares fit of property distances
                # against the variable distance columns
                beta_i = scipy.optimize.nnls(all_dist_mat, prop_dist_mats[i])
                alpha_i = np.sqrt(beta_i[0])

                # again don't divide by zero
                alpha_max_i = np.amax(alpha_i)
                if alpha_max_i <= np.finfo(float).eps:
                    alpha_max_i = 1
                alpha_cluster_mat[i, :] = alpha_i / alpha_max_i

    return alpha_cluster_mat
# subroutine for compute_alpha_clusters which computes the pairwise
# distance matrix for the alpha slider optimization
def compute_prop_dist_vec(prop_vec, vec_length):
    """
    Flattened pairwise absolute-difference matrix of a property vector,
    normalized by its maximum (helper for the alpha slider NNLS fit).

    INPUTS:  -- prop_vec is a numeric numpy vector of property values
             -- vec_length is the number of entries in prop_vec
    OUTPUTS: numpy vector of length vec_length**2 with values in [0, 1]
    """

    # pairwise |prop_i - prop_j| via tiling
    tiled = np.tile(prop_vec, (vec_length, 1))
    pairwise = np.absolute(np.transpose(tiled) - tiled)
    flat = np.squeeze(np.reshape(pairwise, (vec_length * vec_length, 1)))

    # avoid dividing by zero when every property value is identical
    peak = np.amax(flat)
    if peak <= np.finfo(float).eps:
        peak = 1.0
    return flat / peak
# use max-min algorithm to choose landmarks
def select_landmarks(num_points, num_landmarks, variable):
    """
    Pick landmark points with the max-min (farthest-point) strategy.

    INPUTS:  -- num_points is the total number of points
             -- num_landmarks is how many landmarks to select
             -- variable is a list of (num_points, d) numpy arrays, one per
                variable, whose summed distances drive the selection
    OUTPUTS: 1-based numpy array of landmark indices; the first landmark
             is always the first point in the dataset
    """
    num_vars = len(variable)

    # seed with the first point in the dataset
    chosen = [0]

    # running minimum distance from every point to the chosen landmarks
    nearest = np.full((num_points, 1), np.inf)
    for k in range(1, num_landmarks):

        # distance from every point to the most recently chosen landmark
        dist_to_last = np.zeros((num_points, 1))
        for j in range(num_vars):
            dist_to_last += spatial.distance.cdist(
                variable[j], variable[j][[chosen[k - 1]], :])

        # fold the new landmark into the running minimum
        nearest = np.minimum(nearest, dist_to_last)

        # the next landmark is the point farthest from all current landmarks
        chosen.append(np.argmax(nearest))

    # return 1-based indices
    return np.asarray(chosen) + 1
| [
"numpy.absolute",
"numpy.sum",
"numpy.argmax",
"numpy.ones",
"numpy.argsort",
"numpy.linalg.svd",
"numpy.mean",
"numpy.arange",
"numpy.linalg.norm",
"numpy.tile",
"numpy.full",
"numpy.multiply",
"numpy.finfo",
"numpy.reshape",
"scipy.spatial.distance.cdist",
"numpy.minimum",
"numpy.a... | [((5947, 5989), 'numpy.zeros', 'np.zeros', (['(num_cmd_subset, num_cmd_subset)'], {}), '((num_cmd_subset, num_cmd_subset))\n', (5955, 5989), True, 'import numpy as np\n'), ((9523, 9551), 'numpy.multiply', 'np.multiply', (['landmarks', 'proj'], {}), '(landmarks, proj)\n', (9534, 9551), True, 'import numpy as np\n'), ((9781, 9821), 'numpy.zeros', 'np.zeros', (['(num_landmarks, num_landmarks)'], {}), '((num_landmarks, num_landmarks))\n', (9789, 9821), True, 'import numpy as np\n'), ((9943, 9967), 'numpy.arange', 'np.arange', (['num_landmarks'], {}), '(num_landmarks)\n', (9952, 9967), True, 'import numpy as np\n'), ((12206, 12236), 'numpy.mean', 'np.mean', (['subset_coords'], {'axis': '(0)'}), '(subset_coords, axis=0)\n', (12213, 12236), True, 'import numpy as np\n'), ((12403, 12438), 'numpy.mean', 'np.mean', (['full_subset_coords'], {'axis': '(0)'}), '(full_subset_coords, axis=0)\n', (12410, 12438), True, 'import numpy as np\n'), ((12661, 12684), 'numpy.linalg.svd', 'np.linalg.svd', (['corr_mat'], {}), '(corr_mat)\n', (12674, 12684), True, 'import numpy as np\n'), ((13501, 13526), 'numpy.zeros', 'np.zeros', (['(num_coords, 2)'], {}), '((num_coords, 2))\n', (13509, 13526), True, 'import numpy as np\n'), ((14719, 14736), 'numpy.ones', 'np.ones', (['num_vars'], {}), '(num_vars)\n', (14726, 14736), True, 'import numpy as np\n'), ((15113, 15142), 'numpy.ones', 'np.ones', (['var_dist[0].shape[0]'], {}), '(var_dist[0].shape[0])\n', (15120, 15142), True, 'import numpy as np\n'), ((15160, 15195), 'numpy.zeros', 'np.zeros', (['(var_dist[0].shape[0], 2)'], {}), '((var_dist[0].shape[0], 2))\n', (15168, 15195), True, 'import numpy as np\n'), ((15408, 15428), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (15416, 15428), True, 'import numpy as np\n'), ((16492, 16516), 'numpy.arange', 'np.arange', (['num_landmarks'], {}), '(num_landmarks)\n', (16501, 16516), True, 'import numpy as np\n'), ((16736, 16791), 'numpy.zeros', 'np.zeros', 
(['(num_time_series * num_time_series, num_vars)'], {}), '((num_time_series * num_time_series, num_vars))\n', (16744, 16791), True, 'import numpy as np\n'), ((18316, 18351), 'numpy.zeros', 'np.zeros', (['(num_meta_cols, num_vars)'], {}), '((num_meta_cols, num_vars))\n', (18324, 18351), True, 'import numpy as np\n'), ((19430, 19452), 'numpy.amax', 'np.amax', (['prop_dist_vec'], {}), '(prop_dist_vec)\n', (19437, 19452), True, 'import numpy as np\n'), ((19850, 19882), 'numpy.full', 'np.full', (['(num_points, 1)', 'np.inf'], {}), '((num_points, 1), np.inf)\n', (19857, 19882), True, 'import numpy as np\n'), ((3551, 3570), 'numpy.where', 'np.where', (['(evals > 0)'], {}), '(evals > 0)\n', (3559, 3570), True, 'import numpy as np\n'), ((4383, 4399), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (4391, 4399), True, 'import numpy as np\n'), ((4415, 4431), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (4423, 4431), True, 'import numpy as np\n'), ((5419, 5437), 'numpy.ones', 'np.ones', (['num_tests'], {}), '(num_tests)\n', (5426, 5437), True, 'import numpy as np\n'), ((5503, 5519), 'numpy.asarray', 'np.asarray', (['proj'], {}), '(proj)\n', (5513, 5519), True, 'import numpy as np\n'), ((5578, 5590), 'numpy.sum', 'np.sum', (['proj'], {}), '(proj)\n', (5584, 5590), True, 'import numpy as np\n'), ((5613, 5627), 'numpy.sum', 'np.sum', (['subset'], {}), '(subset)\n', (5619, 5627), True, 'import numpy as np\n'), ((5907, 5925), 'numpy.sum', 'np.sum', (['cmd_subset'], {}), '(cmd_subset)\n', (5913, 5925), True, 'import numpy as np\n'), ((6063, 6083), 'numpy.where', 'np.where', (['cmd_subset'], {}), '(cmd_subset)\n', (6071, 6083), True, 'import numpy as np\n'), ((6344, 6366), 'numpy.sqrt', 'np.sqrt', (['full_dist_mat'], {}), '(full_dist_mat)\n', (6351, 6366), True, 'import numpy as np\n'), ((6824, 6854), 'numpy.mean', 'np.mean', (['full_dist_mat'], {'axis': '(1)'}), '(full_dist_mat, axis=1)\n', (6831, 6854), True, 'import numpy as np\n'), ((6946, 6981), 
'numpy.zeros', 'np.zeros', (['(num_proj_inds, num_proj)'], {}), '((num_proj_inds, num_proj))\n', (6954, 6981), True, 'import numpy as np\n'), ((8870, 8888), 'numpy.ones', 'np.ones', (['num_tests'], {}), '(num_tests)\n', (8877, 8888), True, 'import numpy as np\n'), ((8962, 8983), 'numpy.asarray', 'np.asarray', (['landmarks'], {}), '(landmarks)\n', (8972, 8983), True, 'import numpy as np\n'), ((9078, 9095), 'numpy.sum', 'np.sum', (['landmarks'], {}), '(landmarks)\n', (9084, 9095), True, 'import numpy as np\n'), ((9360, 9378), 'numpy.ones', 'np.ones', (['num_tests'], {}), '(num_tests)\n', (9367, 9378), True, 'import numpy as np\n'), ((9444, 9460), 'numpy.asarray', 'np.asarray', (['proj'], {}), '(proj)\n', (9454, 9460), True, 'import numpy as np\n'), ((9610, 9622), 'numpy.sum', 'np.sum', (['proj'], {}), '(proj)\n', (9616, 9622), True, 'import numpy as np\n'), ((9645, 9659), 'numpy.sum', 'np.sum', (['subset'], {}), '(subset)\n', (9651, 9659), True, 'import numpy as np\n'), ((9742, 9759), 'numpy.sum', 'np.sum', (['landmarks'], {}), '(landmarks)\n', (9748, 9759), True, 'import numpy as np\n'), ((9900, 9919), 'numpy.where', 'np.where', (['landmarks'], {}), '(landmarks)\n', (9908, 9919), True, 'import numpy as np\n'), ((10234, 10256), 'numpy.sqrt', 'np.sqrt', (['full_dist_mat'], {}), '(full_dist_mat)\n', (10241, 10256), True, 'import numpy as np\n'), ((10794, 10824), 'numpy.mean', 'np.mean', (['full_dist_mat'], {'axis': '(1)'}), '(full_dist_mat, axis=1)\n', (10801, 10824), True, 'import numpy as np\n'), ((10955, 10995), 'numpy.zeros', 'np.zeros', (['(num_proj_inds, num_landmarks)'], {}), '((num_proj_inds, num_landmarks))\n', (10963, 10995), True, 'import numpy as np\n'), ((12084, 12100), 'numpy.where', 'np.where', (['subset'], {}), '(subset)\n', (12092, 12100), True, 'import numpy as np\n'), ((12904, 12933), 'numpy.absolute', 'np.absolute', (['rot_coords[:, 0]'], {}), '(rot_coords[:, 0])\n', (12915, 12933), True, 'import numpy as np\n'), ((12963, 12992), 'numpy.absolute', 
'np.absolute', (['rot_coords[:, 1]'], {}), '(rot_coords[:, 1])\n', (12974, 12992), True, 'import numpy as np\n'), ((14860, 14880), 'numpy.amax', 'np.amax', (['var_dist[i]'], {}), '(var_dist[i])\n', (14867, 14880), True, 'import numpy as np\n'), ((16220, 16238), 'numpy.ones', 'np.ones', (['num_tests'], {}), '(num_tests)\n', (16227, 16238), True, 'import numpy as np\n'), ((16312, 16333), 'numpy.asarray', 'np.asarray', (['landmarks'], {}), '(landmarks)\n', (16322, 16333), True, 'import numpy as np\n'), ((16410, 16427), 'numpy.sum', 'np.sum', (['landmarks'], {}), '(landmarks)\n', (16416, 16427), True, 'import numpy as np\n'), ((16449, 16468), 'numpy.where', 'np.where', (['landmarks'], {}), '(landmarks)\n', (16457, 16468), True, 'import numpy as np\n'), ((19311, 19366), 'numpy.reshape', 'np.reshape', (['prop_dist_mat', '(vec_length * vec_length, 1)'], {}), '(prop_dist_mat, (vec_length * vec_length, 1))\n', (19321, 19366), True, 'import numpy as np\n'), ((20019, 20044), 'numpy.zeros', 'np.zeros', (['(num_points, 1)'], {}), '((num_points, 1))\n', (20027, 20044), True, 'import numpy as np\n'), ((20285, 20329), 'numpy.minimum', 'np.minimum', (['min_combined_dist', 'combined_dist'], {}), '(min_combined_dist, combined_dist)\n', (20295, 20329), True, 'import numpy as np\n'), ((20500, 20521), 'numpy.asarray', 'np.asarray', (['landmarks'], {}), '(landmarks)\n', (20510, 20521), True, 'import numpy as np\n'), ((2949, 2958), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2955, 2958), True, 'import numpy as np\n'), ((3158, 3175), 'numpy.linalg.eigh', 'np.linalg.eigh', (['B'], {}), '(B)\n', (3172, 3175), True, 'import numpy as np\n'), ((3380, 3397), 'numpy.argsort', 'np.argsort', (['evals'], {}), '(evals)\n', (3390, 3397), True, 'import numpy as np\n'), ((3592, 3609), 'numpy.sqrt', 'np.sqrt', (['evals[w]'], {}), '(evals[w])\n', (3599, 3609), True, 'import numpy as np\n'), ((3904, 3920), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (3912, 3920), True, 'import numpy as 
np\n'), ((3939, 3955), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (3947, 3955), True, 'import numpy as np\n'), ((6672, 6697), 'numpy.where', 'np.where', (['(cmd_subset == 0)'], {}), '(cmd_subset == 0)\n', (6680, 6697), True, 'import numpy as np\n'), ((13056, 13071), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (13064, 13071), True, 'import numpy as np\n'), ((13130, 13145), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (13138, 13145), True, 'import numpy as np\n'), ((13715, 13739), 'numpy.linalg.norm', 'np.linalg.norm', (['move_dir'], {}), '(move_dir)\n', (13729, 13739), True, 'import numpy as np\n'), ((16862, 16969), 'numpy.reshape', 'np.reshape', (['var_dist[i][landmark_rows[:, None], landmark_cols]', '(num_time_series * num_time_series, 1)'], {}), '(var_dist[i][landmark_rows[:, None], landmark_cols], (\n num_time_series * num_time_series, 1))\n', (16872, 16969), True, 'import numpy as np\n'), ((19244, 19278), 'numpy.tile', 'np.tile', (['prop_vec', '(vec_length, 1)'], {}), '(prop_vec, (vec_length, 1))\n', (19251, 19278), True, 'import numpy as np\n'), ((19481, 19496), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (19489, 19496), True, 'import numpy as np\n'), ((20108, 20179), 'scipy.spatial.distance.cdist', 'spatial.distance.cdist', (['variable[j]', 'variable[j][[landmarks[i - 1]], :]'], {}), '(variable[j], variable[j][[landmarks[i - 1]], :])\n', (20130, 20179), False, 'from scipy import spatial\n'), ((20412, 20440), 'numpy.argmax', 'np.argmax', (['min_combined_dist'], {}), '(min_combined_dist)\n', (20421, 20440), True, 'import numpy as np\n'), ((2961, 2976), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (2968, 2976), True, 'import numpy as np\n'), ((3736, 3753), 'numpy.sqrt', 'np.sqrt', (['evals[w]'], {}), '(evals[w])\n', (3743, 3753), True, 'import numpy as np\n'), ((4082, 4112), 'numpy.reshape', 'np.reshape', (['Y', '(Y.shape[0], 1)'], {}), '(Y, (Y.shape[0], 1))\n', (4092, 4112), True, 'import numpy 
as np\n'), ((4139, 4164), 'numpy.zeros', 'np.zeros', (['(Y.shape[0], 1)'], {}), '((Y.shape[0], 1))\n', (4147, 4164), True, 'import numpy as np\n'), ((4202, 4238), 'numpy.reshape', 'np.reshape', (['Yinv', '(Yinv.shape[0], 1)'], {}), '(Yinv, (Yinv.shape[0], 1))\n', (4212, 4238), True, 'import numpy as np\n'), ((4266, 4294), 'numpy.zeros', 'np.zeros', (['(Yinv.shape[0], 1)'], {}), '((Yinv.shape[0], 1))\n', (4274, 4294), True, 'import numpy as np\n'), ((10586, 10610), 'numpy.where', 'np.where', (['(landmarks == 0)'], {}), '(landmarks == 0)\n', (10594, 10610), True, 'import numpy as np\n'), ((13802, 13820), 'numpy.array', 'np.array', (['[-1, -1]'], {}), '([-1, -1])\n', (13810, 13820), True, 'import numpy as np\n'), ((14907, 14922), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (14915, 14922), True, 'import numpy as np\n'), ((17371, 17398), 'numpy.asarray', 'np.asarray', (['meta_columns[i]'], {}), '(meta_columns[i])\n', (17381, 17398), True, 'import numpy as np\n'), ((18631, 18649), 'numpy.sqrt', 'np.sqrt', (['beta_i[0]'], {}), '(beta_i[0])\n', (18638, 18649), True, 'import numpy as np\n'), ((18726, 18742), 'numpy.amax', 'np.amax', (['alpha_i'], {}), '(alpha_i)\n', (18733, 18742), True, 'import numpy as np\n'), ((19206, 19240), 'numpy.tile', 'np.tile', (['prop_vec', '(vec_length, 1)'], {}), '(prop_vec, (vec_length, 1))\n', (19213, 19240), True, 'import numpy as np\n'), ((10659, 10697), 'numpy.logical_and', 'np.logical_and', (['(landmarks == 0)', 'subset'], {}), '(landmarks == 0, subset)\n', (10673, 10697), True, 'import numpy as np\n'), ((18777, 18792), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (18785, 18792), True, 'import numpy as np\n')] |
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
import numpy as np
import time
import warnings
import torch
def cycle(iterable):
    """Yield elements of *iterable* forever, restarting it on exhaustion.

    Unlike ``itertools.cycle``, nothing is cached: each pass re-obtains a
    fresh iterator, so e.g. a DataLoader that reshuffles on every epoch
    produces new batches on every cycle.
    (see https://github.com/pytorch/pytorch/issues/23900)
    """
    while True:
        for element in iterable:
            yield element
def copy_state(model):
    """
    Given PyTorch module `model`, makes a copy of the state onto CPU.

    Args:
        model: PyTorch module whose state dict is to be copied.

    Returns:
        OrderedDict: a copy of the state dict with all tensors allocated on the CPU.
    """
    # CUDA tensors are copied to host via .cpu(); CPU tensors are cloned so the
    # returned dict never aliases the live model parameters.
    return OrderedDict(
        (name, tensor.cpu() if tensor.is_cuda else tensor.clone())
        for name, tensor in model.state_dict().items()
    )
class Tracker:
    """
    Abstract base class for all trackers.

    Concrete trackers implement :meth:`log_objective` to record a value and
    may override :meth:`finalize` to post-process the collected log.
    """

    def log_objective(self, obj=None):
        """
        Records the provided object.

        Args:
            obj (Any, optional): Object to be logged.

        Raises:
            NotImplementedError: always -- subclasses must override this method.
        """
        raise NotImplementedError("Please override this method to provide functional behavior")

    def finalize(self, obj):
        """Hook invoked once tracking ends; no-op by default."""
        pass
class TimeObjectiveTracker(Tracker):
    """
    Provides basic tracking of any object with a timestamp. Invoking `finalize()` will
    make all recorded timestamps relative to the first event tracked unless a specific
    time value to relativize against is provided.
    """

    def __init__(self, add_creation_event=False):
        """
        Initializes the tracker. If `add_creation_event` is True, then an entry is created with value
        `0.0` with the timestamp corresponding to the creation of this tracker object.

        Args:
            add_creation_event (bool, optional): If set to True, an event for creation with value of 0.0 is added to the log. Defaults to False.
        """
        if add_creation_event:
            self.tracker = np.array([[time.time(), 0.0]])
        else:
            # Must be shape (0, 2) -- one column for the timestamp, one for the
            # value -- so the first np.concatenate of a (1, 2) entry in
            # log_objective succeeds. The previous (0, 0) shape raised a
            # ValueError on the very first log_objective call.
            self.tracker = np.empty((0, 2))

    def log_objective(self, obj):
        """
        Logs the provided object paired with the timestamp. Before finalizing by invoking `finalize()`, all events
        are logged with absolute time in epoch time in seconds.

        Args:
            obj (Any): Object to be logged.
        """
        new_track_point = np.array([[time.time(), obj]])
        self.tracker = np.concatenate((self.tracker, new_track_point), axis=0)

    def finalize(self, reference=None):
        """
        When invoked, all logged time entries are relativized against the first log entry time.
        Pass value `reference` to set the time relative to the passed-in value instead.

        Args:
            reference (float, optional): Timestamp to relativize all logged event times to. If None, relativizes all
                time entries to first time entry of the log. Defaults to None.
        """
        # relativize to the first entry of the tracked events unless reference provided
        reference = self.tracker[0, 0] if reference is None else reference
        # relativize the entries in place
        self.tracker[:, 0] -= reference
class MultipleObjectiveTracker(Tracker):
    """
    Tracks the values of a set of named, zero-argument objective functions over time.

    Every call to :meth:`log_objective` invokes each configured function and records
    its return value together with a shared timestamp. Calling :meth:`finalize`
    converts the logs into numpy arrays and relativizes all timestamps against the
    first logged entry (or a supplied reference).
    """

    def __init__(self, default_name=None, **objectives):
        """
        Initializes the tracker. Pass any additional objective functions as keyword arguments.

        Args:
            default_name (string, optional): Name under which the objective value passed into
                `log_objective` is saved. If set to None, the passed-in value is NOT saved.
                Defaults to None.
            **objectives: mapping of name to a zero-argument callable whose return value is
                logged under that name on every `log_objective` call.
        """
        self._default_name = default_name
        self.objectives = objectives
        self.log = defaultdict(list)
        self.time = []

    def log_objective(self, obj=None):
        """
        Logs one snapshot: the passed-in value (only if `default_name` was set) plus
        the return value of every configured objective function, all stamped with a
        single timestamp.

        Args:
            obj (Any, optional): Value to be logged if `default_name` is not None. Defaults to None.
        """
        timestamp = time.time()
        # Evaluate every objective up front: if any of them raises, nothing is
        # committed for this snapshot and the logs stay aligned with `self.time`.
        snapshot = {}
        if self._default_name is not None:
            snapshot[self._default_name] = obj
        for name, objective_fn in self.objectives.items():
            snapshot[name] = objective_fn()
        # All evaluations succeeded -- commit the snapshot.
        self.time.append(timestamp)
        for name, value in snapshot.items():
            self.log[name].append(value)

    def finalize(self, reference=None):
        """
        Converts all logs into numpy arrays and relativizes the logged times against
        `reference` (or against the first logged timestamp when `reference` is None).

        Args:
            reference (float, optional): Timestamp to relativize all logged event times to.
                Defaults to None.
        """
        self.time = np.array(self.time)
        offset = self.time[0] if reference is None else reference
        self.time -= offset
        for name in list(self.log):
            self.log[name] = np.array(self.log[name])

    def asdict(self, time_key="time", make_copy=True):
        """
        Outputs the content of the tracker as a single dictionary.

        Args:
            time_key (str, optional): Name of the key to save the time information as. Defaults to "time".
            make_copy (bool, optional): If True, the returned arrays are (shallow) copies. Defaults to True.

        Returns:
            dict: Dictionary containing tracked values as well as the time.
        """
        if make_copy:
            result = {name: np.copy(values) for name, values in self.log.items()}
            result[time_key] = np.copy(self.time)
        else:
            result = {name: values for name, values in self.log.items()}
            result[time_key] = self.time
        return result
@contextmanager
def eval_state(model):
    """
    Context manager that places `model` into ``eval`` mode for the duration of
    the block and restores the original train/eval mode on exit, even if the
    body raises.

    Args:
        model (PyTorch Module): PyTorch Module whose train/eval state is to be managed.

    Yields:
        PyTorch Module: The model switched to eval state.
    """
    previous_mode = model.training
    try:
        model.eval()
        yield model
    finally:
        model.train(previous_mode)
@contextmanager
def device_state(model, device):
    """
    Within the context, attempts to place the `model` onto the specified
    `device`. If `device` is CUDA and the specified device does not exist,
    the context falls back to using `cpu`. Upon exiting the context, the model
    will be placed back onto the original device, inferred based on the first
    entry of the model's parameters.

    Args:
        model (PyTorch Module): PyTorch Module object to switch device.
        device (Any): target device descriptor. Any valid PyTorch device descriptor may be used.

    Yields:
        PyTorch Module: Model placed on the new device
    """
    # infer the original device based on the device the first parameter is placed on
    original_device = next(model.parameters()).device
    # create device spec
    device = torch.device(device)
    # A bare "cuda" spec leaves `device.index` as None, which previously made the
    # comparison below raise a TypeError; treat a missing index as device 0.
    if device.type == "cuda" and (device.index or 0) >= torch.cuda.device_count():
        # fall back to using CPU
        warnings.warn("Incompatible CUDA spec. Falling back to CPU usage")
        device = "cpu"
    try:
        model.to(device)
        yield model
    finally:
        model.to(original_device)
def early_stopping(
    model,
    objective,
    interval=5,
    patience=20,
    start=0,
    max_iter=1000,
    maximize=True,
    tolerance=1e-5,
    switch_mode=True,
    restore_best=True,
    tracker=None,
    scheduler=None,
    lr_decay_steps=1,
):
    """
    Early stopping iterator. Keeps track of the best model state during training. Resets the model to its
    best state when either the number of maximum epochs or the patience (number of epochs without improvement)
    is reached.
    Also includes a convenient way to reduce the learning rate. Takes as an additional input a PyTorch scheduler object
    (e.g. torch.optim.lr_scheduler.ReduceLROnPlateau), which will automatically decrease the learning rate.
    If the patience counter is reached, the scheduler will decay the LR, and the model is set back to its best state.
    This loop will continue for n times in the variable lr_decay_steps. The patience and tolerance parameters in
    early stopping and the scheduler object should be identical.
    Args:
        model: model that is being optimized
        objective: objective function that is used for early stopping. The function must accept single positional argument `model`
            and return a single scalar quantity.
        interval: interval at which objective is evaluated to consider early stopping
        patience: number of continuous epochs the objective could remain without improvement before the iterator terminates
        start: start value for iteration (used to check against `max_iter`)
        max_iter: maximum number of iterations before the iterator terminated
        maximize: whether the objective is maximized or minimized
        tolerance: margin by which the new objective score must improve to be considered as an update in best score
        switch_mode: whether to switch model's train mode into eval prior to objective evaluation. If True (default),
            the model is switched to eval mode before objective evaluation and restored to its previous mode
            after the evaluation.
        restore_best: whether to restore the best scoring model state at the end of early stopping
        tracker (Tracker):
            Tracker to be invoked for every epoch. `log_objective` is invoked with the current value of `objective`. Note that `finalize`
            method is NOT invoked.
        scheduler: scheduler object, which automatically decreases the LR by a specified amount.
            The scheduler's `step` method is invoked, passing in the current value of `objective`
        lr_decay_steps: Number of times the learning rate should be reduced before stopping the training.
    Yields:
        (int, Any): the current epoch number and the most recently evaluated objective value.
    """
    training_status = model.training
    # Evaluate `objective(model)`, optionally switching the model into eval mode
    # for the evaluation and restoring its original mode afterwards.
    def _objective():
        if switch_mode:
            model.eval()
        ret = objective(model)
        if switch_mode:
            model.train(training_status)
        return ret
    # Invoked when patience runs out but LR-decay rounds remain: restores the best
    # state so training resumes from it. The LR reduction itself is performed by
    # `scheduler.step(...)` in the main loop, not here.
    def decay_lr(model, best_state_dict):
        old_objective = _objective()
        if restore_best:
            model.load_state_dict(best_state_dict)
            print("Restoring best model after lr decay! {:.6f} ---> {:.6f}".format(old_objective, _objective()))
    # Invoked once at the very end (or on a non-finite objective): optionally
    # restores the best recorded state and reports the final objective.
    def finalize(model, best_state_dict):
        old_objective = _objective()
        if restore_best:
            model.load_state_dict(best_state_dict)
            print("Restoring best model! {:.6f} ---> {:.6f}".format(old_objective, _objective()))
        else:
            print("Final best model! objective {:.6f}".format(_objective()))
    epoch = start
    # turn into a sign: -1 when maximizing, so "smaller is better" holds uniformly
    # in the comparison against `best_objective` below
    maximize = -1 if maximize else 1
    best_objective = current_objective = _objective()
    best_state_dict = copy_state(model)
    for repeat in range(lr_decay_steps):
        patience_counter = 0
        while patience_counter < patience and epoch < max_iter:
            # yield control back to the caller `interval` times between
            # consecutive objective evaluations
            for _ in range(interval):
                epoch += 1
                if tracker is not None:
                    tracker.log_objective(current_objective)
                # abort on NaN/inf; `.any()` handles array-valued objectives too
                if (~np.isfinite(current_objective)).any():
                    print("Objective is not Finite. Stopping training")
                    finalize(model, best_state_dict)
                    return
                yield epoch, current_objective
            current_objective = _objective()
            # if a scheduler is defined, a .step with the current objective is all that is needed to reduce the LR
            if scheduler is not None:
                scheduler.step(current_objective)
            if current_objective * maximize < best_objective * maximize - tolerance:
                # improvement beyond tolerance: snapshot the model and reset patience
                print(
                    "[{:03d}|{:02d}/{:02d}] ---> {}".format(epoch, patience_counter, patience, current_objective),
                    flush=True,
                )
                best_state_dict = copy_state(model)
                best_objective = current_objective
                patience_counter = 0
            else:
                patience_counter += 1
                print(
                    "[{:03d}|{:02d}/{:02d}] -/-> {}".format(epoch, patience_counter, patience, current_objective),
                    flush=True,
                )
        # NOTE(review): `repeat < lr_decay_steps` is always True inside this loop;
        # presumably `repeat < lr_decay_steps - 1` was intended (skip the restore
        # after the final decay round) -- confirm before changing.
        if (epoch < max_iter) & (lr_decay_steps > 1) & (repeat < lr_decay_steps):
            decay_lr(model, best_state_dict)
    finalize(model, best_state_dict)
def alternate(*args):
    """
    Given multiple iterables, returns a generator that alternately visits one
    element from each iterable at a time.

    Examples:
        >>> list(alternate(['a', 'b', 'c'], [1, 2, 3], ['Mon', 'Tue', 'Wed']))
        ['a', 1, 'Mon', 'b', 2, 'Tue', 'c', 3, 'Wed']

    Args:
        *args: one or more iterables (e.g. tuples, lists, iterators) separated by commas

    Returns:
        A generator that alternately visits one element at a time from the list of iterables
    """
    # zip stops at the shortest input, so the round-robin ends as soon as any
    # one of the iterables is exhausted.
    return (element for group in zip(*args) for element in group)
def cycle_datasets(loaders):
    """
    Given a dictionary mapping data_key into dataloader objects, returns a generator that alternately yields
    output from the loaders in the dictionary. The order of data_key traversal is determined by the first invocation to `.keys()`.
    To obtain deterministic behavior of key traversal, recommended to use OrderedDict.

    The generator terminates as soon as any one of the constituent loaders is exhausted.

    Args:
        loaders (dict): Dict mapping a data_key to a dataloader object.

    Yields:
        string, Any: data_key and the next output from the data loader corresponding to the data_key
    """
    # fix the traversal order once so the cycled keys stay aligned with the
    # round-robin over the loaders
    data_keys = list(loaders.keys())
    ordered_loaders = [loaders[key] for key in data_keys]
    yield from zip(cycle(data_keys), alternate(*ordered_loaders))
class Exhauster:
    """
    Given a dictionary of data loaders, mapping data_key into a data loader, steps through each data loader,
    moving onto the next data loader only upon exhausting the content of the current data loader.
    """

    def __init__(self, loaders):
        """
        Args:
            loaders (dict): Dict mapping a data_key to a sized, iterable data loader.
        """
        self.loaders = loaders

    def __iter__(self):
        """Yields (data_key, batch) pairs, fully exhausting each loader in turn."""
        for data_key, loader in self.loaders.items():
            for batch in loader:
                yield data_key, batch

    def __len__(self):
        """Total number of batches across all loaders."""
        # Sum over the loader objects, not the dict itself: iterating a dict
        # yields its keys, so the previous `for loader in self.loaders` summed
        # key lengths instead of loader lengths. (Matches LongCycler/ShortCycler,
        # which use `.values()`.)
        return sum(len(loader) for loader in self.loaders.values())
class LongCycler:
    """
    Cycles through trainloaders until the loader with largest size is exhausted.
    Needed for dataloaders of unequal size (as in the monkey data).
    """

    def __init__(self, loaders):
        """
        Args:
            loaders (dict): Dict mapping a data_key to a sized, iterable data loader.
        """
        self.loaders = loaders
        self.max_batches = max(len(loader) for loader in self.loaders.values())

    def __iter__(self):
        """Round-robins (data_key, batch) pairs; shorter loaders restart until the longest finishes."""
        wrapped_loaders = [cycle(loader) for loader in self.loaders.values()]
        key_stream = cycle(self.loaders.keys())
        loader_stream = cycle(wrapped_loaders)
        for _ in range(len(self.loaders) * self.max_batches):
            yield next(key_stream), next(next(loader_stream))

    def __len__(self):
        """Total batches yielded per full pass: one max-length round per loader."""
        return len(self.loaders) * self.max_batches
class ShortCycler:
    """
    Cycles through trainloaders until the loader with smallest size is exhausted.
    Needed for dataloaders of unequal size (as in the monkey data).
    """

    def __init__(self, loaders):
        """
        Args:
            loaders (dict): Dict mapping a data_key to a sized, iterable data loader.
        """
        self.loaders = loaders
        self.min_batches = min(len(loader) for loader in self.loaders.values())

    def __iter__(self):
        """Round-robins (data_key, batch) pairs, stopping after the shortest loader's worth of rounds."""
        wrapped_loaders = [cycle(loader) for loader in self.loaders.values()]
        key_stream = cycle(self.loaders.keys())
        loader_stream = cycle(wrapped_loaders)
        for _ in range(len(self.loaders) * self.min_batches):
            yield next(key_stream), next(next(loader_stream))

    def __len__(self):
        """Total batches yielded per full pass: one min-length round per loader."""
        return len(self.loaders) * self.min_batches
| [
"numpy.copy",
"numpy.empty",
"numpy.isfinite",
"time.time",
"collections.defaultdict",
"torch.cuda.device_count",
"numpy.array",
"torch.device",
"collections.OrderedDict",
"warnings.warn",
"numpy.concatenate"
] | [((658, 671), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (669, 671), False, 'from collections import OrderedDict, defaultdict\n'), ((8087, 8107), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (8099, 8107), False, 'import torch\n'), ((2593, 2648), 'numpy.concatenate', 'np.concatenate', (['(self.tracker, new_track_point)'], {'axis': '(0)'}), '((self.tracker, new_track_point), axis=0)\n', (2607, 2648), True, 'import numpy as np\n'), ((4141, 4158), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4152, 4158), False, 'from collections import OrderedDict, defaultdict\n'), ((4609, 4620), 'time.time', 'time.time', ([], {}), '()\n', (4618, 4620), False, 'import time\n'), ((5834, 5853), 'numpy.array', 'np.array', (['self.time'], {}), '(self.time)\n', (5842, 5853), True, 'import numpy as np\n'), ((8225, 8291), 'warnings.warn', 'warnings.warn', (['"""Incompatible CUDA spec. Falling back to CPU usage"""'], {}), "('Incompatible CUDA spec. Falling back to CPU usage')\n", (8238, 8291), False, 'import warnings\n'), ((2199, 2215), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (2207, 2215), True, 'import numpy as np\n'), ((6018, 6029), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (6026, 6029), True, 'import numpy as np\n'), ((6635, 6653), 'numpy.copy', 'np.copy', (['self.time'], {}), '(self.time)\n', (6642, 6653), True, 'import numpy as np\n'), ((8157, 8182), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8180, 8182), False, 'import torch\n'), ((6545, 6555), 'numpy.copy', 'np.copy', (['v'], {}), '(v)\n', (6552, 6555), True, 'import numpy as np\n'), ((2550, 2561), 'time.time', 'time.time', ([], {}), '()\n', (2559, 2561), False, 'import time\n'), ((2152, 2163), 'time.time', 'time.time', ([], {}), '()\n', (2161, 2163), False, 'import time\n'), ((12503, 12533), 'numpy.isfinite', 'np.isfinite', (['current_objective'], {}), '(current_objective)\n', (12514, 12533), True, 'import numpy as 
np\n')] |
import numpy as np
import tensorflow as tf
class AnchorBoxGenerator:
    '''Generates anchor boxes.

    This class has operations to generate anchor boxes for feature maps at
    strides ``2**level`` for each level in ``[min_level, max_level]``. Each
    anchor box is of the format `[x, y, width, height]` (center coordinates
    plus dimensions, in input-image pixels).

    Attributes:
        aspect_ratios: A list of float values representing the aspect ratios of
            the anchor boxes at each location on the feature map
        scales: A list of float values representing the scale of the anchor boxes
            at each location on the feature map.
        areas: A list of float values representing the areas of the anchor
            boxes for each feature map in the feature pyramid.
    '''

    def __init__(self, img_h, img_w, min_level, max_level, params):
        # NOTE(review): `params` is assumed to expose `.areas`, `.aspect_ratios`
        # and `.scales` attributes -- confirm against the config object used by
        # callers.
        self.image_height = img_h
        self.image_width = img_w
        self.areas = params.areas
        self.aspect_ratios = params.aspect_ratios
        self.scales = params.scales
        # anchors per feature-map location: one per (aspect_ratio, scale) pair
        self._num_anchors = len(params.aspect_ratios) * len(params.scales)
        self._min_level = min_level
        self._max_level = max_level
        # feature-map stride per pyramid level (2**level)
        self._strides = [2**i for i in range(min_level, max_level+1)]
        self._anchor_dims = self._compute_dims()
        self._anchor_boundaries = self._compute_anchor_boundaries()
        self._boxes = self.get_anchors()

    def _compute_anchor_boundaries(self):
        '''Computes cumulative anchor-count offsets, one entry per pyramid level.

        Returns a list of length ``max_level - min_level + 2`` starting at 0,
        where consecutive entries delimit the index range of each level's
        anchors inside the flat tensor returned by `get_anchors`.
        '''
        boundaries = [0]
        for i in range(self._min_level, self._max_level + 1):
            # anchors at this level = ceil(H / stride) * ceil(W / stride) * anchors-per-location
            num_anchors = int(
                np.ceil(self.image_height / 2**i) *
                np.ceil(self.image_width / 2**i) * self._num_anchors)
            boundaries += [boundaries[-1] + num_anchors]
        return boundaries

    def _compute_dims(self):
        '''Computes anchor dims for all ratios and scales at all levels'''
        anchor_dims_all = []
        for area in self.areas:
            anchor_dims = []
            for ratio in self.aspect_ratios:
                # solve h * w = area with w / h = ratio
                h = tf.math.sqrt(area / ratio)
                w = area / h
                wh = tf.reshape(tf.stack([w, h], axis=-1), [1, 1, 2])
                for scale in self.scales:
                    anchor_dims.append(scale * wh)
            # shape [1, 1, num_anchors, 2]: one (w, h) per ratio/scale combination
            anchor_dims_all.append(tf.stack(anchor_dims, axis=-2))
        return anchor_dims_all

    def _get_anchors(self, feature_height, feature_width, level):
        '''Generates anchor boxes for a given feature map size and level

        Arguments:
            feature_height: An integer representing the height of the feature map.
            feature_width: An integer representing the width of the feature map.
            level: An integer representing the level of the feature map in the
                feature pyramid.

        Returns:
            anchor boxes with the shape
            `(feature_height * feature_width * num_anchors, 4)`
        '''
        # cell-center coordinates in feature-map units (+0.5 centers them),
        # then scaled by the stride into input-image pixels
        rx = tf.range(feature_width, dtype=tf.float32) + 0.5
        ry = tf.range(feature_height, dtype=tf.float32) + 0.5
        centers = tf.stack(tf.meshgrid(rx, ry), axis=-1) * \
            self._strides[level - self._min_level]
        # broadcast each center across the per-location anchors
        centers = tf.expand_dims(centers, axis=-2)
        centers = tf.tile(centers, [1, 1, self._num_anchors, 1])
        wh = tf.tile(self._anchor_dims[level - self._min_level],
                     [feature_height, feature_width, 1, 1])
        # [x, y, width, height] per anchor, flattened over spatial positions
        anchors = tf.concat([centers, wh], axis=-1)
        return tf.reshape(
            anchors, [feature_height * feature_width * self._num_anchors, 4])

    def get_anchors(self):
        '''Generates anchor boxes for all the feature maps of the feature pyramid.

        Returns:
            anchor boxes for all the feature maps, stacked as a single tensor
            with shape `(total_anchors, 4)`
        '''
        anchors = [
            self._get_anchors(
                tf.math.ceil(self.image_height / 2**i),
                tf.math.ceil(self.image_width / 2**i),
                i,
            ) for i in range(self._min_level, self._max_level + 1)
        ]
        return tf.concat(anchors, axis=0)

    @property
    def anchor_boundaries(self):
        # cumulative per-level offsets into `boxes` (see _compute_anchor_boundaries)
        return self._anchor_boundaries

    @property
    def boxes(self):
        # all anchors precomputed at construction time, shape (total_anchors, 4)
        return self._boxes
| [
"tensorflow.meshgrid",
"tensorflow.range",
"tensorflow.math.ceil",
"numpy.ceil",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.tile",
"tensorflow.math.sqrt",
"tensorflow.expand_dims"
] | [((3288, 3320), 'tensorflow.expand_dims', 'tf.expand_dims', (['centers'], {'axis': '(-2)'}), '(centers, axis=-2)\n', (3302, 3320), True, 'import tensorflow as tf\n'), ((3339, 3385), 'tensorflow.tile', 'tf.tile', (['centers', '[1, 1, self._num_anchors, 1]'], {}), '(centers, [1, 1, self._num_anchors, 1])\n', (3346, 3385), True, 'import tensorflow as tf\n'), ((3399, 3493), 'tensorflow.tile', 'tf.tile', (['self._anchor_dims[level - self._min_level]', '[feature_height, feature_width, 1, 1]'], {}), '(self._anchor_dims[level - self._min_level], [feature_height,\n feature_width, 1, 1])\n', (3406, 3493), True, 'import tensorflow as tf\n'), ((3529, 3562), 'tensorflow.concat', 'tf.concat', (['[centers, wh]'], {'axis': '(-1)'}), '([centers, wh], axis=-1)\n', (3538, 3562), True, 'import tensorflow as tf\n'), ((3578, 3654), 'tensorflow.reshape', 'tf.reshape', (['anchors', '[feature_height * feature_width * self._num_anchors, 4]'], {}), '(anchors, [feature_height * feature_width * self._num_anchors, 4])\n', (3588, 3654), True, 'import tensorflow as tf\n'), ((4202, 4228), 'tensorflow.concat', 'tf.concat', (['anchors'], {'axis': '(0)'}), '(anchors, axis=0)\n', (4211, 4228), True, 'import tensorflow as tf\n'), ((3048, 3089), 'tensorflow.range', 'tf.range', (['feature_width'], {'dtype': 'tf.float32'}), '(feature_width, dtype=tf.float32)\n', (3056, 3089), True, 'import tensorflow as tf\n'), ((3109, 3151), 'tensorflow.range', 'tf.range', (['feature_height'], {'dtype': 'tf.float32'}), '(feature_height, dtype=tf.float32)\n', (3117, 3151), True, 'import tensorflow as tf\n'), ((2162, 2188), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['(area / ratio)'], {}), '(area / ratio)\n', (2174, 2188), True, 'import tensorflow as tf\n'), ((2416, 2446), 'tensorflow.stack', 'tf.stack', (['anchor_dims'], {'axis': '(-2)'}), '(anchor_dims, axis=-2)\n', (2424, 2446), True, 'import tensorflow as tf\n'), ((3185, 3204), 'tensorflow.meshgrid', 'tf.meshgrid', (['rx', 'ry'], {}), '(rx, ry)\n', (3196, 3204), 
True, 'import tensorflow as tf\n'), ((3996, 4036), 'tensorflow.math.ceil', 'tf.math.ceil', (['(self.image_height / 2 ** i)'], {}), '(self.image_height / 2 ** i)\n', (4008, 4036), True, 'import tensorflow as tf\n'), ((4052, 4091), 'tensorflow.math.ceil', 'tf.math.ceil', (['(self.image_width / 2 ** i)'], {}), '(self.image_width / 2 ** i)\n', (4064, 4091), True, 'import tensorflow as tf\n'), ((2250, 2275), 'tensorflow.stack', 'tf.stack', (['[w, h]'], {'axis': '(-1)'}), '([w, h], axis=-1)\n', (2258, 2275), True, 'import tensorflow as tf\n'), ((1713, 1748), 'numpy.ceil', 'np.ceil', (['(self.image_height / 2 ** i)'], {}), '(self.image_height / 2 ** i)\n', (1720, 1748), True, 'import numpy as np\n'), ((1765, 1799), 'numpy.ceil', 'np.ceil', (['(self.image_width / 2 ** i)'], {}), '(self.image_width / 2 ** i)\n', (1772, 1799), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from functools import lru_cache
import numpy as np
from ..base import Property
from ..types.prediction import GaussianMeasurementPrediction
from ..types.update import Update
from ..models.measurement.linear import LinearGaussian
from ..updater.kalman import KalmanUpdater
class InformationKalmanUpdater(KalmanUpdater):
r"""A class which implements the update of information form of the Kalman filter. This is
conceptually very simple. The update proceeds as:
.. math::
Y_{k|k} = Y_{k|k-1} + H^{T}_k R^{-1}_k H_k
\mathbf{y}_{k|k} = \mathbf{y}_{k|k-1} + H^{T}_k R^{-1}_k \mathbf{z}_{k}
where :math:`\mathbf{y}_{k|k-1}` is the predicted information state and :math:`Y_{k|k-1}` the
predicted information matrix which form the :class:`~.InformationStatePrediction` object. The
measurement matrix :math:`H_k` and measurement covariance :math:`R_k` are those in the Kalman
filter (see tutorial 1). An :class:`~.InformationStateUpdate` object is returned.
Note
----
Analogously with the :class:`~.InformationKalmanPredictor`, the measurement model is queried
for the existence of an :meth:`inverse_covar()` property. If absent, the :meth:`covar()` is
inverted.
"""
measurement_model: LinearGaussian = Property(
default=None,
doc="A linear Gaussian measurement model. This need not be defined if "
"a measurement model is provided in the measurement. If no model "
"specified on construction, or in the measurement, then error "
"will be thrown.")
def _inverse_measurement_covar(self, measurement_model, **kwargs):
"""Return the inverse of the measurement covariance (or calculate it)
Parameters
----------
measurement_model
The measurement model to be queried
**kwargs : various, optional
These are passed to :meth:`~.LinearGaussian.covar()`
Returns
-------
: :class:`numpy.ndarray`
The inverse of the measurement covariance, :math:`R_k^{-1}`
"""
if hasattr(measurement_model, 'inverse_covar'):
inv_measurement_covar = measurement_model.inverse_covar(**kwargs)
else:
inv_measurement_covar = np.linalg.inv(measurement_model.covar(**kwargs))
return inv_measurement_covar
@lru_cache()
def predict_measurement(self, predicted_state, measurement_model=None, **kwargs):
r"""There's no direct analogue of a predicted measurement in the information form. This
method is therefore provided to return the predicted measurement as would the standard
Kalman updater. This is mainly for compatibility as it's not anticipated that it would
be used in the usual operation of the information filter.
Parameters
----------
predicted_information_state : :class:`~.State`
The predicted state in information form :math:`\mathbf{y}_{k|k-1}`
measurement_model : :class:`~.MeasurementModel`
The measurement model. If omitted, the model in the updater object
is used
**kwargs : various
These are passed to :meth:`~.MeasurementModel.matrix()`
Returns
-------
: :class:`~.GaussianMeasurementPrediction`
The measurement prediction, :math:`H \mathbf{x}_{k|k-1}`
"""
# If a measurement model is not specified then use the one that's
# native to the updater
measurement_model = self._check_measurement_model(measurement_model)
hh = self._measurement_matrix(predicted_state=predicted_state,
measurement_model=measurement_model,
**kwargs)
predicted_covariance = np.linalg.inv(predicted_state.precision)
predicted_state_mean = predicted_covariance @ predicted_state.state_vector
predicted_measurement = hh @ predicted_state_mean
innovation_covariance = hh @ predicted_covariance @ hh.T + measurement_model.covar()
return GaussianMeasurementPrediction(predicted_measurement, innovation_covariance,
predicted_state.timestamp,
cross_covar=predicted_covariance @ hh.T)
def update(self, hypothesis, **kwargs):
r"""The Information filter update (corrector) method. Given a hypothesised association
between a predicted information state and an actual measurement, calculate the posterior
information state.
Parameters
----------
hypothesis : :class:`~.SingleHypothesis`
the prediction-measurement association hypothesis. This hypothesis
carries a predicted information state.
**kwargs : various
These are passed to :meth:`predict_measurement`
Returns
-------
: :class:`~.InformationStateUpdate`
The posterior information state with information state :math:`\mathbf{y}_{k|k}` and
precision :math:`Y_{k|k}`
"""
measurement_model = hypothesis.measurement.measurement_model
measurement_model = self._check_measurement_model(measurement_model)
pred_info_mean = hypothesis.prediction.state_vector
hh = measurement_model.matrix()
invr = self._inverse_measurement_covar(measurement_model)
posterior_precision = hypothesis.prediction.precision + hh.T @ invr @ hh
posterior_information_mean = pred_info_mean + hh.T @ invr @ \
hypothesis.measurement.state_vector
if self.force_symmetric_covariance:
posterior_precision = (posterior_precision + posterior_precision.T)/2
return Update.from_state(hypothesis.prediction, posterior_information_mean,
posterior_precision,
timestamp=hypothesis.measurement.timestamp, hypothesis=hypothesis)
| [
"functools.lru_cache",
"numpy.linalg.inv"
] | [((2391, 2402), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (2400, 2402), False, 'from functools import lru_cache\n'), ((3840, 3880), 'numpy.linalg.inv', 'np.linalg.inv', (['predicted_state.precision'], {}), '(predicted_state.precision)\n', (3853, 3880), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from datetime import datetime
import tempfile
import string
import os.path
import random
import numpy as np
import pandas as pd
import pytz
from impactutils.io.container import HDFContainer
TIMEFMT = '%Y-%d-%m %H:%M:%S.%f'
def test_hdf_dictonaries():
f, testfile = tempfile.mkstemp()
os.close(f)
try:
container = HDFContainer.create(testfile)
# before we put anything in here, let's make sure we get empty lists from
# all of the methods that are supposed to return lists of stuff.
assert container.getDictionaries() == []
assert container.getLists() == []
assert container.getArrays() == []
assert container.getStrings() == []
assert container.getDataFrames() == []
# test simple dictionary
print('Test simple dictionary...')
indict1 = {'name': 'Fred', 'age': 34,
'dob': datetime(1950, 1, 1, 23, 43, 12).strftime(TIMEFMT)}
container.setDictionary('person', indict1)
outdict = container.getDictionary('person')
assert outdict == indict1
# this should fail because we can't serialize datetimes to json.
try:
indict1 = {'name': 'Fred', 'age': 34,
'dob': datetime(1950, 1, 1, 23, 43, 12)}
container.setDictionary('person', indict1)
except TypeError as te:
print('Expected failure: %s' % str(te))
assert 1 == 1
# test more complicated dictionary
print('Test complex dictionary...')
indict2 = {'names': ['Fred', 'Akyüz'], 'ages': [34, 33]}
container.setDictionary('people', indict2)
outdict = container.getDictionary('people')
assert outdict == indict2
# test getDictionaryNames()
print('Test dictionary names...')
names = container.getDictionaries()
assert sorted(names) == sorted(['person', 'people'])
# test dropping a dictionary
container.dropDictionary('person')
assert container.getDictionaries() == ['people']
# try closing container and reopening
container.close()
container2 = HDFContainer.load(testfile)
assert container2.getDictionaries() == ['people']
except Exception:
assert 1 == 2
finally:
os.remove(testfile)
def test_hdf_lists():
f, testfile = tempfile.mkstemp()
os.close(f)
try:
container = HDFContainer.create(testfile)
# test setting a list of strings
inlist = ['one', 'two', 'three']
container.setList('test_list1', inlist)
assert container.getList('test_list1') == inlist
# test setting a list of numbers
inlist = [5.4, 1.2, 3.4]
container.setList('test_list2', inlist)
assert container.getList('test_list2') == inlist
# test getlists
assert sorted(container.getLists()) == [
'test_list1', 'test_list2']
# test setting a list with dictionaries in it
inlist = [{'a': 1}, {'b': 2}]
container.setList('test_list3', inlist)
# drop a list
container.dropList('test_list1')
assert sorted(container.getLists()) == ['test_list2', 'test_list3']
# close container, re-open
container.close()
container2 = HDFContainer.load(testfile)
assert sorted(container2.getLists()) == ['test_list2', 'test_list3']
except Exception:
assert 1 == 2
finally:
os.remove(testfile)
def test_hdf_arrays():
f, testfile = tempfile.mkstemp()
os.close(f)
try:
container = HDFContainer.create(testfile)
# test simple array, without compression
print('Test simple array...')
data = np.random.rand(4, 3)
metadata = {'xmin': 54.1, 'xmax': 123.1}
container.setArray('testdata1', data, metadata, compression=False)
outdata, outmetadata = container.getArray('testdata1')
np.testing.assert_array_equal(outdata, data)
assert outmetadata == metadata
# test array with nans, and compression on
print('Test nans array...')
data = np.random.rand(4, 3)
data[1, 1] = np.nan
metadata = {'xmin': 54.1, 'xmax': 123.1}
container.setArray('testdata2', data, metadata, compression=True)
outdata, outmetadata = container.getArray('testdata2')
np.testing.assert_array_equal(outdata, data)
assert outmetadata == metadata
# test getArrayNames
print('Test array names...')
names = container.getArrays()
assert sorted(names) == sorted(['testdata1', 'testdata2'])
# drop an array
container.dropArray('testdata1')
names = container.getArrays()
assert names == ['testdata2']
# close container, re-open
container.close()
container2 = HDFContainer.load(testfile)
assert container2.getArrays() == ['testdata2']
except Exception:
assert 1 == 2
finally:
os.remove(testfile)
def test_hdf_strings():
f, testfile = tempfile.mkstemp()
os.close(f)
try:
container = HDFContainer.create(testfile)
# test simple string
print('Test simple string...')
string1 = "These are the times that try men's souls."
container.setString('test_string1', string1)
outstring = container.getString('test_string1')
assert outstring == string1
# test unicode string
print('Test unicode string...')
string2 = "#SOURCE: <NAME>., <NAME>, <NAME>, <NAME>, <NAME>,"
container.setString('test_string2', string2)
outstring = container.getString('test_string2')
assert outstring == string2
# test getstrings
print('Test string names...')
names = container.getStrings()
assert names == ['test_string1', 'test_string2']
# drop string
container.dropString('test_string1')
assert container.getStrings() == ['test_string2']
# test a really big string
sets = string.ascii_uppercase + string.digits + string.ascii_lowercase
num_chars = 1000000
print('Making a really big string...')
big_string = ''.join(random.choice(sets) for _ in range(num_chars))
container.setString('big', big_string)
big_string2 = container.getString('big')
assert big_string == big_string2
# close container, re-open
container.close()
container2 = HDFContainer.load(testfile)
assert container2.getStrings() == ['big', 'test_string2']
except Exception:
assert 1 == 2
finally:
os.remove(testfile)
def test_hdf_dataframes():
f, testfile = tempfile.mkstemp()
os.close(f)
try:
container = HDFContainer.create(testfile)
# test pandas dataframe
print('Test dataframe...')
ttime1 = datetime(1900, 1, 1)
ttime2 = datetime(2000, 1, 1)
utc = pytz.timezone('UTC')
utc_time1 = utc.localize(ttime1)
utc_time2 = utc.localize(ttime2)
d = {'Time': [utc_time1, utc_time2],
'ID': ['thing1', 'thing2'],
'Number': np.array([12.34, 25.67])}
df = pd.DataFrame(d)
container.setDataFrame('testframe1', df)
outdf = container.getDataFrame('testframe1')
assert outdf['Number'].sum() == df['Number'].sum()
assert pd.to_datetime(outdf['Time'][0]) == df['Time'][0]
# test another dataframe
df2 = pd.DataFrame(data=[4, 5, 6, 7], index=range(0, 4), columns=['A'])
container.setDataFrame('testframe2', df2)
outdf = container.getDataFrame('testframe2')
outdf['A'].sum() == df2['A'].sum()
# test getdataframes
assert sorted(container.getDataFrames()) == [
'testframe1', 'testframe2']
# drop a dataframe
container.dropDataFrame('testframe1')
assert container.getDataFrames() == ['testframe2']
# close container, re-open
container.close()
container2 = HDFContainer.load(testfile)
assert container2.getDataFrames() == ['testframe2']
except Exception:
assert 1 == 2
finally:
os.remove(testfile)
if __name__ == '__main__':
test_hdf_dictonaries()
test_hdf_lists()
test_hdf_arrays()
test_hdf_strings()
test_hdf_dataframes()
| [
"pandas.DataFrame",
"tempfile.mkstemp",
"numpy.testing.assert_array_equal",
"random.choice",
"datetime.datetime",
"impactutils.io.container.HDFContainer.load",
"numpy.array",
"pytz.timezone",
"pandas.to_datetime",
"numpy.random.rand",
"impactutils.io.container.HDFContainer.create"
] | [((298, 316), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (314, 316), False, 'import tempfile\n'), ((2397, 2415), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (2413, 2415), False, 'import tempfile\n'), ((3571, 3589), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (3587, 3589), False, 'import tempfile\n'), ((5108, 5126), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (5124, 5126), False, 'import tempfile\n'), ((6764, 6782), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (6780, 6782), False, 'import tempfile\n'), ((362, 391), 'impactutils.io.container.HDFContainer.create', 'HDFContainer.create', (['testfile'], {}), '(testfile)\n', (381, 391), False, 'from impactutils.io.container import HDFContainer\n'), ((2183, 2210), 'impactutils.io.container.HDFContainer.load', 'HDFContainer.load', (['testfile'], {}), '(testfile)\n', (2200, 2210), False, 'from impactutils.io.container import HDFContainer\n'), ((2461, 2490), 'impactutils.io.container.HDFContainer.create', 'HDFContainer.create', (['testfile'], {}), '(testfile)\n', (2480, 2490), False, 'from impactutils.io.container import HDFContainer\n'), ((3337, 3364), 'impactutils.io.container.HDFContainer.load', 'HDFContainer.load', (['testfile'], {}), '(testfile)\n', (3354, 3364), False, 'from impactutils.io.container import HDFContainer\n'), ((3635, 3664), 'impactutils.io.container.HDFContainer.create', 'HDFContainer.create', (['testfile'], {}), '(testfile)\n', (3654, 3664), False, 'from impactutils.io.container import HDFContainer\n'), ((3768, 3788), 'numpy.random.rand', 'np.random.rand', (['(4)', '(3)'], {}), '(4, 3)\n', (3782, 3788), True, 'import numpy as np\n'), ((3984, 4028), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['outdata', 'data'], {}), '(outdata, data)\n', (4013, 4028), True, 'import numpy as np\n'), ((4171, 4191), 'numpy.random.rand', 'np.random.rand', (['(4)', '(3)'], {}), '(4, 3)\n', (4185, 4191), True, 'import numpy as 
np\n'), ((4414, 4458), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['outdata', 'data'], {}), '(outdata, data)\n', (4443, 4458), True, 'import numpy as np\n'), ((4895, 4922), 'impactutils.io.container.HDFContainer.load', 'HDFContainer.load', (['testfile'], {}), '(testfile)\n', (4912, 4922), False, 'from impactutils.io.container import HDFContainer\n'), ((5172, 5201), 'impactutils.io.container.HDFContainer.create', 'HDFContainer.create', (['testfile'], {}), '(testfile)\n', (5191, 5201), False, 'from impactutils.io.container import HDFContainer\n'), ((6537, 6564), 'impactutils.io.container.HDFContainer.load', 'HDFContainer.load', (['testfile'], {}), '(testfile)\n', (6554, 6564), False, 'from impactutils.io.container import HDFContainer\n'), ((6828, 6857), 'impactutils.io.container.HDFContainer.create', 'HDFContainer.create', (['testfile'], {}), '(testfile)\n', (6847, 6857), False, 'from impactutils.io.container import HDFContainer\n'), ((6943, 6963), 'datetime.datetime', 'datetime', (['(1900)', '(1)', '(1)'], {}), '(1900, 1, 1)\n', (6951, 6963), False, 'from datetime import datetime\n'), ((6981, 7001), 'datetime.datetime', 'datetime', (['(2000)', '(1)', '(1)'], {}), '(2000, 1, 1)\n', (6989, 7001), False, 'from datetime import datetime\n'), ((7016, 7036), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (7029, 7036), False, 'import pytz\n'), ((7267, 7282), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (7279, 7282), True, 'import pandas as pd\n'), ((8109, 8136), 'impactutils.io.container.HDFContainer.load', 'HDFContainer.load', (['testfile'], {}), '(testfile)\n', (8126, 8136), False, 'from impactutils.io.container import HDFContainer\n'), ((7228, 7252), 'numpy.array', 'np.array', (['[12.34, 25.67]'], {}), '([12.34, 25.67])\n', (7236, 7252), True, 'import numpy as np\n'), ((7459, 7491), 'pandas.to_datetime', 'pd.to_datetime', (["outdf['Time'][0]"], {}), "(outdf['Time'][0])\n", (7473, 7491), True, 'import pandas as 
pd\n'), ((1278, 1310), 'datetime.datetime', 'datetime', (['(1950)', '(1)', '(1)', '(23)', '(43)', '(12)'], {}), '(1950, 1, 1, 23, 43, 12)\n', (1286, 1310), False, 'from datetime import datetime\n'), ((6270, 6289), 'random.choice', 'random.choice', (['sets'], {}), '(sets)\n', (6283, 6289), False, 'import random\n'), ((922, 954), 'datetime.datetime', 'datetime', (['(1950)', '(1)', '(1)', '(23)', '(43)', '(12)'], {}), '(1950, 1, 1, 23, 43, 12)\n', (930, 954), False, 'from datetime import datetime\n')] |
"""
predict using trained model,
draw predicted landmarks on 112*112 input image and
"""
import torch
from torch.utils.data import DataLoader
import numpy as np
import cv2
from network_utils import MyNet, MyNet2, MyDataSet
from PIL import Image
def predict(trained_model_path, model, loader, data):
model.load_state_dict(torch.load(trained_model_path))
model.eval()
with torch.no_grad():
for batch_idx, batch in enumerate(loader):
image = batch['image']
landmarks = batch['landmarks'] # 1*1*42
landmarks_truth = landmarks.numpy()[0, 0, :]
print('len of landmarks_truth: ', len(landmarks_truth))
x = list(map(int, landmarks_truth[0:: 2]))
y = list(map(int, landmarks_truth[1:: 2]))
landmarks_truth = list(zip(x, y))
output = model(image)
output = output.numpy()[0]
output_x = list(map(int, output[0:: 2]))
output_y = list(map(int, output[1:: 2]))
landmarks_predicted = list(zip(output_x, output_y))
# print('landmarks_predicted:', landmarks_predicted)
# draw on 112*112
image = image.numpy()[0].astype(np.uint8)
print('image shape: ', image.shape)
image = image.transpose(1, 2, 0) # torch.Tensor C*H*W, numpy: H*W*C
print('image shape: ', image.shape)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
for landmark_truth, landmark_predicted in zip(landmarks_truth, landmarks_predicted):
# green truth landmarks
cv2.circle(image, center=tuple(landmark_truth), radius=2, color=(0, 255, 0), thickness=-1)
# blue predicted landmarks
cv2.circle(image, center=tuple(landmark_predicted), radius=2, color=(255, 0, 0), thickness=-1)
cv2.imshow('112', image)
cv2.imwrite('..\\Result\\' + str(batch_idx)+'.jpg', image)
# draw on original size image
origin_image_path = data[batch_idx][0]
x1, y1 = int(float(data[batch_idx][1])), int(float(data[batch_idx][2])) # rect left up point
x2, y2 = int(float(data[batch_idx][3])), int(float(data[batch_idx][4])) # rect right down point
rect_w = x2-x1
rect_h = y2-y1
origin_image = Image.open(origin_image_path).convert('RGB')
origin_image = np.asarray(origin_image, dtype=np.uint8)
origin_image = cv2.cvtColor(origin_image, cv2.COLOR_RGB2BGR)
for landmark_truth, landmark_predicted in zip(landmarks_truth, landmarks_predicted):
# green truth landmarks
x, y = landmark_truth
x = x1 + x / 112 * rect_w
y = y1 + y / 112 * rect_h
cv2.circle(origin_image, center=(int(x), int(y)), radius=2, color=(0, 255, 0), thickness=-1)
# blue predicted landmarks
x, y = landmark_predicted
x = x1 + x / 112 * rect_w
y = y1 + y / 112 * rect_h
cv2.circle(origin_image, center=(int(x), int(y)), radius=2, color=(255, 0, 0), thickness=-1)
cv2.imshow('original image', origin_image)
cv2.imwrite('..\\Result\\origin' + str(batch_idx)+'.jpg', origin_image)
key = cv2.waitKey()
if key == 27:
# exit()
cv2.destroyAllWindows()
if __name__ == '__main__':
model_path = '..\\mature\\model1.pt'
# model_path = '..\\mature\\model2.pt'
test_txt_path = '..\\test.txt'
my_data = []
with open(test_txt_path, 'r') as f:
lines = f.readlines() # list
for line in lines:
line = line.split(' ')
my_data.append(line)
torch.manual_seed(1)
use_cuda = False # torch.cuda.is_available()
my_device = torch.device("cuda" if use_cuda else "cpu") # cuda: 0
# For multi GPUs, nothing need to change here
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
test_set = MyDataSet(test_txt_path, 'test')
test_loader = DataLoader(test_set, **kwargs)
my_model = MyNet().to(my_device)
# prediction
predict(model_path, my_model, test_loader, my_data)
| [
"torch.utils.data.DataLoader",
"cv2.cvtColor",
"torch.manual_seed",
"torch.load",
"numpy.asarray",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"PIL.Image.open",
"torch.device",
"network_utils.MyDataSet",
"torch.no_grad",
"network_utils.MyNet"
] | [((3785, 3805), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (3802, 3805), False, 'import torch\n'), ((3872, 3915), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (3884, 3915), False, 'import torch\n'), ((4065, 4097), 'network_utils.MyDataSet', 'MyDataSet', (['test_txt_path', '"""test"""'], {}), "(test_txt_path, 'test')\n", (4074, 4097), False, 'from network_utils import MyNet, MyNet2, MyDataSet\n'), ((4116, 4146), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {}), '(test_set, **kwargs)\n', (4126, 4146), False, 'from torch.utils.data import DataLoader\n'), ((327, 357), 'torch.load', 'torch.load', (['trained_model_path'], {}), '(trained_model_path)\n', (337, 357), False, 'import torch\n'), ((385, 400), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (398, 400), False, 'import torch\n'), ((1413, 1451), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2BGR'], {}), '(image, cv2.COLOR_RGB2BGR)\n', (1425, 1451), False, 'import cv2\n'), ((1863, 1887), 'cv2.imshow', 'cv2.imshow', (['"""112"""', 'image'], {}), "('112', image)\n", (1873, 1887), False, 'import cv2\n'), ((2421, 2461), 'numpy.asarray', 'np.asarray', (['origin_image'], {'dtype': 'np.uint8'}), '(origin_image, dtype=np.uint8)\n', (2431, 2461), True, 'import numpy as np\n'), ((2489, 2534), 'cv2.cvtColor', 'cv2.cvtColor', (['origin_image', 'cv2.COLOR_RGB2BGR'], {}), '(origin_image, cv2.COLOR_RGB2BGR)\n', (2501, 2534), False, 'import cv2\n'), ((3193, 3235), 'cv2.imshow', 'cv2.imshow', (['"""original image"""', 'origin_image'], {}), "('original image', origin_image)\n", (3203, 3235), False, 'import cv2\n'), ((3338, 3351), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (3349, 3351), False, 'import cv2\n'), ((4163, 4170), 'network_utils.MyNet', 'MyNet', ([], {}), '()\n', (4168, 4170), False, 'from network_utils import MyNet, MyNet2, MyDataSet\n'), ((3419, 3442), 'cv2.destroyAllWindows', 
'cv2.destroyAllWindows', ([], {}), '()\n', (3440, 3442), False, 'import cv2\n'), ((2349, 2378), 'PIL.Image.open', 'Image.open', (['origin_image_path'], {}), '(origin_image_path)\n', (2359, 2378), False, 'from PIL import Image\n')] |
import cv2 as cv
from pyzbar.pyzbar import decode
import numpy as np
import pyautogui
import socket
from time import time
import pywintypes
import win32gui, win32ui, win32con, win32api
class WindowCapture():
w = 0
h = 0
hwnd = None
cropped_x = 0
cropped_y = 0
offset_x = 0
offset_y = 0
def __init__(self, window_name=None) :
self.SM_XVIRTUALSCREEN = 76
self.SM_YVIRTUALSCREEN = 77
self.SM_CXVIRTUALSCREEN = 78
self.SM_CYVIRTUALSCREEN = 79
if window_name is None:
self.hwnd = win32gui.GetDesktopWindow()
else:
self.hwnd = win32gui.FindWindow(None, window_name)
if not self.hwnd:
raise Exception('Window not found: {}'.format(window_name))
# get window size
window_rect = win32gui.GetWindowRect(self.hwnd)
self.w = window_rect[2] - window_rect[0]
self.h = window_rect[3] - window_rect[1]
# remove window border and title bar
border_pixels = 8
titlebar_pixels = 30
self.w = self.w - (border_pixels*2)
self.h = self.h - titlebar_pixels - border_pixels
self.cropped_x = border_pixels
self.cropped_y = titlebar_pixels
pass
@staticmethod
def get_window_names():
def winEnumHandler(hwnd, ctx):
if win32gui.IsWindowVisible(hwnd):
print(hex(hwnd),win32gui.GetWindowText(hwnd))
win32gui.EnumWindows(winEnumHandler,None)
def get_screenshot(self):
# get window image data
wDC = win32gui.GetWindowDC(self.hwnd)
dcObj=win32ui.CreateDCFromHandle(wDC)
cDC=dcObj.CreateCompatibleDC()
dataBitMap = win32ui.CreateBitmap()
dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
cDC.SelectObject(dataBitMap)
cDC.BitBlt((0,0),(self.w, self.h) , dcObj, (self.cropped_x,self.cropped_y), win32con.SRCCOPY)
signedIntsArray = dataBitMap.GetBitmapBits(True)
img = np.fromstring(signedIntsArray, dtype='uint8')
img.shape = (self.h,self.w,4)
#img = img[35:self.h-8,10:self.w-10]
# Free Resources
dcObj.DeleteDC()
cDC.DeleteDC()
win32gui.ReleaseDC(self.hwnd, wDC)
win32gui.DeleteObject(dataBitMap.GetHandle())
return img
def get_sec_screen(self,width_first_screen,height_first_screen,widthSecondScreen,heightSecondScreen):
# width = largura
w = win32api.GetSystemMetrics(self.SM_CXVIRTUALSCREEN)
h = win32api.GetSystemMetrics(self.SM_CYVIRTUALSCREEN)
l = win32api.GetSystemMetrics(self.SM_XVIRTUALSCREEN)
t = win32api.GetSystemMetrics(self.SM_YVIRTUALSCREEN)
hwndDC = win32gui.GetWindowDC(self.hwnd)
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
saveDC = mfcDC.CreateCompatibleDC()
saveBitMap = win32ui.CreateBitmap()
saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
saveDC.SelectObject(saveBitMap)
saveDC.BitBlt((0, 0), (w, h), mfcDC, (width_first_screen, t), win32con.SRCCOPY)
signedIntsArray = saveBitMap.GetBitmapBits(True)
img = np.frombuffer(signedIntsArray, dtype='uint8')
img.shape = (h,w,4)
offset_Y = np.absolute(height_first_screen - heightSecondScreen)
# x1 -> width 1st screen // y1 -> height first monitor
x0,y0,x1,y1 = win32gui.GetWindowRect(self.hwnd)
if (height_first_screen > heightSecondScreen) and (width_first_screen > widthSecondScreen):
img = img[y0:y1-offset_Y,0:w-x1]
elif (heightSecondScreen > height_first_screen) and (widthSecondScreen > width_first_screen):
img = img[y0:y1+offset_Y,0:w-x1]
return img
| [
"numpy.absolute",
"win32gui.GetWindowRect",
"win32gui.GetDesktopWindow",
"win32gui.IsWindowVisible",
"win32gui.ReleaseDC",
"numpy.frombuffer",
"win32gui.GetWindowText",
"win32gui.FindWindow",
"win32ui.CreateBitmap",
"win32gui.GetWindowDC",
"win32gui.EnumWindows",
"win32ui.CreateDCFromHandle",
... | [((870, 903), 'win32gui.GetWindowRect', 'win32gui.GetWindowRect', (['self.hwnd'], {}), '(self.hwnd)\n', (892, 903), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1553, 1595), 'win32gui.EnumWindows', 'win32gui.EnumWindows', (['winEnumHandler', 'None'], {}), '(winEnumHandler, None)\n', (1573, 1595), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1707, 1738), 'win32gui.GetWindowDC', 'win32gui.GetWindowDC', (['self.hwnd'], {}), '(self.hwnd)\n', (1727, 1738), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1754, 1785), 'win32ui.CreateDCFromHandle', 'win32ui.CreateDCFromHandle', (['wDC'], {}), '(wDC)\n', (1780, 1785), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1848, 1870), 'win32ui.CreateBitmap', 'win32ui.CreateBitmap', ([], {}), '()\n', (1868, 1870), False, 'import win32gui, win32ui, win32con, win32api\n'), ((2153, 2198), 'numpy.fromstring', 'np.fromstring', (['signedIntsArray'], {'dtype': '"""uint8"""'}), "(signedIntsArray, dtype='uint8')\n", (2166, 2198), True, 'import numpy as np\n'), ((2373, 2407), 'win32gui.ReleaseDC', 'win32gui.ReleaseDC', (['self.hwnd', 'wDC'], {}), '(self.hwnd, wDC)\n', (2391, 2407), False, 'import win32gui, win32ui, win32con, win32api\n'), ((2634, 2684), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['self.SM_CXVIRTUALSCREEN'], {}), '(self.SM_CXVIRTUALSCREEN)\n', (2659, 2684), False, 'import win32gui, win32ui, win32con, win32api\n'), ((2698, 2748), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['self.SM_CYVIRTUALSCREEN'], {}), '(self.SM_CYVIRTUALSCREEN)\n', (2723, 2748), False, 'import win32gui, win32ui, win32con, win32api\n'), ((2762, 2811), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['self.SM_XVIRTUALSCREEN'], {}), '(self.SM_XVIRTUALSCREEN)\n', (2787, 2811), False, 'import win32gui, win32ui, win32con, win32api\n'), ((2825, 2874), 'win32api.GetSystemMetrics', 'win32api.GetSystemMetrics', (['self.SM_YVIRTUALSCREEN'], {}), 
'(self.SM_YVIRTUALSCREEN)\n', (2850, 2874), False, 'import win32gui, win32ui, win32con, win32api\n'), ((2903, 2934), 'win32gui.GetWindowDC', 'win32gui.GetWindowDC', (['self.hwnd'], {}), '(self.hwnd)\n', (2923, 2934), False, 'import win32gui, win32ui, win32con, win32api\n'), ((2953, 2987), 'win32ui.CreateDCFromHandle', 'win32ui.CreateDCFromHandle', (['hwndDC'], {}), '(hwndDC)\n', (2979, 2987), False, 'import win32gui, win32ui, win32con, win32api\n'), ((3065, 3087), 'win32ui.CreateBitmap', 'win32ui.CreateBitmap', ([], {}), '()\n', (3085, 3087), False, 'import win32gui, win32ui, win32con, win32api\n'), ((3352, 3397), 'numpy.frombuffer', 'np.frombuffer', (['signedIntsArray'], {'dtype': '"""uint8"""'}), "(signedIntsArray, dtype='uint8')\n", (3365, 3397), True, 'import numpy as np\n'), ((3449, 3502), 'numpy.absolute', 'np.absolute', (['(height_first_screen - heightSecondScreen)'], {}), '(height_first_screen - heightSecondScreen)\n', (3460, 3502), True, 'import numpy as np\n'), ((3601, 3634), 'win32gui.GetWindowRect', 'win32gui.GetWindowRect', (['self.hwnd'], {}), '(self.hwnd)\n', (3623, 3634), False, 'import win32gui, win32ui, win32con, win32api\n'), ((601, 628), 'win32gui.GetDesktopWindow', 'win32gui.GetDesktopWindow', ([], {}), '()\n', (626, 628), False, 'import win32gui, win32ui, win32con, win32api\n'), ((669, 707), 'win32gui.FindWindow', 'win32gui.FindWindow', (['None', 'window_name'], {}), '(None, window_name)\n', (688, 707), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1448, 1478), 'win32gui.IsWindowVisible', 'win32gui.IsWindowVisible', (['hwnd'], {}), '(hwnd)\n', (1472, 1478), False, 'import win32gui, win32ui, win32con, win32api\n'), ((1513, 1541), 'win32gui.GetWindowText', 'win32gui.GetWindowText', (['hwnd'], {}), '(hwnd)\n', (1535, 1541), False, 'import win32gui, win32ui, win32con, win32api\n')] |
# coding: utf-8
"""
Test observing classes
"""
from __future__ import absolute_import, unicode_literals, \
division, print_function
__author__ = "adrn <<EMAIL>>"
# Standard library
import os, sys
import pytest
# Third-party
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from streams.reduction.observing import *
from streams.reduction.util import *
def main():
# define the ccd and geometry
# TODO: units for gain / read_noise?
ccd = CCD(gain=3.7, read_noise=5.33,
shape=(1024,364), dispersion_axis=0) # shape=(nrows, ncols)
# define regions of the detector
ccd.regions["data"] = ccd[:,:-64]
ccd.regions["science"] = ccd[:,100:200]
ccd.regions["overscan"] = ccd[:,-64:]
# create an observing run object, which holds paths and some global things
# like the ccd object, maybe Site object?
path = os.path.join("/Users/adrian/Documents/GraduateSchool/Observing/",
"2013-10_MDM")
obs_run = ObservingRun(path, ccd=ccd)
# - median a bunch of arc images, extract a 1D arc spectrum from
# the first night (arbitrary)
obs_run.make_master_arc(obs_run.nights.values()[0],
narcs=10, overwrite=False)
arc = obs_run.master_arc
pix = np.arange(len(arc))
if arc.wavelength is None:
# fit for a rough wavelength solution
arc.solve_wavelength(obs_run, find_line_list("Hg Ne"))
# plot the arc lamp spectrum with lines identified
fig,ax = obs_run.master_arc.plot(line_ids=True)
fig.savefig(os.path.join(obs_run.redux_path,
"plots", "master_arc.pdf"))
plt.clf()
# - the above, rough wavelength solution is used when no arcs were
# taken at a particular pointing, and as intial conditions for the
# line positions for fitting to each individual arc
all_spec = []
for night in obs_run.nights.values(): #[obs_run.nights["m102213"]]:
for pointing in night.pointings:
if pointing.object_name != "RR Lyr":
continue
pointing.reduce(overwrite=True)
science_data = fits.getdata(pointing._data_file_paths.values()[0])
wvln_2d = pointing.wavelength_image
collapsed_spec = np.median(science_data, axis=0)
row_pix = np.arange(len(collapsed_spec))
g = gaussian_fit(row_pix, collapsed_spec,
mean=np.argmax(collapsed_spec))
# define rough box-car aperture for spectrum
L_idx = int(np.floor(g.mean.value - 4*g.stddev.value))
R_idx = int(np.ceil(g.mean.value + 4*g.stddev.value))+1
spec = np.sum(science_data[:,L_idx:R_idx], axis=1)
spec /= float(R_idx-L_idx)
spec_wvln = np.mean(wvln_2d[:,L_idx:R_idx], axis=1)
all_spec.append((spec_wvln, spec))
continue
if len(all_spec) >= 3:
break
plt.figure(figsize=(12,6))
first_w = None
for wv,fx in all_spec:
w,f = wv[275:375], fx[275:375]
if first_w is None:
first_w = w
ff = interp1d(w,f,bounds_error=False)
print(w-first_w)
# TODO: gaussian fit should allow negative values
#g = gaussian_fit(w, f, mean=6563., log10_amplitude=1E-1)
plt.plot(first_w, ff(first_w))
plt.xlim(6500, 6600)
plt.show()
if False:
# create 2D wavelength image
# TODO: cache this!
wvln_2d = obj.solve_2d_wavelength(overwrite=False)
science_data = frame_data[ccd.regions["science"]]
## HACK
collapsed_spec = np.median(science_data, axis=0)
row_pix = np.arange(len(collapsed_spec))
g = gaussian_fit(row_pix, collapsed_spec,
mean=np.argmax(collapsed_spec))
# define rough box-car aperture for spectrum
L_idx = int(np.floor(g.mean.value - 4*g.stddev.value))
R_idx = int(np.ceil(g.mean.value + 4*g.stddev.value))+1
spec = np.sum(science_data[:,L_idx:R_idx], axis=1)
spec /= float(R_idx-L_idx)
if hdr["EXPTIME"] > 60:
sky_l = np.median(science_data[:,L_idx-20:L_idx-10], axis=1)
sky_r = np.median(science_data[:,R_idx+10:R_idx+20], axis=1)
sky = (sky_l + sky_r) / 2.
spec -= sky
s = Spectrum(obs_run.master_arc.wavelength*u.angstrom,
spec)
fig,ax = s.plot()
ax.set_title(hdr["OBJECT"])
fig.savefig("/Users/adrian/Downloads/{0}.pdf".format(hdr["OBJECT"]))
return
## HACK
# first do it the IRAF way:
row_pix = np.arange(science_data.shape[1])
for row in science_data:
g = gaussian_fit(row_pix, row,
mean=np.argmax(row))
L_idx = int(np.floor(g.mean.value - 4*g.stddev.value))
R_idx = int(np.ceil(g.mean.value + 4*g.stddev.value))+1
plt.clf()
plt.plot(row_pix, row, marker='o', linestyle='none')
plt.axvline(L_idx)
plt.axvline(R_idx)
plt.show()
return
collapsed_spec = np.median(science_data, axis=0)
row_pix = np.arange(len(collapsed_spec))
g = gaussian_fit(row_pix, collapsed_spec,
mean=np.argmax(collapsed_spec))
# define rough box-car aperture for spectrum
L_idx = int(np.floor(g.mean.value - 5*g.stddev.value))
R_idx = int(np.ceil(g.mean.value + 5*g.stddev.value))+1
# grab 2D sky regions around the aperture
# sky_l = np.ravel(science_data[:,L_idx-20:L_idx-10])
# sky_l_wvln = np.ravel(wvln_2d[:,L_idx-20:L_idx-10])
# sky_r = np.ravel(science_data[:,R_idx+10:R_idx+20])
# sky_r_wvln = np.ravel(wvln_2d[:,R_idx+10:R_idx+20])
# # make 1D, oversampled sky spectrum
# sky_wvln = np.append(sky_l_wvln, sky_r_wvln)
# idx = np.argsort(sky_wvln)
# sky_wvln = sky_wvln[idx]
# sky = np.append(sky_l, sky_r)[idx]
# from scipy.interpolate import UnivariateSpline
# interp = UnivariateSpline(sky_wvln, sky, k=3)
spec_2d = science_data[:,L_idx:R_idx]
spec_wvln = wvln_2d[:,L_idx:R_idx]
spec_sky = interp(spec_wvln[:,3])
plt.plot(spec_wvln[:,3],
(spec_2d[:,3] - spec_sky),
drawstyle="steps")
plt.show()
return
spec = np.sum(science_data[:,L_idx:R_idx], axis=1)
spec /= float(R_idx-L_idx)
plt.figure()
plt.subplot(211)
plt.title("sky")
plt.plot(obs_run.master_arc.wavelength, sky,
alpha=0.5, lw=2, drawstyle='steps')
plt.subplot(212)
plt.title("spec")
plt.plot(obs_run.master_arc.wavelength, spec,
alpha=0.5, lw=2, drawstyle='steps')
plt.figure()
plt.plot(obs_run.master_arc.wavelength, spec-sky,
alpha=1., lw=1, drawstyle='steps')
plt.show()
return
#from scipy.interpolate import LSQBivariateSpline
#s = UnivariateSpline(wvln_2d[sky_idx], frame_data[sky_idx])
plt.plot(obs_run.master_arc.wavelength, spec-sky,
drawstyle="steps")
plt.show()
return
# sky subtract
frame.sky_subtract(obs_run)
if __name__ == "__main__":
from argparse import ArgumentParser
import logging
# Create logger
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
formatter = logging.Formatter("%(name)s / %(levelname)s / %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
# Define parser object
parser = ArgumentParser(description="")
parser.add_argument("-v", "--verbose", action="store_true",
dest="verbose", default=False,
help="Be chatty! (default = False)")
parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
default=False, help="Be quiet! (default = False)")
args = parser.parse_args()
# Set logger level based on verbose flags
if args.verbose:
logger.setLevel(logging.DEBUG)
elif args.quiet:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.INFO)
main() | [
"matplotlib.pyplot.title",
"numpy.sum",
"argparse.ArgumentParser",
"matplotlib.pyplot.clf",
"numpy.argmax",
"numpy.floor",
"logging.Formatter",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"scipy.interpolate.interp1d",
"os.path.join",
"matplotlib.pyplot.axvline",
"matplotlib.p... | [((956, 1041), 'os.path.join', 'os.path.join', (['"""/Users/adrian/Documents/GraduateSchool/Observing/"""', '"""2013-10_MDM"""'], {}), "('/Users/adrian/Documents/GraduateSchool/Observing/', '2013-10_MDM'\n )\n", (968, 1041), False, 'import os, sys\n'), ((1738, 1747), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1745, 1747), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (3057, 3074), True, 'import matplotlib.pyplot as plt\n'), ((3452, 3472), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(6500)', '(6600)'], {}), '(6500, 6600)\n', (3460, 3472), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3487), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3485, 3487), True, 'import matplotlib.pyplot as plt\n'), ((7582, 7609), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7599, 7609), False, 'import logging\n'), ((7619, 7642), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (7640, 7642), False, 'import logging\n'), ((7659, 7718), 'logging.Formatter', 'logging.Formatter', (['"""%(name)s / %(levelname)s / %(message)s"""'], {}), "('%(name)s / %(levelname)s / %(message)s')\n", (7676, 7718), False, 'import logging\n'), ((7817, 7847), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (7831, 7847), False, 'from argparse import ArgumentParser\n'), ((1644, 1703), 'os.path.join', 'os.path.join', (['obs_run.redux_path', '"""plots"""', '"""master_arc.pdf"""'], {}), "(obs_run.redux_path, 'plots', 'master_arc.pdf')\n", (1656, 1703), False, 'import os, sys\n'), ((3226, 3260), 'scipy.interpolate.interp1d', 'interp1d', (['w', 'f'], {'bounds_error': '(False)'}), '(w, f, bounds_error=False)\n', (3234, 3260), False, 'from scipy.interpolate import interp1d\n'), ((3727, 3758), 'numpy.median', 'np.median', (['science_data'], {'axis': '(0)'}), '(science_data, 
axis=0)\n', (3736, 3758), True, 'import numpy as np\n'), ((4112, 4156), 'numpy.sum', 'np.sum', (['science_data[:, L_idx:R_idx]'], {'axis': '(1)'}), '(science_data[:, L_idx:R_idx], axis=1)\n', (4118, 4156), True, 'import numpy as np\n'), ((4750, 4782), 'numpy.arange', 'np.arange', (['science_data.shape[1]'], {}), '(science_data.shape[1])\n', (4759, 4782), True, 'import numpy as np\n'), ((5262, 5293), 'numpy.median', 'np.median', (['science_data'], {'axis': '(0)'}), '(science_data, axis=0)\n', (5271, 5293), True, 'import numpy as np\n'), ((6405, 6475), 'matplotlib.pyplot.plot', 'plt.plot', (['spec_wvln[:, 3]', '(spec_2d[:, 3] - spec_sky)'], {'drawstyle': '"""steps"""'}), "(spec_wvln[:, 3], spec_2d[:, 3] - spec_sky, drawstyle='steps')\n", (6413, 6475), True, 'import matplotlib.pyplot as plt\n'), ((6518, 6528), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6526, 6528), True, 'import matplotlib.pyplot as plt\n'), ((6560, 6604), 'numpy.sum', 'np.sum', (['science_data[:, L_idx:R_idx]'], {'axis': '(1)'}), '(science_data[:, L_idx:R_idx], axis=1)\n', (6566, 6604), True, 'import numpy as np\n'), ((6648, 6660), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6658, 6660), True, 'import matplotlib.pyplot as plt\n'), ((6669, 6685), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (6680, 6685), True, 'import matplotlib.pyplot as plt\n'), ((6694, 6710), 'matplotlib.pyplot.title', 'plt.title', (['"""sky"""'], {}), "('sky')\n", (6703, 6710), True, 'import matplotlib.pyplot as plt\n'), ((6719, 6804), 'matplotlib.pyplot.plot', 'plt.plot', (['obs_run.master_arc.wavelength', 'sky'], {'alpha': '(0.5)', 'lw': '(2)', 'drawstyle': '"""steps"""'}), "(obs_run.master_arc.wavelength, sky, alpha=0.5, lw=2, drawstyle='steps'\n )\n", (6727, 6804), True, 'import matplotlib.pyplot as plt\n'), ((6825, 6841), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (6836, 6841), True, 'import matplotlib.pyplot as plt\n'), ((6850, 6867), 
'matplotlib.pyplot.title', 'plt.title', (['"""spec"""'], {}), "('spec')\n", (6859, 6867), True, 'import matplotlib.pyplot as plt\n'), ((6876, 6962), 'matplotlib.pyplot.plot', 'plt.plot', (['obs_run.master_arc.wavelength', 'spec'], {'alpha': '(0.5)', 'lw': '(2)', 'drawstyle': '"""steps"""'}), "(obs_run.master_arc.wavelength, spec, alpha=0.5, lw=2, drawstyle=\n 'steps')\n", (6884, 6962), True, 'import matplotlib.pyplot as plt\n'), ((6984, 6996), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6994, 6996), True, 'import matplotlib.pyplot as plt\n'), ((7005, 7096), 'matplotlib.pyplot.plot', 'plt.plot', (['obs_run.master_arc.wavelength', '(spec - sky)'], {'alpha': '(1.0)', 'lw': '(1)', 'drawstyle': '"""steps"""'}), "(obs_run.master_arc.wavelength, spec - sky, alpha=1.0, lw=1,\n drawstyle='steps')\n", (7013, 7096), True, 'import matplotlib.pyplot as plt\n'), ((7116, 7126), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7124, 7126), True, 'import matplotlib.pyplot as plt\n'), ((7279, 7349), 'matplotlib.pyplot.plot', 'plt.plot', (['obs_run.master_arc.wavelength', '(spec - sky)'], {'drawstyle': '"""steps"""'}), "(obs_run.master_arc.wavelength, spec - sky, drawstyle='steps')\n", (7287, 7349), True, 'import matplotlib.pyplot as plt\n'), ((7373, 7383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7381, 7383), True, 'import matplotlib.pyplot as plt\n'), ((2361, 2392), 'numpy.median', 'np.median', (['science_data'], {'axis': '(0)'}), '(science_data, axis=0)\n', (2370, 2392), True, 'import numpy as np\n'), ((2774, 2818), 'numpy.sum', 'np.sum', (['science_data[:, L_idx:R_idx]'], {'axis': '(1)'}), '(science_data[:, L_idx:R_idx], axis=1)\n', (2780, 2818), True, 'import numpy as np\n'), ((2882, 2922), 'numpy.mean', 'np.mean', (['wvln_2d[:, L_idx:R_idx]'], {'axis': '(1)'}), '(wvln_2d[:, L_idx:R_idx], axis=1)\n', (2889, 2922), True, 'import numpy as np\n'), ((3989, 4032), 'numpy.floor', 'np.floor', (['(g.mean.value - 4 * g.stddev.value)'], {}), 
'(g.mean.value - 4 * g.stddev.value)\n', (3997, 4032), True, 'import numpy as np\n'), ((4244, 4301), 'numpy.median', 'np.median', (['science_data[:, L_idx - 20:L_idx - 10]'], {'axis': '(1)'}), '(science_data[:, L_idx - 20:L_idx - 10], axis=1)\n', (4253, 4301), True, 'import numpy as np\n'), ((4317, 4374), 'numpy.median', 'np.median', (['science_data[:, R_idx + 10:R_idx + 20]'], {'axis': '(1)'}), '(science_data[:, R_idx + 10:R_idx + 20], axis=1)\n', (4326, 4374), True, 'import numpy as np\n'), ((5057, 5066), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5064, 5066), True, 'import matplotlib.pyplot as plt\n'), ((5079, 5131), 'matplotlib.pyplot.plot', 'plt.plot', (['row_pix', 'row'], {'marker': '"""o"""', 'linestyle': '"""none"""'}), "(row_pix, row, marker='o', linestyle='none')\n", (5087, 5131), True, 'import matplotlib.pyplot as plt\n'), ((5144, 5162), 'matplotlib.pyplot.axvline', 'plt.axvline', (['L_idx'], {}), '(L_idx)\n', (5155, 5162), True, 'import matplotlib.pyplot as plt\n'), ((5175, 5193), 'matplotlib.pyplot.axvline', 'plt.axvline', (['R_idx'], {}), '(R_idx)\n', (5186, 5193), True, 'import matplotlib.pyplot as plt\n'), ((5206, 5216), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5214, 5216), True, 'import matplotlib.pyplot as plt\n'), ((5524, 5567), 'numpy.floor', 'np.floor', (['(g.mean.value - 5 * g.stddev.value)'], {}), '(g.mean.value - 5 * g.stddev.value)\n', (5532, 5567), True, 'import numpy as np\n'), ((2643, 2686), 'numpy.floor', 'np.floor', (['(g.mean.value - 4 * g.stddev.value)'], {}), '(g.mean.value - 4 * g.stddev.value)\n', (2651, 2686), True, 'import numpy as np\n'), ((3888, 3913), 'numpy.argmax', 'np.argmax', (['collapsed_spec'], {}), '(collapsed_spec)\n', (3897, 3913), True, 'import numpy as np\n'), ((4052, 4094), 'numpy.ceil', 'np.ceil', (['(g.mean.value + 4 * g.stddev.value)'], {}), '(g.mean.value + 4 * g.stddev.value)\n', (4059, 4094), True, 'import numpy as np\n'), ((4933, 4976), 'numpy.floor', 'np.floor', (['(g.mean.value 
- 4 * g.stddev.value)'], {}), '(g.mean.value - 4 * g.stddev.value)\n', (4941, 4976), True, 'import numpy as np\n'), ((5423, 5448), 'numpy.argmax', 'np.argmax', (['collapsed_spec'], {}), '(collapsed_spec)\n', (5432, 5448), True, 'import numpy as np\n'), ((5587, 5629), 'numpy.ceil', 'np.ceil', (['(g.mean.value + 5 * g.stddev.value)'], {}), '(g.mean.value + 5 * g.stddev.value)\n', (5594, 5629), True, 'import numpy as np\n'), ((2534, 2559), 'numpy.argmax', 'np.argmax', (['collapsed_spec'], {}), '(collapsed_spec)\n', (2543, 2559), True, 'import numpy as np\n'), ((2710, 2752), 'numpy.ceil', 'np.ceil', (['(g.mean.value + 4 * g.stddev.value)'], {}), '(g.mean.value + 4 * g.stddev.value)\n', (2717, 2752), True, 'import numpy as np\n'), ((4893, 4907), 'numpy.argmax', 'np.argmax', (['row'], {}), '(row)\n', (4902, 4907), True, 'import numpy as np\n'), ((5000, 5042), 'numpy.ceil', 'np.ceil', (['(g.mean.value + 4 * g.stddev.value)'], {}), '(g.mean.value + 4 * g.stddev.value)\n', (5007, 5042), True, 'import numpy as np\n')] |
### This script is to load a model and use it to drive an AV in the simulator
from keras import __version__ as keras_version
from keras.models import load_model
import h5py
import argparse
import base64
import os
import shutil
import cv2
import csv
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from flask import Flask
from io import BytesIO
from datetime import datetime
import sys
sys.path.insert(0, 'library/')
from utilities import resize_image
class SimplePIController:
def __init__(self, Kp, Ki):
self.Kp = Kp
self.Ki = Ki
self.set_point = 0.
self.error = 0.
self.integral = 0.
def set_desired_speed(self, desired):
self.set_point = desired
def update(self, measurement):
# proportional error
self.error = self.set_point - measurement
# integral error
self.integral = self.integral + self.error
return self.Kp * self.error + self.Ki * self.integral
###################################
## variables
desiredSpeed = 20
## LSTM
fLSTM = False
fOnlyFollow = True
frameCount = 0
nFramesLSTM = 5
curSampleLSTM = np.empty((nFramesLSTM, 66, 200, 3))
# !!!! hard coded path
netPath = 'D:/projects/gitProjects/SAAP_Auto-driving_Platform/Data/training_simu_1/trainedModels/models-cnn/'
netModel = netPath + 'model856.h5'
###################################
## globals
controller = SimplePIController(0.1, 0.002)
controller.set_desired_speed(desiredSpeed)
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
def record_images():
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
image.save('{}.jpg'.format(image_filename))
def shift_img_array(curSampleLSTM, newImg):
global nFramesLSTM
newSampleLSTM = curSampleLSTM
for i in range(nFramesLSTM - 1):
newSampleLSTM[i] = curSampleLSTM[i+1]
newSampleLSTM[-1] = newImg
return newSampleLSTM
@sio.on('telemetry')
def telemetry(sid, data):
global frameCount
global curSampleLSTM
global nFramesLSTM
if data:
## get data from Unity
angleUnity = data["steering_angle"]
throttleUnity = data["throttle"]
speedUnity = data["speed"]
ctrImgUnity = resize_image(np.array(Image.open(BytesIO(base64.b64decode(data["image"])))), True)
## prepare variables for prediction results
rAngle = 0
rThrottle = controller.update(float(speedUnity))
## if LSTM is used, prepare a LSTM sample for predicting
if not fLSTM:
ctrImgModel = ctrImgUnity[None, :, :, :]
#rDetect = np.argmax(netDetect.predict(ctrImgModel))
rAngle = float(driveModel.predict(ctrImgModel))
else:
if frameCount < nFramesLSTM:
curSampleLSTM[frameCount] = ctrImgUnity
frameCount += 1
else:
curSamplePredict = curSampleLSTM[None, :, :, :, :]
rAngle = np.mean(driveModel.predict(curSamplePredict))
#rAngleTest = np.mean(netTest.predict(curSamplePredict))
curSampleLSTM = shift_img_array(curSampleLSTM, ctrImgUnity)
print('speedUnity ' + str(speedUnity))
print('rAngle ' + str(rAngle))
print('rThrottle ' + str(rThrottle))
send_control(rAngle*float(speedUnity)/150, rThrottle)
else:
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit(
"steer",
data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
},
skip_sid=True)
if __name__ == '__main__':
## take arguments from the command line
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
'''
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
'''
#args = parser.parse_args()
'''
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("An image storing folder is provided, recording this run ...")
else:
print("An image storing folder is missing, not recording this run ...")
'''
## check that the model's Keras version is the same as local Keras version
#f = h5py.File(netPath + args.model, mode='r')
f = h5py.File(netModel, mode='r')
model_version = f.attrs.get('keras_version')
keras_version = str(keras_version).encode('utf8')
if model_version != keras_version:
print('\n')
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('Current keras version ', keras_version, ', model keras version ', model_version)
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print('\n')
## load a model
#driveModel = load_model(netPath + args.model)
driveModel = load_model(netModel)
## wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
## deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| [
"keras.models.load_model",
"h5py.File",
"socketio.Middleware",
"argparse.ArgumentParser",
"numpy.empty",
"socketio.Server",
"flask.Flask",
"sys.path.insert",
"base64.b64decode",
"datetime.datetime.utcnow",
"os.path.join",
"eventlet.listen"
] | [((445, 475), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""library/"""'], {}), "(0, 'library/')\n", (460, 475), False, 'import sys\n'), ((1180, 1215), 'numpy.empty', 'np.empty', (['(nFramesLSTM, 66, 200, 3)'], {}), '((nFramesLSTM, 66, 200, 3))\n', (1188, 1215), True, 'import numpy as np\n'), ((1530, 1547), 'socketio.Server', 'socketio.Server', ([], {}), '()\n', (1545, 1547), False, 'import socketio\n'), ((1554, 1569), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1559, 1569), False, 'from flask import Flask\n'), ((3745, 3798), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Remote Driving"""'}), "(description='Remote Driving')\n", (3768, 3798), False, 'import argparse\n'), ((4668, 4697), 'h5py.File', 'h5py.File', (['netModel'], {'mode': '"""r"""'}), "(netModel, mode='r')\n", (4677, 4697), False, 'import h5py\n'), ((5116, 5136), 'keras.models.load_model', 'load_model', (['netModel'], {}), '(netModel)\n', (5126, 5136), False, 'from keras.models import load_model\n'), ((5199, 5228), 'socketio.Middleware', 'socketio.Middleware', (['sio', 'app'], {}), '(sio, app)\n', (5218, 5228), False, 'import socketio\n'), ((1748, 1790), 'os.path.join', 'os.path.join', (['args.image_folder', 'timestamp'], {}), '(args.image_folder, timestamp)\n', (1760, 1790), False, 'import os\n'), ((5290, 5317), 'eventlet.listen', 'eventlet.listen', (["('', 4567)"], {}), "(('', 4567))\n", (5305, 5317), False, 'import eventlet\n'), ((1673, 1690), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1688, 1690), False, 'from datetime import datetime\n'), ((2368, 2399), 'base64.b64decode', 'base64.b64decode', (["data['image']"], {}), "(data['image'])\n", (2384, 2399), False, 'import base64\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import random
from scipy.stats import gaussian_kde
import ternary
def plot_confusion_matrix(cm, labels, ax):
fontsize = 7
plt.rc('font', family='Arial', size=fontsize)
plt.tick_params(labelsize=fontsize)
im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.binary)
tick_major = np.arange(cm.shape[0])
x, y = np.meshgrid(tick_major, tick_major)
for xi, yi in zip(x.flatten(), y.flatten()):
val = cm[yi][xi]
if val > 0:
ax.text(xi, yi, val, color='red', va='center', ha='center')
ax.set_xlabel('Predicted label', fontsize=fontsize)
ax.set_ylabel('Actual label', fontsize=fontsize)
ax.set(xticks=tick_major, yticks=tick_major,
xticklabels=labels, yticklabels=labels)
tick_minor = np.arange(cm.shape[0] + 1) - 0.5
ax.set_xticks(tick_minor, minor=True)
ax.set_yticks(tick_minor, minor=True)
ax.tick_params(which='both', bottom=False, left=False)
ax.grid(True, which='minor', linestyle='-', lw=1.5)
linewidth = 1
ax.spines['top'].set_linewidth(linewidth)
ax.spines['bottom'].set_linewidth(linewidth)
ax.spines['left'].set_linewidth(linewidth)
ax.spines['right'].set_linewidth(linewidth)
cax = plt.colorbar(im, ax=ax, shrink=0.9)
cax.ax.tick_params(width=linewidth)
cax.outline.set_linewidth(linewidth)
return
def plot_ternary_Folk_B(df, ylabel, ax, density=False):
fontsize = 7
plt.rc('font', family='Arial', size=fontsize)
ptsize = 10
linewidth = 1
fig, tax = ternary.figure(ax=ax, scale=100)
tax.boundary(linewidth=linewidth)
tax.horizontal_line(90, linewidth=linewidth, color='blue')
tax.horizontal_line(50, linewidth=linewidth, color='blue')
tax.horizontal_line(10, linewidth=linewidth, color='blue')
p1 = (10 / 3, 90, 20 / 3)
p2 = (100 / 3, 0, 200 / 3)
tax.line(p1, p2, linewidth=linewidth, color='blue')
p1 = (20 / 3, 90, 10 / 3)
p2 = (200 / 3, 0, 100 / 3)
tax.line(p1, p2, linewidth=linewidth, color='blue')
tax.right_corner_label("Clay")
tax.top_corner_label("Sand")
tax.left_corner_label("Silt")
labels = df[ylabel].value_counts().index
cmap = mpl.cm.get_cmap('plasma', len(labels))
colors = cmap(range(len(labels)))
markers = ['.', ',', 'v', '+', 'o', '*', '<', '>', 'D', '1', 's', '2', 'h']
df_show = df[df['Sand'] + df['Silt'] + df['Clay'] > 99.9]
if density:
x = df_show['Sand'].values
y = df_show['Silt'].values
z = df_show['Clay'].values
xy = np.vstack([x, y])
c = gaussian_kde(xy)(xy)
idx = c.argsort()
x, y, z, c = x[idx], y[idx], z[idx], c[idx]
cb_kwargs = {"shrink": 1.0,
"orientation": "horizontal",
"fraction": 0.1,
"pad": 0.01,
"aspect": 30, }
s = tax.scatter(tuple(zip(z, x, y)), vmax=max(c), colormap=plt.cm.plasma, colorbar=False,
c=c, cmap=plt.cm.plasma, s=ptsize, edgecolor=None, linewidths=0)
cb = s.figure.colorbar(s.collections[0], **cb_kwargs)
cb.ax.tick_params(width=linewidth)
cb.outline.set_linewidth(linewidth)
else:
for label, color in zip(labels, colors):
tax.scatter(tuple(df_show.loc[df_show[ylabel] == label, ['Clay', 'Sand', 'Silt']].values),
marker=random.choice(markers), color=color, label=label, s=ptsize, edgecolor=None, linewidths=0)
tax.legend(loc=1, bbox_to_anchor=(1, 1, 0.01, 0.01), markerscale=2)
tax.annotate(text='S', position=(3, 92))
tax.annotate(text='zS', position=(4, 65))
tax.annotate(text='mS', position=(14, 65))
tax.annotate(text='cS', position=(25, 65))
tax.annotate(text='sZ', position=(10, 28))
tax.annotate(text='sM', position=(32, 28))
tax.annotate(text='sC', position=(56, 28))
tax.annotate(text='Z', position=(15, 3))
tax.annotate(text='M', position=(45, 3))
tax.annotate(text='C', position=(80, 3))
ticks = list(np.arange(0, 101, 20))
tax.ticks(ticks=ticks, axis='lbr', linewidth=1, multiple=1, offset=0.02, clockwise=False, fontsize=fontsize)
tax.get_axes().axis('off')
tax.clear_matplotlib_ticks()
| [
"numpy.meshgrid",
"ternary.figure",
"scipy.stats.gaussian_kde",
"random.choice",
"matplotlib.pyplot.colorbar",
"numpy.arange",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.tick_params",
"numpy.vstack"
] | [((218, 263), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Arial"""', 'size': 'fontsize'}), "('font', family='Arial', size=fontsize)\n", (224, 263), True, 'import matplotlib.pyplot as plt\n'), ((269, 304), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': 'fontsize'}), '(labelsize=fontsize)\n', (284, 304), True, 'import matplotlib.pyplot as plt\n'), ((396, 418), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (405, 418), True, 'import numpy as np\n'), ((431, 466), 'numpy.meshgrid', 'np.meshgrid', (['tick_major', 'tick_major'], {}), '(tick_major, tick_major)\n', (442, 466), True, 'import numpy as np\n'), ((1334, 1369), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'ax': 'ax', 'shrink': '(0.9)'}), '(im, ax=ax, shrink=0.9)\n', (1346, 1369), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1596), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Arial"""', 'size': 'fontsize'}), "('font', family='Arial', size=fontsize)\n", (1557, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1651, 1683), 'ternary.figure', 'ternary.figure', ([], {'ax': 'ax', 'scale': '(100)'}), '(ax=ax, scale=100)\n', (1665, 1683), False, 'import ternary\n'), ((870, 896), 'numpy.arange', 'np.arange', (['(cm.shape[0] + 1)'], {}), '(cm.shape[0] + 1)\n', (879, 896), True, 'import numpy as np\n'), ((2687, 2704), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (2696, 2704), True, 'import numpy as np\n'), ((4223, 4244), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(20)'], {}), '(0, 101, 20)\n', (4232, 4244), True, 'import numpy as np\n'), ((2718, 2734), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['xy'], {}), '(xy)\n', (2730, 2734), False, 'from scipy.stats import gaussian_kde\n'), ((3563, 3585), 'random.choice', 'random.choice', (['markers'], {}), '(markers)\n', (3576, 3585), False, 'import random\n')] |
from typing import Type, List
import numpy as np
try:
from rlbench import ObservationConfig, Environment, CameraConfig
except (ModuleNotFoundError, ImportError) as e:
print("You need to install RLBench: 'https://github.com/stepjam/RLBench'")
raise e
from rlbench.action_modes import ActionMode
from rlbench.backend.observation import Observation
from rlbench.backend.task import Task
from yarr.envs.env import Env
from yarr.utils.observation_type import ObservationElement
from yarr.utils.transition import Transition
class RLBenchEnv(Env):
ROBOT_STATE_KEYS = [
'joint_velocities', 'joint_positions', 'joint_forces', 'gripper_open',
'gripper_pose', 'gripper_joint_positions', 'gripper_touch_forces',
'task_low_dim_state', 'misc'
]
def __init__(self,
task_class: Type[Task],
observation_config: ObservationConfig,
action_mode: ActionMode,
dataset_root: str = '',
channels_last=False,
headless=True):
super(RLBenchEnv, self).__init__()
self._task_class = task_class
self._observation_config = observation_config
self._channels_last = channels_last
self._rlbench_env = Environment(action_mode=action_mode,
obs_config=observation_config,
dataset_root=dataset_root,
headless=headless)
self._task = None
def extract_obs(self, obs: Observation):
obs_dict = vars(obs)
obs_dict = {k: v for k, v in obs_dict.items() if v is not None}
robot_state = obs.get_low_dim_data()
# Remove all of the individual state elements
obs_dict = {
k: v
for k, v in obs_dict.items()
if k not in RLBenchEnv.ROBOT_STATE_KEYS
}
if not self._channels_last:
# Swap channels from last dim to 1st dim
obs_dict = {
k: np.transpose(v, [2, 0, 1])
if v.ndim == 3 else np.expand_dims(v, 0)
for k, v in obs_dict.items()
}
else:
# Add extra dim to depth data
obs_dict = {
k: v if v.ndim == 3 else np.expand_dims(v, -1)
for k, v in obs_dict.items()
}
obs_dict['low_dim_state'] = np.array(robot_state, dtype=np.float32)
return obs_dict
def launch(self):
self._rlbench_env.launch()
self._task = self._rlbench_env.get_task(self._task_class)
def shutdown(self):
self._rlbench_env.shutdown()
def reset(self) -> dict:
descriptions, obs = self._task.reset()
return self.extract_obs(obs)
def step(self, action: np.ndarray) -> Transition:
obs, reward, terminal = self._task.step(action)
obs = self.extract_obs(obs)
return Transition(obs, reward, terminal)
def _get_cam_observation_elements(self, camera: CameraConfig, prefix: str):
elements = []
if camera.rgb:
shape = (camera.image_size + (3, ) if self._channels_last else
(3, ) + camera.image_size)
elements.append(
ObservationElement('%s_rgb' % prefix, shape, np.uint8))
if camera.depth:
shape = (camera.image_size + (1, ) if self._channels_last else
(1, ) + camera.image_size)
elements.append(
ObservationElement('%s_depth' % prefix, shape, np.float32))
if camera.mask:
raise NotImplementedError()
return elements
@property
def observation_elements(self) -> List[ObservationElement]:
elements = []
robot_state_len = 0
if self._observation_config.joint_velocities:
robot_state_len += 7
if self._observation_config.joint_positions:
robot_state_len += 7
if self._observation_config.joint_forces:
robot_state_len += 7
if self._observation_config.gripper_open:
robot_state_len += 1
if self._observation_config.gripper_pose:
robot_state_len += 7
if self._observation_config.gripper_joint_positions:
robot_state_len += 2
if self._observation_config.gripper_touch_forces:
robot_state_len += 2
if self._observation_config.task_low_dim_state:
raise NotImplementedError()
if robot_state_len > 0:
elements.append(
ObservationElement('low_dim_state', (robot_state_len, ),
np.float32))
elements.extend(
self._get_cam_observation_elements(
self._observation_config.left_shoulder_camera,
'left_shoulder'))
elements.extend(
self._get_cam_observation_elements(
self._observation_config.right_shoulder_camera,
'right_shoulder'))
elements.extend(
self._get_cam_observation_elements(
self._observation_config.front_camera, 'front'))
elements.extend(
self._get_cam_observation_elements(
self._observation_config.wrist_camera, 'wrist'))
return elements
@property
def action_shape(self):
return (self._rlbench_env.action_size, )
    @property
    def env(self) -> Environment:
        """The wrapped RLBench :class:`Environment` instance."""
        return self._rlbench_env
| [
"yarr.utils.observation_type.ObservationElement",
"numpy.transpose",
"numpy.expand_dims",
"numpy.array",
"yarr.utils.transition.Transition",
"rlbench.Environment"
] | [((1262, 1379), 'rlbench.Environment', 'Environment', ([], {'action_mode': 'action_mode', 'obs_config': 'observation_config', 'dataset_root': 'dataset_root', 'headless': 'headless'}), '(action_mode=action_mode, obs_config=observation_config,\n dataset_root=dataset_root, headless=headless)\n', (1273, 1379), False, 'from rlbench import ObservationConfig, Environment, CameraConfig\n'), ((2424, 2463), 'numpy.array', 'np.array', (['robot_state'], {'dtype': 'np.float32'}), '(robot_state, dtype=np.float32)\n', (2432, 2463), True, 'import numpy as np\n'), ((2950, 2983), 'yarr.utils.transition.Transition', 'Transition', (['obs', 'reward', 'terminal'], {}), '(obs, reward, terminal)\n', (2960, 2983), False, 'from yarr.utils.transition import Transition\n'), ((3278, 3332), 'yarr.utils.observation_type.ObservationElement', 'ObservationElement', (["('%s_rgb' % prefix)", 'shape', 'np.uint8'], {}), "('%s_rgb' % prefix, shape, np.uint8)\n", (3296, 3332), False, 'from yarr.utils.observation_type import ObservationElement\n'), ((3527, 3585), 'yarr.utils.observation_type.ObservationElement', 'ObservationElement', (["('%s_depth' % prefix)", 'shape', 'np.float32'], {}), "('%s_depth' % prefix, shape, np.float32)\n", (3545, 3585), False, 'from yarr.utils.observation_type import ObservationElement\n'), ((4584, 4651), 'yarr.utils.observation_type.ObservationElement', 'ObservationElement', (['"""low_dim_state"""', '(robot_state_len,)', 'np.float32'], {}), "('low_dim_state', (robot_state_len,), np.float32)\n", (4602, 4651), False, 'from yarr.utils.observation_type import ObservationElement\n'), ((2042, 2068), 'numpy.transpose', 'np.transpose', (['v', '[2, 0, 1]'], {}), '(v, [2, 0, 1])\n', (2054, 2068), True, 'import numpy as np\n'), ((2105, 2125), 'numpy.expand_dims', 'np.expand_dims', (['v', '(0)'], {}), '(v, 0)\n', (2119, 2125), True, 'import numpy as np\n'), ((2307, 2328), 'numpy.expand_dims', 'np.expand_dims', (['v', '(-1)'], {}), '(v, -1)\n', (2321, 2328), True, 'import numpy as 
np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Colour Models Plotting
======================
Defines the colour models plotting objects:
- :func:`colourspaces_CIE_1931_chromaticity_diagram_plot`
- :func:`single_transfer_function_plot`
- :func:`multi_transfer_function_plot`
"""
from __future__ import division
import random
import numpy as np
import pylab
from colour.models import POINTER_GAMUT_DATA, RGB_COLOURSPACES
from colour.plotting import (
CIE_1931_chromaticity_diagram_plot,
aspect,
bounding_box,
display,
figure_size,
get_cmfs)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['get_RGB_colourspace',
'colourspaces_CIE_1931_chromaticity_diagram_plot',
'single_transfer_function_plot',
'multi_transfer_function_plot']
def get_RGB_colourspace(colourspace):
    """
    Returns the *RGB* colourspace with given name.

    Parameters
    ----------
    colourspace : Unicode
        *RGB* Colourspace name.

    Returns
    -------
    RGB_Colourspace
        *RGB* Colourspace.

    Raises
    ------
    KeyError
        If the given colourspace is not found in the factory colourspaces.
    """
    # Keep the requested name around for the error message before the
    # variable is rebound to the colourspace object itself.
    name = colourspace
    colourspace = RGB_COLOURSPACES.get(name)
    if colourspace is None:
        raise KeyError(
            ('"{0}" colourspace not found in factory colourspaces: '
             '"{1}".').format(name, sorted(RGB_COLOURSPACES.keys())))
    return colourspace
@figure_size((8, 8))
def colourspaces_CIE_1931_chromaticity_diagram_plot(
        colourspaces=None,
        cmfs='CIE 1931 2 Degree Standard Observer',
        **kwargs):
    """
    Plots given colourspaces in *CIE 1931 Chromaticity Diagram*.

    Parameters
    ----------
    colourspaces : list, optional
        Colourspaces to plot.
    cmfs : unicode, optional
        Standard observer colour matching functions used for diagram bounds.
    \*\*kwargs : \*\*
        Keywords arguments.

    Returns
    -------
    bool
        Definition success.

    Examples
    --------
    >>> csps = ['sRGB', 'ACES RGB']
    >>> colourspaces_CIE_1931_chromaticity_diagram_plot(csps)  # doctest: +SKIP
    True
    """
    if colourspaces is None:
        colourspaces = ('sRGB', 'ACES RGB', 'Pointer Gamut')
    cmfs, name = get_cmfs(cmfs), cmfs
    settings = {'title': '{0} - {1}'.format(', '.join(colourspaces), name),
                'standalone': False}
    settings.update(kwargs)
    # Draw the background chromaticity diagram first; everything below is
    # overlaid on top of it in the same figure.
    if not CIE_1931_chromaticity_diagram_plot(**settings):
        return
    # Axis limits grow as gamuts are plotted so everything stays visible.
    x_limit_min, x_limit_max = [-0.1], [0.9]
    y_limit_min, y_limit_max = [-0.1], [0.9]
    for colourspace in colourspaces:
        if colourspace == 'Pointer Gamut':
            # Pointer gamut data is a sequence of (x, y) chromaticity pairs.
            x, y = tuple(zip(*POINTER_GAMUT_DATA))
            pylab.plot(x,
                       y,
                       label='Pointer Gamut',
                       color='0.95',
                       linewidth=2)
            # Close the gamut outline by joining the last point to the first.
            pylab.plot([x[-1],
                        x[0]],
                       [y[-1],
                        y[0]],
                       color='0.95',
                       linewidth=2)
        else:
            colourspace, name = get_RGB_colourspace(
                colourspace), colourspace

            # Random but readable colour for this colourspace's gamut lines.
            random_colour = lambda: float(random.randint(64, 224)) / 255
            r, g, b = random_colour(), random_colour(), random_colour()

            primaries = colourspace.primaries
            whitepoint = colourspace.whitepoint

            # Whitepoint plotted twice: once as a line segment for the legend
            # entry, once with the 'o' marker to make it visible.
            pylab.plot([whitepoint[0], whitepoint[0]],
                       [whitepoint[1], whitepoint[1]],
                       color=(r, g, b),
                       label=colourspace.name,
                       linewidth=2)
            pylab.plot([whitepoint[0], whitepoint[0]],
                       [whitepoint[1], whitepoint[1]],
                       'o',
                       color=(r, g, b),
                       linewidth=2)
            # Triangle connecting the three primaries.
            pylab.plot([primaries[0, 0], primaries[1, 0]],
                       [primaries[0, 1], primaries[1, 1]],
                       'o-',
                       color=(r, g, b),
                       linewidth=2)
            pylab.plot([primaries[1, 0], primaries[2, 0]],
                       [primaries[1, 1], primaries[2, 1]],
                       'o-',
                       color=(r, g, b),
                       linewidth=2)
            pylab.plot([primaries[2, 0], primaries[0, 0]],
                       [primaries[2, 1], primaries[0, 1]],
                       'o-',
                       color=(r, g, b),
                       linewidth=2)

            # Extend the plot limits to cover this colourspace's primaries.
            x_limit_min.append(np.amin(primaries[:, 0]))
            y_limit_min.append(np.amin(primaries[:, 1]))
            x_limit_max.append(np.amax(primaries[:, 0]))
            y_limit_max.append(np.amax(primaries[:, 1]))
    settings.update({'legend': True,
                     'legend_location': 'upper right',
                     'x_tighten': True,
                     'y_tighten': True,
                     'limits': [min(x_limit_min), max(x_limit_max),
                                min(y_limit_min), max(y_limit_max)],
                     'margins': [-0.05, 0.05, -0.05, 0.05],
                     'standalone': True})
    bounding_box(**settings)
    aspect(**settings)
    return display(**settings)
def single_transfer_function_plot(colourspace='sRGB', **kwargs):
    """
    Plots given colourspace transfer function.

    Parameters
    ----------
    colourspace : unicode, optional
        *RGB* Colourspace transfer function to plot.
    \*\*kwargs : \*\*
        Keywords arguments.

    Returns
    -------
    bool
        Definition success.

    Examples
    --------
    >>> single_transfer_function_plot()  # doctest: +SKIP
    True
    """
    # Thin wrapper around the multi-colourspace variant; caller-supplied
    # kwargs override the default title.
    settings = dict({'title': '{0} - Transfer Function'.format(colourspace)},
                    **kwargs)
    return multi_transfer_function_plot([colourspace], **settings)
@figure_size((8, 8))
def multi_transfer_function_plot(colourspaces=None,
                                 inverse=False, **kwargs):
    """
    Plots given colourspaces transfer functions.

    Parameters
    ----------
    colourspaces : list, optional
        Colourspaces transfer functions to plot.
    inverse : bool
        Plot inverse transfer functions.
    \*\*kwargs : \*\*
        Keywords arguments.

    Returns
    -------
    bool
        Definition success.

    Examples
    --------
    >>> multi_transfer_function_plot(['sRGB', 'Rec. 709'])  # doctest: +SKIP
    True
    """
    if colourspaces is None:
        colourspaces = ['sRGB', 'Rec. 709']
    samples = np.linspace(0, 1, 1000)
    # NOTE: the loop previously used enumerate() and bound an unused 'name';
    # both were dead and have been removed.
    for colourspace_name in colourspaces:
        colourspace = get_RGB_colourspace(colourspace_name)
        # Pick the (inverse) transfer function once instead of testing
        # 'inverse' for every one of the 1000 samples.
        function = (colourspace.inverse_transfer_function if inverse
                    else colourspace.transfer_function)
        RGBs = np.array([function(x) for x in samples])
        pylab.plot(samples,
                   RGBs,
                   label=u'{0}'.format(colourspace.name),
                   linewidth=2)
    settings = {
        'title': '{0} - Transfer Functions'.format(
            ', '.join(colourspaces)),
        'x_tighten': True,
        'legend': True,
        'legend_location': 'upper left',
        'x_ticker': True,
        'y_ticker': True,
        'grid': True,
        'limits': [0, 1, 0, 1]}
    settings.update(kwargs)
    bounding_box(**settings)
    aspect(**settings)
    return display(**settings)
| [
"colour.plotting.CIE_1931_chromaticity_diagram_plot",
"colour.plotting.display",
"numpy.amin",
"colour.plotting.figure_size",
"colour.models.RGB_COLOURSPACES.get",
"random.randint",
"pylab.plot",
"colour.plotting.aspect",
"numpy.amax",
"colour.plotting.bounding_box",
"numpy.linspace",
"colour.... | [((1692, 1711), 'colour.plotting.figure_size', 'figure_size', (['(8, 8)'], {}), '((8, 8))\n', (1703, 1711), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), ((6167, 6186), 'colour.plotting.figure_size', 'figure_size', (['(8, 8)'], {}), '((8, 8))\n', (6178, 6186), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), ((5459, 5483), 'colour.plotting.bounding_box', 'bounding_box', ([], {}), '(**settings)\n', (5471, 5483), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), ((5488, 5506), 'colour.plotting.aspect', 'aspect', ([], {}), '(**settings)\n', (5494, 5506), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), ((5519, 5538), 'colour.plotting.display', 'display', ([], {}), '(**settings)\n', (5526, 5538), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), ((6851, 6874), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (6862, 6874), True, 'import numpy as np\n'), ((7693, 7717), 'colour.plotting.bounding_box', 'bounding_box', ([], {}), '(**settings)\n', (7705, 7717), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), ((7722, 7740), 'colour.plotting.aspect', 'aspect', ([], {}), '(**settings)\n', (7728, 7740), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), ((7753, 7772), 'colour.plotting.display', 'display', ([], {}), '(**settings)\n', (7760, 7772), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), 
((1427, 1460), 'colour.models.RGB_COLOURSPACES.get', 'RGB_COLOURSPACES.get', (['colourspace'], {}), '(colourspace)\n', (1447, 1460), False, 'from colour.models import POINTER_GAMUT_DATA, RGB_COLOURSPACES\n'), ((2518, 2532), 'colour.plotting.get_cmfs', 'get_cmfs', (['cmfs'], {}), '(cmfs)\n', (2526, 2532), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), ((2693, 2739), 'colour.plotting.CIE_1931_chromaticity_diagram_plot', 'CIE_1931_chromaticity_diagram_plot', ([], {}), '(**settings)\n', (2727, 2739), False, 'from colour.plotting import CIE_1931_chromaticity_diagram_plot, aspect, bounding_box, display, figure_size, get_cmfs\n'), ((2990, 3056), 'pylab.plot', 'pylab.plot', (['x', 'y'], {'label': '"""Pointer Gamut"""', 'color': '"""0.95"""', 'linewidth': '(2)'}), "(x, y, label='Pointer Gamut', color='0.95', linewidth=2)\n", (3000, 3056), False, 'import pylab\n'), ((3161, 3228), 'pylab.plot', 'pylab.plot', (['[x[-1], x[0]]', '[y[-1], y[0]]'], {'color': '"""0.95"""', 'linewidth': '(2)'}), "([x[-1], x[0]], [y[-1], y[0]], color='0.95', linewidth=2)\n", (3171, 3228), False, 'import pylab\n'), ((3709, 3841), 'pylab.plot', 'pylab.plot', (['[whitepoint[0], whitepoint[0]]', '[whitepoint[1], whitepoint[1]]'], {'color': '(r, g, b)', 'label': 'colourspace.name', 'linewidth': '(2)'}), '([whitepoint[0], whitepoint[0]], [whitepoint[1], whitepoint[1]],\n color=(r, g, b), label=colourspace.name, linewidth=2)\n', (3719, 3841), False, 'import pylab\n'), ((3942, 4055), 'pylab.plot', 'pylab.plot', (['[whitepoint[0], whitepoint[0]]', '[whitepoint[1], whitepoint[1]]', '"""o"""'], {'color': '(r, g, b)', 'linewidth': '(2)'}), "([whitepoint[0], whitepoint[0]], [whitepoint[1], whitepoint[1]],\n 'o', color=(r, g, b), linewidth=2)\n", (3952, 4055), False, 'import pylab\n'), ((4156, 4279), 'pylab.plot', 'pylab.plot', (['[primaries[0, 0], primaries[1, 0]]', '[primaries[0, 1], primaries[1, 1]]', '"""o-"""'], {'color': '(r, 
g, b)', 'linewidth': '(2)'}), "([primaries[0, 0], primaries[1, 0]], [primaries[0, 1], primaries[\n 1, 1]], 'o-', color=(r, g, b), linewidth=2)\n", (4166, 4279), False, 'import pylab\n'), ((4379, 4502), 'pylab.plot', 'pylab.plot', (['[primaries[1, 0], primaries[2, 0]]', '[primaries[1, 1], primaries[2, 1]]', '"""o-"""'], {'color': '(r, g, b)', 'linewidth': '(2)'}), "([primaries[1, 0], primaries[2, 0]], [primaries[1, 1], primaries[\n 2, 1]], 'o-', color=(r, g, b), linewidth=2)\n", (4389, 4502), False, 'import pylab\n'), ((4602, 4725), 'pylab.plot', 'pylab.plot', (['[primaries[2, 0], primaries[0, 0]]', '[primaries[2, 1], primaries[0, 1]]', '"""o-"""'], {'color': '(r, g, b)', 'linewidth': '(2)'}), "([primaries[2, 0], primaries[0, 0]], [primaries[2, 1], primaries[\n 0, 1]], 'o-', color=(r, g, b), linewidth=2)\n", (4612, 4725), False, 'import pylab\n'), ((4845, 4869), 'numpy.amin', 'np.amin', (['primaries[:, 0]'], {}), '(primaries[:, 0])\n', (4852, 4869), True, 'import numpy as np\n'), ((4902, 4926), 'numpy.amin', 'np.amin', (['primaries[:, 1]'], {}), '(primaries[:, 1])\n', (4909, 4926), True, 'import numpy as np\n'), ((4959, 4983), 'numpy.amax', 'np.amax', (['primaries[:, 0]'], {}), '(primaries[:, 0])\n', (4966, 4983), True, 'import numpy as np\n'), ((5016, 5040), 'numpy.amax', 'np.amax', (['primaries[:, 1]'], {}), '(primaries[:, 1])\n', (5023, 5040), True, 'import numpy as np\n'), ((1638, 1661), 'colour.models.RGB_COLOURSPACES.keys', 'RGB_COLOURSPACES.keys', ([], {}), '()\n', (1659, 1661), False, 'from colour.models import POINTER_GAMUT_DATA, RGB_COLOURSPACES\n'), ((3498, 3521), 'random.randint', 'random.randint', (['(64)', '(224)'], {}), '(64, 224)\n', (3512, 3521), False, 'import random\n')] |
from torch.utils import data as data
from torchvision.transforms.functional import normalize
from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb, paired_paths_from_meta_info_file
from basicsr.data.transforms import augment, paired_random_crop
from basicsr.utils import FileClient, imfrombytes, img2tensor
from basicsr.utils.matlab_functions import rgb2ycbcr
from basicsr.utils.registry import DATASET_REGISTRY
import os
import torch
import cv2
import numpy as np
@DATASET_REGISTRY.register()
class Iter_reconstruction(data.Dataset):
    """Paired sinogram / image dataset for iterative CT reconstruction.

    Reads ``.npy`` sinograms from ``opt['sino_dir']`` and matching
    ground-truth images from ``opt['img_dir']``.  File stems must be
    integers and must match one-to-one between the two directories.
    """

    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        self.sino_dir = opt["sino_dir"]
        self.img_dir = opt["img_dir"]
        self.sino_names = os.listdir(self.sino_dir)
        self.img_names = os.listdir(self.img_dir)
        # Validate with a real exception instead of 'assert', which is
        # silently stripped when Python runs with -O.
        if len(self.sino_names) != len(self.img_names):
            raise ValueError(
                f"number of sino is not equal number of images \n "
                f"number of sino: {len(self.sino_names)} \n number of images: {len(self.img_names)}")
        # Sort both listings numerically by the integer file stem so the
        # i-th sinogram corresponds to the i-th image.
        self.sino_names = sorted(self.sino_names, key=lambda x: int(x[:-4]))
        self.img_names = sorted(self.img_names, key=lambda x: int(x[:-4]))
        for sino_name, img_name in zip(self.sino_names, self.img_names):
            if sino_name[:-4] != img_name[:-4]:
                # BUG FIX: the original raised a plain string, which is a
                # TypeError in Python 3 (exceptions must derive from
                # BaseException) and masked the actual mismatch.
                raise ValueError(
                    f"datasets are not paired: {sino_name} vs {img_name}")
        self.sino_paths = [os.path.join(self.sino_dir, name) for name in self.sino_names]
        self.img_paths = [os.path.join(self.img_dir, name) for name in self.img_names]

    def __getitem__(self, index):
        # The sinogram is the network input; the image is the ground truth.
        sino_lq_path = self.sino_paths[index]
        sino_lq = self._imread_sino(sino_lq_path)
        img_path = self.img_paths[index]
        img_gt = self._imread_img(img_path)
        return {'sino': sino_lq, 'img_gt': img_gt, 'sino_path': sino_lq_path, 'img_gt_path': img_path}

    def __len__(self):
        return len(self.sino_paths)

    def _imread_img(self, path):
        """Load an image as a normalised greyscale float tensor of shape (1, H, W)."""
        img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) / 255.
        return torch.from_numpy(img).view(1, img.shape[0], img.shape[1]).float()

    def _imread_sino(self, path):
        """Load a ``.npy`` sinogram as a float tensor of shape (1, H, W)."""
        sino = np.load(path, allow_pickle=True)
        return torch.from_numpy(sino).view(1, sino.shape[0], sino.shape[1]).float()
"numpy.load",
"cv2.cvtColor",
"cv2.imread",
"basicsr.utils.registry.DATASET_REGISTRY.register",
"os.path.join",
"os.listdir",
"torch.from_numpy"
] | [((498, 525), 'basicsr.utils.registry.DATASET_REGISTRY.register', 'DATASET_REGISTRY.register', ([], {}), '()\n', (523, 525), False, 'from basicsr.utils.registry import DATASET_REGISTRY\n'), ((750, 775), 'os.listdir', 'os.listdir', (['self.sino_dir'], {}), '(self.sino_dir)\n', (760, 775), False, 'import os\n'), ((801, 825), 'os.listdir', 'os.listdir', (['self.img_dir'], {}), '(self.img_dir)\n', (811, 825), False, 'import os\n'), ((1995, 2011), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (2005, 2011), False, 'import cv2\n'), ((2202, 2234), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (2209, 2234), True, 'import numpy as np\n'), ((1371, 1404), 'os.path.join', 'os.path.join', (['self.sino_dir', 'name'], {}), '(self.sino_dir, name)\n', (1383, 1404), False, 'import os\n'), ((1460, 1492), 'os.path.join', 'os.path.join', (['self.img_dir', 'name'], {}), '(self.img_dir, name)\n', (1472, 1492), False, 'import os\n'), ((2026, 2063), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2038, 2063), False, 'import cv2\n'), ((2086, 2107), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (2102, 2107), False, 'import torch\n'), ((2250, 2272), 'torch.from_numpy', 'torch.from_numpy', (['sino'], {}), '(sino)\n', (2266, 2272), False, 'import torch\n')] |
from collections import Counter
import inspect
import random
import numpy
import pandas
import tensorflow as tf
import models
def reduce_dimensionality(expression_df, Z_df):
    '''Convert a dataframe of gene expression data from gene space to the low
    dimensional representation specified by Z_df

    Arguments
    ---------
    expression_df: pandas.DataFrame
        The expression dataframe (genes x samples) to transform to the low
        dimensional representation specified by Z_df
    Z_df: pandas.DataFrame
        The matrix that does the conversion from gene space to the low
        dimensional representation specified by Z_df

    Returns
    -------
    reduced_matrix: numpy.array
        The result (samples x latent dims) from translating expression_df into
        the low dimensional representation specified by Z_df
    '''
    # Sort Z_df only when needed, but ALWAYS filter and sort expression_df.
    # BUG FIX: the original only filtered/sorted expression_df inside the
    # "Z_df is unsorted" branch, so a pre-sorted Z_df silently skipped gene
    # alignment and produced a misaligned (or shape-mismatched) product.
    # Also: Index.is_monotonic was removed in pandas 2.0; use
    # is_monotonic_increasing, which is what it aliased.
    if not Z_df.index.is_monotonic_increasing:
        Z_df = Z_df.sort_index()
    expression_df = expression_df[expression_df.index.isin(Z_df.index)]
    expression_df = expression_df.sort_index()

    # Both indices are now identical and alphabetically ordered, so the
    # labels can be dropped and the raw matrices multiplied.
    expression_matrix = expression_df.values
    Z_matrix = Z_df.values
    reduced_matrix = numpy.matmul(expression_matrix.T, Z_matrix)
    return reduced_matrix
def get_study_list(expression_df):
    '''Retrieve the study id for each sample in the provided dataframe

    Arguments
    ---------
    expression_df: pandas.DataFrame
        Dataframe whose columns are named in study.sample format

    Returns
    -------
    studies: list of strs
        The study each column (sample) belongs to, in column order
    '''
    # Everything before the first '.' in a column name is the study id.
    return [column.split('.')[0] for column in expression_df.columns]
def prepare_input_data(Z_df, healthy_df, disease_df, validation_fraction=.2):
    '''Convert the dataframes from run_plier and download_categorized_data into
    training and validation datasets with accompanying labels

    Arguments
    ---------
    Z_df: pandas.DataFrame
        The matrix to convert the expression data into a lower dimensional representation
    healthy_df: pandas.DataFrame
        The dataframe containing healthy gene expression samples
    disease_df: pandas.DataFrame
        The dataframe containing unhealthy gene expression samples
    validation_fraction: float
        The fraction of the dataset to hold out as validation data
        (previously hard-coded to .2; the default keeps that behavior)

    Returns
    -------
    train_X: numpy.array
        A numpy array containing the training gene expression data
    train_Y: numpy.array
        The labels corresponding to whether each sample represents healthy or unhealthy
        gene expression (0 = healthy, 1 = disease)
    val_X: numpy.array
        The gene expression data to be held out to evaluate model training
    val_Y: numpy.array
        The labels for val_X
    train_studies: list of strs
        The list containing which study each sample of train_X is from
    val_studies: list of strs
        The list containing which study each sample of val_X is from
    '''
    healthy_train, healthy_val, disease_train, disease_val = get_validation_set(
        healthy_df, disease_df, validation_fraction)

    healthy_train_studies = get_study_list(healthy_train)
    healthy_val_studies = get_study_list(healthy_val)
    disease_train_studies = get_study_list(disease_train)
    disease_val_studies = get_study_list(disease_val)

    # Project each split from gene space into the latent representation.
    healthy_train = reduce_dimensionality(healthy_train, Z_df)
    healthy_val = reduce_dimensionality(healthy_val, Z_df)
    disease_train = reduce_dimensionality(disease_train, Z_df)
    disease_val = reduce_dimensionality(disease_val, Z_df)

    # Healthy samples are labeled 0, disease samples 1.
    healthy_train_labels = numpy.zeros(healthy_train.shape[0])
    healthy_val_labels = numpy.zeros(healthy_val.shape[0])
    disease_train_labels = numpy.ones(disease_train.shape[0])
    disease_val_labels = numpy.ones(disease_val.shape[0])

    # Plain concatenation instead of the previous in-place extend calls,
    # which required renaming variables to stay readable.
    train_studies = healthy_train_studies + disease_train_studies
    val_studies = healthy_val_studies + disease_val_studies

    train_X = numpy.concatenate([healthy_train, disease_train])
    train_Y = numpy.concatenate([healthy_train_labels, disease_train_labels])
    val_X = numpy.concatenate([healthy_val, disease_val])
    val_Y = numpy.concatenate([healthy_val_labels, disease_val_labels])

    return train_X, train_Y, val_X, val_Y, train_studies, val_studies
def load_data_and_studies(Z_file_path, healthy_file_path, disease_file_path):
    '''Load the three tab-separated inputs and process them into training data

    Arguments
    ---------
    Z_file_path: str or Path object
        Path to the matrix transforming expression data into a low
        dimensional representation
    healthy_file_path: str or Path object
        Path to the healthy gene expression data
    disease_file_path: str or Path object
        Path to the unhealthy gene expression data

    Returns
    -------
    The six values produced by prepare_input_data:
    train_X, train_Y, val_X, val_Y, train_studies, val_studies
    '''
    paths = (Z_file_path, healthy_file_path, disease_file_path)
    Z_df, healthy_df, disease_df = (pandas.read_csv(path, sep='\t')
                                    for path in paths)
    return prepare_input_data(Z_df, healthy_df, disease_df)
def get_larger_class_percentage(Y):
    '''Calculate the percentage of the labels that belong to the largest class

    Arguments
    ---------
    Y: numpy array
        The labels for the data

    Returns
    -------
    percentage: float
        The fraction of the labels belonging to the most frequent class
    '''
    counts = numpy.unique(Y, return_counts=True)[1]
    return counts.max() / counts.sum()
def calculate_accuracy(pred_Y, true_Y):
    '''Calculate the fraction of predicted classification labels that are correct'''
    # Subtraction + count_nonzero works for arbitrary numeric labels, where
    # logical_xor would only handle binary ones.
    mismatches = numpy.count_nonzero(numpy.subtract(pred_Y, true_Y))
    return (len(pred_Y) - mismatches) / len(pred_Y)
def get_model_list():
    '''Return the names of the model classes in the models module'''
    # Every class defined in models.py is assumed to be a model, so the
    # module's class members enumerate the available architectures.
    return [name for name, _ in inspect.getmembers(models, inspect.isclass)]
def get_model(model_name, logdir, lr):
    '''Retrieve a compiled Model object from the models.py module by name

    Arguments
    ---------
    model_name: string
        The name of the model class to retrieve
    logdir: string
        The path to the directory to save logs to
        NOTE(review): currently unused in this function; kept for interface
        compatibility with callers
    lr: float
        The learning rate to be used by the optimizer

    Returns
    -------
    model: Model
        The compiled model instance with name model_name
    '''
    # Look up the class by name in the models module and instantiate it.
    model_class = getattr(models, model_name)
    model_instance = model_class()
    model_instance.compile(
        optimizer=tf.keras.optimizers.Adam(lr=lr),
        loss='binary_crossentropy',
        metrics=['accuracy',
                 tf.keras.metrics.AUC(curve='ROC'),
                 tf.keras.metrics.AUC(curve='PR')],
    )
    return model_instance
def get_study_counter(df):
    '''Count how many samples each study contributes to the given DataFrame

    Column names are expected in study.sample format; everything before the
    first dot is treated as the study id.
    '''
    return Counter(column.split('.')[0] for column in df.columns)
def get_validation_set(healthy_df, disease_df, validation_fraction=.2):
    '''Split a dataframe into training and validation data by extracting studies
    that contain a certain fraction of the samples

    Arguments
    ---------
    healthy_df: pandas.DataFrame
        A dataframe where the rows represent genes and columns represent samples.
        The column names should be of the format 'studyid.runid' to allow the study
        information to be used. healthy_df contains samples with healthy gene expression
    disease_df: pandas.DataFrame
        A dataframe where the rows represent genes and columns represent samples.
        The column names should be of the format 'studyid.runid' to allow the study
        information to be used. disease_df contains samples with unhealthy gene expression
    validation_fraction: float
        The fraction of the dataset to be pulled out as validation data

    Returns
    -------
    healthy_train: pandas.DataFrame
        Healthy samples to use for training
    healthy_val: pandas.DataFrame
        Healthy samples held out for validation
    disease_train: pandas.DataFrame
        Disease samples to use for training
    disease_val: pandas.DataFrame
        Disease samples held out for validation
    '''
    healthy_counter = get_study_counter(healthy_df)
    disease_counter = get_study_counter(disease_df)
    # Target number of validation samples for each class.
    healthy_target_count = len(healthy_df.columns) * validation_fraction
    disease_target_count = len(disease_df.columns) * validation_fraction
    healthy_samples_so_far = 0
    disease_samples_so_far = 0
    val_studies = []
    # Consider every study present in either class exactly once; splitting is
    # done at study granularity so a study never straddles train and val.
    all_studies = set(healthy_counter.keys())
    all_studies.update(disease_counter.keys())
    all_studies = list(all_studies)
    # Shuffle so the chosen validation studies differ between runs.
    random.shuffle(all_studies)
    for study in all_studies:
        healthy_samples = healthy_counter[study]
        disease_samples = disease_counter[study]
        # Prevent the validation set from being too much larger than the target
        # (a 5% overshoot is tolerated for either class).
        if (healthy_samples + healthy_samples_so_far > healthy_target_count * 1.05
                or disease_samples + disease_samples_so_far > disease_target_count * 1.05):
            continue
        val_studies.append(study)
        healthy_samples_so_far += healthy_samples
        disease_samples_so_far += disease_samples
        # Stop once both classes have reached their validation quota.
        if (healthy_samples_so_far >= healthy_target_count
                and disease_samples_so_far >= disease_target_count):
            break
    healthy_train, healthy_val = get_val_and_train_subset(healthy_df, val_studies)
    disease_train, disease_val = get_val_and_train_subset(disease_df, val_studies)
    return healthy_train, healthy_val, disease_train, disease_val
def get_val_and_train_subset(df, val_studies):
    ''' Get the subset of a dataframe whose column names contain the strings in val_studies

    Arguments
    ---------
    df: pandas.DataFrame
        The dataframe to be split
    val_studies: list of str
        The names of the studies that should go in the validation subset

    Returns
    -------
    train_df: pandas.DataFrame
        The dataframe containing the fraction of df to be used as training data
    val_df: pandas.DataFrame
        The dataframe containing the fraction of df to be used as validation data
    '''
    # Validation columns, grouped per study (substring match on column name).
    val_columns = [column
                   for study in val_studies
                   for column in df.columns
                   if study in column]
    val_df = df[val_columns]
    train_df = df[df.columns.difference(val_columns)]
    return train_df, val_df
| [
"tensorflow.keras.metrics.AUC",
"numpy.subtract",
"pandas.read_csv",
"random.shuffle",
"numpy.unique",
"numpy.zeros",
"numpy.ones",
"tensorflow.keras.optimizers.Adam",
"numpy.matmul",
"collections.Counter",
"numpy.concatenate",
"inspect.getmembers"
] | [((1397, 1440), 'numpy.matmul', 'numpy.matmul', (['expression_matrix.T', 'Z_matrix'], {}), '(expression_matrix.T, Z_matrix)\n', (1409, 1440), False, 'import numpy\n'), ((3970, 4005), 'numpy.zeros', 'numpy.zeros', (['healthy_train.shape[0]'], {}), '(healthy_train.shape[0])\n', (3981, 4005), False, 'import numpy\n'), ((4031, 4064), 'numpy.zeros', 'numpy.zeros', (['healthy_val.shape[0]'], {}), '(healthy_val.shape[0])\n', (4042, 4064), False, 'import numpy\n'), ((4092, 4126), 'numpy.ones', 'numpy.ones', (['disease_train.shape[0]'], {}), '(disease_train.shape[0])\n', (4102, 4126), False, 'import numpy\n'), ((4152, 4184), 'numpy.ones', 'numpy.ones', (['disease_val.shape[0]'], {}), '(disease_val.shape[0])\n', (4162, 4184), False, 'import numpy\n'), ((4474, 4523), 'numpy.concatenate', 'numpy.concatenate', (['[healthy_train, disease_train]'], {}), '([healthy_train, disease_train])\n', (4491, 4523), False, 'import numpy\n'), ((4538, 4601), 'numpy.concatenate', 'numpy.concatenate', (['[healthy_train_labels, disease_train_labels]'], {}), '([healthy_train_labels, disease_train_labels])\n', (4555, 4601), False, 'import numpy\n'), ((4615, 4660), 'numpy.concatenate', 'numpy.concatenate', (['[healthy_val, disease_val]'], {}), '([healthy_val, disease_val])\n', (4632, 4660), False, 'import numpy\n'), ((4673, 4732), 'numpy.concatenate', 'numpy.concatenate', (['[healthy_val_labels, disease_val_labels]'], {}), '([healthy_val_labels, disease_val_labels])\n', (4690, 4732), False, 'import numpy\n'), ((6066, 6104), 'pandas.read_csv', 'pandas.read_csv', (['Z_file_path'], {'sep': '"""\t"""'}), "(Z_file_path, sep='\\t')\n", (6081, 6104), False, 'import pandas\n'), ((6123, 6167), 'pandas.read_csv', 'pandas.read_csv', (['healthy_file_path'], {'sep': '"""\t"""'}), "(healthy_file_path, sep='\\t')\n", (6138, 6167), False, 'import pandas\n'), ((6185, 6229), 'pandas.read_csv', 'pandas.read_csv', (['disease_file_path'], {'sep': '"""\t"""'}), "(disease_file_path, sep='\\t')\n", (6200, 6229), False, 
'import pandas\n'), ((6630, 6665), 'numpy.unique', 'numpy.unique', (['Y'], {'return_counts': '(True)'}), '(Y, return_counts=True)\n', (6642, 6665), False, 'import numpy\n'), ((7352, 7395), 'inspect.getmembers', 'inspect.getmembers', (['models', 'inspect.isclass'], {}), '(models, inspect.isclass)\n', (7370, 7395), False, 'import inspect\n'), ((8051, 8082), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (8075, 8082), True, 'import tensorflow as tf\n'), ((8096, 8129), 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {'curve': '"""ROC"""'}), "(curve='ROC')\n", (8116, 8129), True, 'import tensorflow as tf\n'), ((8142, 8174), 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {'curve': '"""PR"""'}), "(curve='PR')\n", (8162, 8174), True, 'import tensorflow as tf\n'), ((8587, 8603), 'collections.Counter', 'Counter', (['studies'], {}), '(studies)\n', (8594, 8603), False, 'from collections import Counter\n'), ((10307, 10334), 'random.shuffle', 'random.shuffle', (['all_studies'], {}), '(all_studies)\n', (10321, 10334), False, 'import random\n'), ((6956, 6986), 'numpy.subtract', 'numpy.subtract', (['pred_Y', 'true_Y'], {}), '(pred_Y, true_Y)\n', (6970, 6986), False, 'import numpy\n')] |
#!/usr/bin/env python
import os
import sys

# Package metadata handed to setup() in setup_package().
descr = """mapping procedure for numerical renormalization group."""

DISTNAME = 'nrgmap'
DESCRIPTION = descr
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
    LONG_DESCRIPTION = f.read()
# BUG FIX: these two assignments previously ended in a stray trailing comma,
# which silently turned the strings into one-element tuples and corrupted the
# generated package metadata.
MAINTAINER = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
URL = 'http://github.com/GiggleLiu/nrg_mapping'
LICENSE = 'MIT'
DOWNLOAD_URL = URL
PACKAGE_NAME = 'nrgmap'
# Extra keyword arguments passed straight through to setup(); the trove
# classifiers advertise supported platforms and Python versions.
EXTRA_INFO = dict(
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Operating System :: MacOS',
        'Programming Language :: Fortran',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ]
)
try:
    import setuptools  # If you want to enable 'python setup.py develop'
    EXTRA_INFO.update(dict(
        zip_safe=False,  # the package can run out of an .egg file
        include_package_data=True,
    ))
except ImportError:
    # Only a missing setuptools is expected here; the previous bare
    # 'except:' would have swallowed unrelated errors as well.
    print('setuptools module not found.')
    print("Install setuptools if you want to enable \
'python setup.py develop'.")
def configuration(parent_package='', top_path=None, package_name=DISTNAME):
    """Return the numpy.distutils Configuration describing this package."""
    # A leftover MANIFEST file can mask changes to MANIFEST.in; clear it out.
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')

    from numpy.distutils.misc_util import Configuration

    config = Configuration(None, parent_package, top_path)
    # Suppress the non-useful "Ignoring attempt to set 'name' (from ...)"
    # message and delegate option handling to sub-packages.
    options = dict(
        ignore_setup_xxx_py=True,
        assume_default_configuration=True,
        delegate_options_to_subpackages=True,
        quiet=True,
    )
    config.set_options(**options)
    config.add_subpackage(PACKAGE_NAME)
    return config
def get_version():
    """Obtain the version number.

    Reads ``__version__`` from ``<PACKAGE_NAME>/version.py`` without
    importing the package itself.

    Note: the original implementation used ``imp.load_source``; the ``imp``
    module is deprecated since Python 3.4 and removed in 3.12, so this uses
    ``importlib.util`` instead.
    """
    import importlib.util
    path = os.path.join(PACKAGE_NAME, 'version.py')
    spec = importlib.util.spec_from_file_location('version', path)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod.__version__
def setup_package():
    """Assemble the distribution metadata and invoke the right setup().

    For purely informational commands (--help, --version, egg_info, clean)
    NumPy is not required, so setuptools'/distutils' setup is used; the
    real build goes through numpy.distutils so Fortran sources compile.
    """
    metadata = dict(
        name=DISTNAME,
        maintainer=MAINTAINER,
        maintainer_email=MAINTAINER_EMAIL,
        description=DESCRIPTION,
        license=LICENSE,
        url=URL,
        download_url=DOWNLOAD_URL,
        long_description=LONG_DESCRIPTION,
        version=get_version(),
        install_requires=[
            'numpy',
            'scipy',
            'gmpy2',
            # 'mpi4py', #recommended
            # 'mkl-service'
            # 'pygraphviz'
        ],
        # test_suite="nose.collector",
        **EXTRA_INFO
    )

    if (len(sys.argv) >= 2 and
            ('--help' in sys.argv[1:] or sys.argv[1]
             in ('--help-commands', 'egg_info', '--version', 'clean'))):
        # For these actions, NumPy is not required.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup
        # NOTE: the original re-assigned metadata['version'] = get_version()
        # here; that was redundant -- it is already set above.
    else:
        metadata['configuration'] = configuration
        from numpy.distutils.core import setup

    setup(**metadata)
if __name__ == "__main__":
    # Allow `python setup.py <command>` to drive the build directly.
    setup_package()
| [
"os.remove",
"distutils.core.setup",
"os.path.dirname",
"os.path.exists",
"numpy.distutils.misc_util.Configuration",
"os.path.join"
] | [((1558, 1584), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (1572, 1584), False, 'import os\n'), ((1686, 1731), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (1699, 1731), False, 'from numpy.distutils.misc_util import Configuration\n'), ((3324, 3341), 'distutils.core.setup', 'setup', ([], {}), '(**metadata)\n', (3329, 3341), False, 'from distutils.core import setup\n'), ((1594, 1615), 'os.remove', 'os.remove', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (1603, 1615), False, 'import os\n'), ((2145, 2185), 'os.path.join', 'os.path.join', (['PACKAGE_NAME', '"""version.py"""'], {}), "(PACKAGE_NAME, 'version.py')\n", (2157, 2185), False, 'import os\n'), ((177, 202), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (192, 202), False, 'import os\n')] |
import torch
import numpy as np
from mmdet.models import BottleNeck
# The original script re-assigned this configuration stanza 16 times in a
# row (apparently once per BottleNeck layer of the network being debugged).
# Only the last stanza is ever read by the code below, so the dead
# re-assignments are collapsed to the final effective values.
ch_in = 2048
ch_out = 512
stride = 1
shortcut = True
variant = 'd'
groups = 1
base_width = 64
lr = 0.5
norm_type = 'bn'
norm_decay = 0.0
freeze_norm = False
dcn_v2 = False
std_senet = False
# Backend settings for a reproducible comparison against the Paddle reference:
torch.backends.cudnn.benchmark = True  # Improves training speed.
torch.backends.cuda.matmul.allow_tf32 = False  # Forbid tf32 in matmul so fp32 results are exact.
torch.backends.cudnn.allow_tf32 = False  # Forbid tf32 in cuDNN convolutions for the same reason.
# Build a single ResNet bottleneck block using the configuration variables
# assigned above (only the last assignment stanza takes effect).
model = BottleNeck(ch_in=ch_in,
                   ch_out=ch_out,
                   stride=stride,
                   shortcut=shortcut,
                   variant=variant,
                   groups=groups,
                   base_width=base_width,
                   lr=lr,
                   norm_type=norm_type,
                   norm_decay=norm_decay,
                   freeze_norm=freeze_norm,
                   dcn_v2=dcn_v2,
                   std_senet=std_senet,
                   )
model.train()

need_clip = False
base_lr = 0.00000001 * 1.0  # very small learning rate (1e-8)
param_groups = []
base_wd = 0.0005  # weight decay
# base_wd = 0.0
momentum = 0.9
# Whether to clip gradients (translated from the original Chinese comment).
need_clip = False  # NOTE: re-assigns the same value as above; this one wins.
clip_norm = 1000000.0
# need_clip = True
# clip_norm = 35.0
model.add_param_group(param_groups, base_lr, base_wd, need_clip, clip_norm)
optimizer = torch.optim.SGD(param_groups, lr=base_lr, momentum=momentum, weight_decay=base_wd)

# Load the weights exported from the Paddle run, then freeze BatchNorm so the
# forward pass is deterministic.
model.load_state_dict(torch.load("53_00.pth", map_location=torch.device('cpu')))
model.fix_bn()
use_gpu = True
if use_gpu:
    model = model.cuda()

# Reference activations/gradients dumped by the Paddle implementation.
dic2 = np.load('53.npz')
print(torch.__version__)
# Compare forward activations and backward gradients against the Paddle
# reference, batch by batch.  The original code repeated the identical
# gradient-check stanza three times (branch2a / branch2b / branch2c); the
# duplication is collapsed into a loop that prints the same `ddd=` lines in
# the same order.
for batch_idx in range(8):
    print('======================== batch_%.3d ========================' % batch_idx)
    optimizer.zero_grad(set_to_none=True)
    x = dic2['batch_%.3d.x' % batch_idx]
    y_paddle = dic2['batch_%.3d.y' % batch_idx]

    x = torch.Tensor(x)
    if use_gpu:
        x = x.cuda()
    x.requires_grad_(True)
    y = model(x)
    # Squared error between PyTorch and Paddle forward outputs.
    y_pytorch = y.cpu().detach().numpy()
    ddd = np.sum((y_pytorch - y_paddle) ** 2)
    print('ddd=%.6f' % ddd)

    loss = y.sum()
    loss.backward()

    # NOTE: these are the gradients *before* clipping; Paddle cannot expose
    # the clipped gradients, so the comparison happens here.
    for branch_name in ('branch2a', 'branch2b', 'branch2c'):
        branch = getattr(model, branch_name)
        conv_w_grad_paddle = dic2['batch_%.3d.%s_conv_w_grad' % (batch_idx, branch_name)]
        conv_w_grad_pytorch = branch.conv.weight.grad.cpu().detach().numpy()
        ddd = np.mean((conv_w_grad_pytorch - conv_w_grad_paddle) ** 2)
        print('ddd=%.6f' % ddd)
        if not freeze_norm:
            norm_w_grad_paddle = dic2['batch_%.3d.%s_norm_w_grad' % (batch_idx, branch_name)]
            norm_w_grad_pytorch = branch.norm.weight.grad.cpu().detach().numpy()
            ddd = np.mean((norm_w_grad_pytorch - norm_w_grad_paddle) ** 2)
            print('ddd=%.6f' % ddd)
            norm_b_grad_paddle = dic2['batch_%.3d.%s_norm_b_grad' % (batch_idx, branch_name)]
            norm_b_grad_pytorch = branch.norm.bias.grad.cpu().detach().numpy()
            ddd = np.mean((norm_b_grad_pytorch - norm_b_grad_paddle) ** 2)
            print('ddd=%.6f' % ddd)

    # Gradient clipping (translated from the original Chinese comment).
    if need_clip:
        for param_group in optimizer.param_groups:
            if param_group['need_clip']:
                torch.nn.utils.clip_grad_norm_(param_group['params'], max_norm=param_group['clip_norm'], norm_type=2)
    optimizer.step()

torch.save(model.state_dict(), "53_08.pth")
print(torch.__version__)
print()
| [
"numpy.load",
"numpy.sum",
"torch.nn.utils.clip_grad_norm_",
"torch.Tensor",
"numpy.mean",
"mmdet.models.BottleNeck",
"torch.device",
"torch.optim.SGD"
] | [((3392, 3645), 'mmdet.models.BottleNeck', 'BottleNeck', ([], {'ch_in': 'ch_in', 'ch_out': 'ch_out', 'stride': 'stride', 'shortcut': 'shortcut', 'variant': 'variant', 'groups': 'groups', 'base_width': 'base_width', 'lr': 'lr', 'norm_type': 'norm_type', 'norm_decay': 'norm_decay', 'freeze_norm': 'freeze_norm', 'dcn_v2': 'dcn_v2', 'std_senet': 'std_senet'}), '(ch_in=ch_in, ch_out=ch_out, stride=stride, shortcut=shortcut,\n variant=variant, groups=groups, base_width=base_width, lr=lr, norm_type\n =norm_type, norm_decay=norm_decay, freeze_norm=freeze_norm, dcn_v2=\n dcn_v2, std_senet=std_senet)\n', (3402, 3645), False, 'from mmdet.models import BottleNeck\n'), ((4353, 4440), 'torch.optim.SGD', 'torch.optim.SGD', (['param_groups'], {'lr': 'base_lr', 'momentum': 'momentum', 'weight_decay': 'base_wd'}), '(param_groups, lr=base_lr, momentum=momentum, weight_decay=\n base_wd)\n', (4368, 4440), False, 'import torch\n'), ((4593, 4610), 'numpy.load', 'np.load', (['"""53.npz"""'], {}), "('53.npz')\n", (4600, 4610), True, 'import numpy as np\n'), ((5737, 5752), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (5749, 5752), False, 'import torch\n'), ((5887, 5922), 'numpy.sum', 'np.sum', (['((y_pytorch - y_paddle) ** 2)'], {}), '((y_pytorch - y_paddle) ** 2)\n', (5893, 5922), True, 'import numpy as np\n'), ((6129, 6203), 'numpy.mean', 'np.mean', (['((branch2a_conv_w_grad_pytorch - branch2a_conv_w_grad_paddle) ** 2)'], {}), '((branch2a_conv_w_grad_pytorch - branch2a_conv_w_grad_paddle) ** 2)\n', (6136, 6203), True, 'import numpy as np\n'), ((6789, 6863), 'numpy.mean', 'np.mean', (['((branch2b_conv_w_grad_pytorch - branch2b_conv_w_grad_paddle) ** 2)'], {}), '((branch2b_conv_w_grad_pytorch - branch2b_conv_w_grad_paddle) ** 2)\n', (6796, 6863), True, 'import numpy as np\n'), ((7449, 7523), 'numpy.mean', 'np.mean', (['((branch2c_conv_w_grad_pytorch - branch2c_conv_w_grad_paddle) ** 2)'], {}), '((branch2c_conv_w_grad_pytorch - branch2c_conv_w_grad_paddle) ** 2)\n', (7456, 
7523), True, 'import numpy as np\n'), ((6365, 6439), 'numpy.mean', 'np.mean', (['((branch2a_norm_w_grad_pytorch - branch2a_norm_w_grad_paddle) ** 2)'], {}), '((branch2a_norm_w_grad_pytorch - branch2a_norm_w_grad_paddle) ** 2)\n', (6372, 6439), True, 'import numpy as np\n'), ((6579, 6653), 'numpy.mean', 'np.mean', (['((branch2a_norm_b_grad_pytorch - branch2a_norm_b_grad_paddle) ** 2)'], {}), '((branch2a_norm_b_grad_pytorch - branch2a_norm_b_grad_paddle) ** 2)\n', (6586, 6653), True, 'import numpy as np\n'), ((7025, 7099), 'numpy.mean', 'np.mean', (['((branch2b_norm_w_grad_pytorch - branch2b_norm_w_grad_paddle) ** 2)'], {}), '((branch2b_norm_w_grad_pytorch - branch2b_norm_w_grad_paddle) ** 2)\n', (7032, 7099), True, 'import numpy as np\n'), ((7239, 7313), 'numpy.mean', 'np.mean', (['((branch2b_norm_b_grad_pytorch - branch2b_norm_b_grad_paddle) ** 2)'], {}), '((branch2b_norm_b_grad_pytorch - branch2b_norm_b_grad_paddle) ** 2)\n', (7246, 7313), True, 'import numpy as np\n'), ((7685, 7759), 'numpy.mean', 'np.mean', (['((branch2c_norm_w_grad_pytorch - branch2c_norm_w_grad_paddle) ** 2)'], {}), '((branch2c_norm_w_grad_pytorch - branch2c_norm_w_grad_paddle) ** 2)\n', (7692, 7759), True, 'import numpy as np\n'), ((7899, 7973), 'numpy.mean', 'np.mean', (['((branch2c_norm_b_grad_pytorch - branch2c_norm_b_grad_paddle) ** 2)'], {}), '((branch2c_norm_b_grad_pytorch - branch2c_norm_b_grad_paddle) ** 2)\n', (7906, 7973), True, 'import numpy as np\n'), ((4495, 4514), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4507, 4514), False, 'import torch\n'), ((8144, 8250), 'torch.nn.utils.clip_grad_norm_', 'torch.nn.utils.clip_grad_norm_', (["param_group['params']"], {'max_norm': "param_group['clip_norm']", 'norm_type': '(2)'}), "(param_group['params'], max_norm=param_group[\n 'clip_norm'], norm_type=2)\n", (8174, 8250), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
def SS_Distributions(xds_ss, xds_kma):
    """Plot per-cluster histograms of storm surge (ss), wind direction
    (Dwind) and wind speed (wind).

    One figure per variable; each figure is an n x n grid with one histogram
    panel per KMA cluster, filled column-wise.  Only the bottom row keeps
    its x axis (shared scale); all other ticks are hidden.

    Args:
        xds_ss: dataset with ``ss``, ``Dwind`` and ``wind`` variables,
            aligned with ``xds_kma.bmus``.
        xds_kma: dataset with the per-sample cluster labels ``bmus``.
    """
    clusters = np.where(np.unique(xds_kma.bmus) >= 0)[0]
    n_clusters = len(clusters)
    # BUG FIX: the original used int(np.sqrt(n_clusters + 1)), which only
    # works for perfect-square cluster counts and undershoots otherwise
    # (e.g. 5 clusters -> a 2x2 grid -> IndexError on the GridSpec).
    # ceil(sqrt(n)) guarantees n_rows * n_cols >= n_clusters.
    n_rows = int(np.ceil(np.sqrt(n_clusters)))
    n_cols = n_rows

    def _plot_grid(values, hist_kwargs, xticks, xlabel):
        # One figure: a histogram of `values` per cluster panel.
        fig = plt.figure(figsize=[20, 12])
        gs = gridspec.GridSpec(n_rows, n_cols, wspace=0.0, hspace=0.0)
        grid_row = 0
        grid_col = 0
        for ic in clusters:
            pos_cluster = np.where(xds_kma.bmus == ic)[0][:]
            ax = plt.subplot(gs[grid_row, grid_col])
            plt.hist(values[pos_cluster], histtype='stepfilled', alpha=0.5,
                     **hist_kwargs)
            ax.set_yticks([])
            if grid_row == n_rows - 1:
                # Bottom row keeps the shared x axis.
                ax.set_xticks(xticks)
                ax.set_xlabel(xlabel, fontsize=14)
            else:
                ax.set_xticks([])
            grid_row += 1
            if grid_row >= n_rows:
                grid_row = 0
                grid_col += 1

    # The three variables differ only in data, range/bins/color and ticks.
    _plot_grid(xds_ss.ss,
               dict(range=[-0.30, 0.30], bins=40, color='indigo', density=True),
               np.arange(-0.30, 0.31, step=0.1), 'SS')
    _plot_grid(xds_ss.Dwind,
               dict(range=[0, 360], bins=50, color='darkorange'),
               np.arange(0, 361, step=90), 'WDir')
    _plot_grid(xds_ss.wind,
               dict(range=[2, 20], bins=50, color='peru'),
               np.arange(5, 21, step=5), 'WSpeed')
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.arange",
"matplotlib.gridspec.GridSpec",
"numpy.unique",
"numpy.sqrt"
] | [((287, 315), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 12]'}), '(figsize=[20, 12])\n', (297, 315), True, 'import matplotlib.pyplot as plt\n'), ((324, 381), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['n_rows', 'n_cols'], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(n_rows, n_cols, wspace=0.0, hspace=0.0)\n', (341, 381), True, 'import matplotlib.gridspec as gridspec\n'), ((1116, 1144), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 12]'}), '(figsize=[20, 12])\n', (1126, 1144), True, 'import matplotlib.pyplot as plt\n'), ((1153, 1210), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['n_rows', 'n_cols'], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(n_rows, n_cols, wspace=0.0, hspace=0.0)\n', (1170, 1210), True, 'import matplotlib.gridspec as gridspec\n'), ((1920, 1948), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 12]'}), '(figsize=[20, 12])\n', (1930, 1948), True, 'import matplotlib.pyplot as plt\n'), ((1957, 2014), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['n_rows', 'n_cols'], {'wspace': '(0.0)', 'hspace': '(0.0)'}), '(n_rows, n_cols, wspace=0.0, hspace=0.0)\n', (1974, 2014), True, 'import matplotlib.gridspec as gridspec\n'), ((231, 254), 'numpy.sqrt', 'np.sqrt', (['(n_clusters + 1)'], {}), '(n_clusters + 1)\n', (238, 254), True, 'import numpy as np\n'), ((528, 563), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[grid_row, grid_col]'], {}), '(gs[grid_row, grid_col])\n', (539, 563), True, 'import matplotlib.pyplot as plt\n'), ((572, 700), 'matplotlib.pyplot.hist', 'plt.hist', (['xds_ss.ss[pos_cluster]'], {'range': '[-0.3, 0.3]', 'bins': '(40)', 'color': '"""indigo"""', 'histtype': '"""stepfilled"""', 'density': '(True)', 'alpha': '(0.5)'}), "(xds_ss.ss[pos_cluster], range=[-0.3, 0.3], bins=40, color='indigo',\n histtype='stepfilled', density=True, alpha=0.5)\n", (580, 700), True, 'import matplotlib.pyplot as plt\n'), ((1357, 1392), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['gs[grid_row, grid_col]'], {}), '(gs[grid_row, grid_col])\n', (1368, 1392), True, 'import matplotlib.pyplot as plt\n'), ((1401, 1520), 'matplotlib.pyplot.hist', 'plt.hist', (['xds_ss.Dwind[pos_cluster]'], {'range': '[0, 360]', 'bins': '(50)', 'color': '"""darkorange"""', 'histtype': '"""stepfilled"""', 'alpha': '(0.5)'}), "(xds_ss.Dwind[pos_cluster], range=[0, 360], bins=50, color=\n 'darkorange', histtype='stepfilled', alpha=0.5)\n", (1409, 1520), True, 'import matplotlib.pyplot as plt\n'), ((2161, 2196), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[grid_row, grid_col]'], {}), '(gs[grid_row, grid_col])\n', (2172, 2196), True, 'import matplotlib.pyplot as plt\n'), ((2205, 2315), 'matplotlib.pyplot.hist', 'plt.hist', (['xds_ss.wind[pos_cluster]'], {'range': '[2, 20]', 'bins': '(50)', 'color': '"""peru"""', 'histtype': '"""stepfilled"""', 'alpha': '(0.5)'}), "(xds_ss.wind[pos_cluster], range=[2, 20], bins=50, color='peru',\n histtype='stepfilled', alpha=0.5)\n", (2213, 2315), True, 'import matplotlib.pyplot as plt\n'), ((156, 179), 'numpy.unique', 'np.unique', (['xds_kma.bmus'], {}), '(xds_kma.bmus)\n', (165, 179), True, 'import numpy as np\n'), ((482, 510), 'numpy.where', 'np.where', (['(xds_kma.bmus == ic)'], {}), '(xds_kma.bmus == ic)\n', (490, 510), True, 'import numpy as np\n'), ((836, 867), 'numpy.arange', 'np.arange', (['(-0.3)', '(0.31)'], {'step': '(0.1)'}), '(-0.3, 0.31, step=0.1)\n', (845, 867), True, 'import numpy as np\n'), ((1311, 1339), 'numpy.where', 'np.where', (['(xds_kma.bmus == ic)'], {}), '(xds_kma.bmus == ic)\n', (1319, 1339), True, 'import numpy as np\n'), ((1653, 1679), 'numpy.arange', 'np.arange', (['(0)', '(361)'], {'step': '(90)'}), '(0, 361, step=90)\n', (1662, 1679), True, 'import numpy as np\n'), ((2115, 2143), 'numpy.where', 'np.where', (['(xds_kma.bmus == ic)'], {}), '(xds_kma.bmus == ic)\n', (2123, 2143), True, 'import numpy as np\n'), ((2396, 2420), 'numpy.arange', 'np.arange', (['(5)', '(21)'], {'step': 
'(5)'}), '(5, 21, step=5)\n', (2405, 2420), True, 'import numpy as np\n')] |
import numpy as np
from .Spectrogram import Spectrogram
def Spectrogram3D(t,vx,vy,vz,wind,slip,**kwargs):
    """Windowed spectra of a 3-component signal plus a unit direction
    vector built from the cross-spectral imaginary parts.

    Parameters
    ----------
    t : array
        Time axis, passed through to ``Spectrogram``.
    vx, vy, vz : array
        The three signal components.
    wind, slip
        Window length and window slip, passed through to ``Spectrogram``.
    **kwargs
        Forwarded to ``Spectrogram``.  The key ``CombineComps`` (bool,
        default False) additionally requests cross-component products
        (xy, yz, zx) in the output record.

    Returns
    -------
    Nw : int
        Number of windows.
    F : numpy.ndarray
        Frequency axis (length Nf).
    out : numpy.recarray
        Per-window records: x/y/z-prefixed copies of each component's
        spectrum fields plus the direction components kx, ky, kz.
    """
    # BUG FIX: kwargs is a dict -- the original code called it like a
    # function, kwargs('CombineComps', False), which raised TypeError
    # unconditionally.
    CombineComps = kwargs.get('CombineComps', False)

    # Spectrogram of each component; all three share Nw and F.
    Nw,F,xt = Spectrogram(t,vx,wind,slip,**kwargs)
    Nw,F,yt = Spectrogram(t,vy,wind,slip,**kwargs)
    Nw,F,zt = Spectrogram(t,vz,wind,slip,**kwargs)
    Nf = F.size

    # Direction vector from pairwise Im/Re cross terms, normalised to unit
    # length.  NOTE(review): A can be zero (purely real spectra), producing
    # NaN components -- presumably acceptable downstream; confirm.
    Jxy = xt.Comp.imag*yt.Comp.real - yt.Comp.imag*xt.Comp.real
    Jxz = xt.Comp.imag*zt.Comp.real - zt.Comp.imag*xt.Comp.real
    Jyz = yt.Comp.imag*zt.Comp.real - zt.Comp.imag*yt.Comp.real
    A = np.sqrt(Jxy**2 + Jxz**2 + Jyz**2)
    kx = Jyz/A
    ky =-Jxz/A
    kz = Jxy/A

    # Output record layout: x/y/z-prefixed copies of the per-component
    # Spectrogram fields, plus the direction components.
    dtype = [('Tspec','float64'),('xSize','int32'),('ySize','int32'),('zSize','int32'),
            ('xGood','int32'),('yGood','int32'),('zGood','int32'),
            ('xVar','float32'),('yVar','float32'),('zVar','float32'),
            ('xPow','float32',(Nf,)),('yPow','float32',(Nf,)),('zPow','float32',(Nf,)),
            ('xPha','float32',(Nf,)),('yPha','float32',(Nf,)),('zPha','float32',(Nf,)),
            ('xAmp','float32',(Nf,)),('yAmp','float32',(Nf,)),('zAmp','float32',(Nf,)),
            ('xComp','complex64',(Nf,)),('yComp','complex64',(Nf,)),('zComp','complex64',(Nf,)),
            ('kx','float32',(Nf,)),('ky','float32',(Nf,)),('kz','float32',(Nf,))]
    if CombineComps:
        # Extra fields for the cross-component products xy, yz, zx.
        dtypec = [ ('xyComp','complex64',(Nf,)),('yzComp','complex64',(Nf,)),('zxComp','complex64',(Nf,)),
                ('xyPow','float32',(Nf,)),('yzPow','float32',(Nf,)),('zxPow','float32',(Nf,)),
                ('xyPha','float32',(Nf,)),('yzPha','float32',(Nf,)),('zxPha','float32',(Nf,)),
                ('xyAmp','float32',(Nf,)),('yzAmp','float32',(Nf,)),('zxAmp','float32',(Nf,)),]
        dtype.extend(dtypec)
    out = np.recarray(Nw,dtype=dtype)

    # Copy every per-component field (except the shared window time Tspec).
    names = xt.dtype.names
    for n in names:
        if not n == 'Tspec':
            out['x'+n] = xt[n]
            out['y'+n] = yt[n]
            out['z'+n] = zt[n]
    out.Tspec = xt.Tspec
    out.kx = kx
    out.ky = ky
    out.kz = kz

    if CombineComps:
        # Cross spectrum S_ab = S_a * conj(S_b); amplitude, power and phase
        # follow from it.
        cc = ['xy','yz','zx']
        for c in cc:
            Comp = out[c[0]+'Comp'] * np.conjugate(out[c[1]+'Comp'])
            out[c+'Comp'] = Comp
            out[c+'Amp'] = np.abs(Comp)
            out[c+'Pow'] = out[c+'Amp']**2
            out[c+'Pha'] = np.arctan2(Comp.imag,Comp.real)

    # Free the intermediate per-component records.
    del xt
    del yt
    del zt
    return Nw,F,out
| [
"numpy.abs",
"numpy.arctan2",
"numpy.recarray",
"numpy.conjugate",
"numpy.sqrt"
] | [((568, 607), 'numpy.sqrt', 'np.sqrt', (['(Jxy ** 2 + Jxz ** 2 + Jyz ** 2)'], {}), '(Jxy ** 2 + Jxz ** 2 + Jyz ** 2)\n', (575, 607), True, 'import numpy as np\n'), ((1726, 1754), 'numpy.recarray', 'np.recarray', (['Nw'], {'dtype': 'dtype'}), '(Nw, dtype=dtype)\n', (1737, 1754), True, 'import numpy as np\n'), ((2148, 2160), 'numpy.abs', 'np.abs', (['Comp'], {}), '(Comp)\n', (2154, 2160), True, 'import numpy as np\n'), ((2213, 2245), 'numpy.arctan2', 'np.arctan2', (['Comp.imag', 'Comp.real'], {}), '(Comp.imag, Comp.real)\n', (2223, 2245), True, 'import numpy as np\n'), ((2077, 2107), 'numpy.conjugate', 'np.conjugate', (["out[c1 + 'Comp']"], {}), "(out[c1 + 'Comp'])\n", (2089, 2107), True, 'import numpy as np\n')] |
"""
The experiment with user cluster - booking cluster IBCF.
"""
import argparse
import logging
import pickle
import sys
from collections import Counter
import numpy as np
import pandas as pd
from sklearn.preprocessing import binarize
from ibcf.matrix_functions import get_sparse_matrix_info
from ibcf.recs import get_topk_recs
from ibcf.similarity import get_similarity_matrix
from misc.common import get_ug_data, get_bg_data
from model.build_recs_matrix import get_matrix
def hit_ratio(recs_m, testing_df, uid_to_ug, bg_iids):
    """Fraction of test (user, item) pairs whose item is found while walking
    the user's recommended booking clusters in decreasing score order.

    Users without a cluster mapping count as misses.
    """
    n_total = testing_df.shape[0]
    logging.info("# of testing instances: %s", n_total)
    n_hits = 0
    for row in testing_df.itertuples():
        ug_row = uid_to_ug.get(row.code)
        if ug_row is None:
            continue
        rec_row = recs_m[ug_row]
        # walk booking clusters from the highest score downwards
        for pos in np.argsort(rec_row.data)[::-1]:
            if row.propcode in bg_iids[rec_row.indices[pos]]:
                n_hits += 1
                break
    return n_hits / n_total
def store_data_for_eval(recs_m, testing_df, uid_to_ug, bg_iids):
    """Pickle, per test (user, item) pair, the booking-cluster recommendation
    list up to the hit and the number of distinct items seen before it.

    Output paths come from the module-level ``args`` (``top_k_iid_per_uid``
    and ``ui_bg_recs_path``).
    """
    bg_recs_per_ui = {}
    iids_before_hit = Counter()

    logging.info("Finding the number of items that should be processed before the test item will be find")
    for row in testing_df.itertuples():
        ug_row = uid_to_ug.get(row.code)
        if ug_row is None:
            continue
        rec_row = recs_m[ug_row]
        recommended_bgs = []
        seen_iids = set()
        # walk booking clusters from the highest score downwards
        for pos in np.argsort(rec_row.data)[::-1]:
            bg_id = rec_row.indices[pos]
            recommended_bgs.append(bg_id)
            if row.propcode in bg_iids[bg_id]:
                break
            seen_iids.update(bg_iids[bg_id])
        key = (row.code, row.propcode)
        iids_before_hit[key] = len(seen_iids) + 1  # +1 is the break position
        bg_recs_per_ui[key] = recommended_bgs

    logging.info("Storing the found top-k numbers to: %s", args.top_k_iid_per_uid)
    with open(args.top_k_iid_per_uid, "wb") as f:
        pickle.dump(iids_before_hit, f)

    logging.info("Storing users' bg recommendations without top-k to: %s", args.ui_bg_recs_path)
    with open(args.ui_bg_recs_path, "wb") as f:
        pickle.dump(bg_recs_per_ui, f)
def main():
    """Run the user-cluster x booking-cluster IBCF experiment end to end."""
    logging.info(u"Getting clusters data")
    uid_to_ug = get_ug_data(args.user_cluster)
    bid_to_bg, bg_iids = get_bg_data(args.booking_cluster)

    logging.info("Reading training data")
    train_df = pd.read_csv(args.training_csv)
    train_m = get_matrix(train_df, uid_to_ug, bid_to_bg)
    logging.info(u"Training matrix: %s", get_sparse_matrix_info(train_m))

    logging.info("Reading testing data")
    # repeated (user, item) actions carry no extra information for testing
    test_df = pd.read_csv(args.testing_csv)[["code", "propcode"]].drop_duplicates()

    logging.info("Preparing similarity matrix")
    similarity_m = get_similarity_matrix(train_m)

    logging.info("Testing hit ratio at top-%s", args.top_k)
    topk_recs = get_topk_recs(
        train_m,
        similarity_m,
        binarize(train_m),
        args.top_k
    )
    logging.info(u"Hit ratio: %.3f", hit_ratio(topk_recs, test_df, uid_to_ug, bg_iids))

    if args.top_k_iid_per_uid:
        full_recs = get_topk_recs(
            train_m,
            similarity_m,
            binarize(train_m)
        )
        store_data_for_eval(full_recs, test_df, uid_to_ug, bg_iids)
if __name__ == '__main__':
    # CLI: cluster-assignment paths, IBCF top-k, train/test CSVs and the
    # optional evaluation dump paths.
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-u", required=True, dest="user_cluster",
                        help=u"Path to the file containing information about user clusters")
    parser.add_argument("-b", required=True, dest="booking_cluster",
                        help=u"Path to the file containing information about booking clusters")
    parser.add_argument("-k", default=3, type=int, dest="top_k",
                        help="Number of recommended booking clusters per a user. Default: 3")
    parser.add_argument("--trf", default='training.csv', dest="training_csv",
                        help=u"Training data file name. Default: training.csv")
    parser.add_argument("--tsf", default='testing.csv', dest="testing_csv",
                        help=u"Testing data file name. Default: testing.csv")
    parser.add_argument("--ek", dest="top_k_iid_per_uid",
                        help="Path to the *.pkl where to save the value of top-k items per each user. "
                             "If specified, then the resulting recommendation per each user are stored to --er")
    parser.add_argument("--er", default="ui_bg_recs.pkl", dest="ui_bg_recs_path",
                        help="Path to the file to store users recommendations for evaluation. Check --ek. "
                             "Default: ui_bg_recs.pkl")
    parser.add_argument("--log-level", default='INFO', dest="log_level",
                        choices=['DEBUG', 'INFO', 'WARNINGS', 'ERROR'], help=u"Logging level")
    # NOTE: `args` is intentionally module-level -- main(), store_data_for_eval()
    # and the other helpers read it as a global.
    args = parser.parse_args()

    logging.basicConfig(
        format='%(asctime)s %(levelname)s:%(message)s', stream=sys.stdout, level=getattr(logging, args.log_level)
    )
    main()
| [
"pickle.dump",
"ibcf.matrix_functions.get_sparse_matrix_info",
"sklearn.preprocessing.binarize",
"argparse.ArgumentParser",
"misc.common.get_bg_data",
"pandas.read_csv",
"numpy.argsort",
"logging.info",
"ibcf.similarity.get_similarity_matrix",
"misc.common.get_ug_data",
"collections.Counter",
... | [((551, 614), 'logging.info', 'logging.info', (['"""# of testing instances: %s"""', 'testing_df.shape[0]'], {}), "('# of testing instances: %s', testing_df.shape[0])\n", (563, 614), False, 'import logging\n'), ((1115, 1124), 'collections.Counter', 'Counter', ([], {}), '()\n', (1122, 1124), False, 'from collections import Counter\n'), ((1130, 1242), 'logging.info', 'logging.info', (['"""Finding the number of items that should be processed before the test item will be find"""'], {}), "(\n 'Finding the number of items that should be processed before the test item will be find'\n )\n", (1142, 1242), False, 'import logging\n'), ((1881, 1959), 'logging.info', 'logging.info', (['"""Storing the found top-k numbers to: %s"""', 'args.top_k_iid_per_uid'], {}), "('Storing the found top-k numbers to: %s', args.top_k_iid_per_uid)\n", (1893, 1959), False, 'import logging\n'), ((2051, 2148), 'logging.info', 'logging.info', (['"""Storing users\' bg recommendations without top-k to: %s"""', 'args.ui_bg_recs_path'], {}), '("Storing users\' bg recommendations without top-k to: %s", args\n .ui_bg_recs_path)\n', (2063, 2148), False, 'import logging\n'), ((2245, 2283), 'logging.info', 'logging.info', (['u"""Getting clusters data"""'], {}), "(u'Getting clusters data')\n", (2257, 2283), False, 'import logging\n'), ((2300, 2330), 'misc.common.get_ug_data', 'get_ug_data', (['args.user_cluster'], {}), '(args.user_cluster)\n', (2311, 2330), False, 'from misc.common import get_ug_data, get_bg_data\n'), ((2356, 2389), 'misc.common.get_bg_data', 'get_bg_data', (['args.booking_cluster'], {}), '(args.booking_cluster)\n', (2367, 2389), False, 'from misc.common import get_ug_data, get_bg_data\n'), ((2395, 2432), 'logging.info', 'logging.info', (['"""Reading training data"""'], {}), "('Reading training data')\n", (2407, 2432), False, 'import logging\n'), ((2451, 2481), 'pandas.read_csv', 'pd.read_csv', (['args.training_csv'], {}), '(args.training_csv)\n', (2462, 2481), True, 'import pandas as 
pd\n'), ((2493, 2538), 'model.build_recs_matrix.get_matrix', 'get_matrix', (['training_df', 'uid_to_ug', 'bid_to_bg'], {}), '(training_df, uid_to_ug, bid_to_bg)\n', (2503, 2538), False, 'from model.build_recs_matrix import get_matrix\n'), ((2615, 2651), 'logging.info', 'logging.info', (['"""Reading testing data"""'], {}), "('Reading testing data')\n", (2627, 2651), False, 'import logging\n'), ((2804, 2847), 'logging.info', 'logging.info', (['"""Preparing similarity matrix"""'], {}), "('Preparing similarity matrix')\n", (2816, 2847), False, 'import logging\n'), ((2860, 2887), 'ibcf.similarity.get_similarity_matrix', 'get_similarity_matrix', (['tr_m'], {}), '(tr_m)\n', (2881, 2887), False, 'from ibcf.similarity import get_similarity_matrix\n'), ((2893, 2948), 'logging.info', 'logging.info', (['"""Testing hit ratio at top-%s"""', 'args.top_k'], {}), "('Testing hit ratio at top-%s', args.top_k)\n", (2905, 2948), False, 'import logging\n'), ((3391, 3487), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawTextHelpFormatter)\n', (3414, 3487), False, 'import argparse\n'), ((2018, 2045), 'pickle.dump', 'pickle.dump', (['ui_iids_cnt', 'f'], {}), '(ui_iids_cnt, f)\n', (2029, 2045), False, 'import pickle\n'), ((2200, 2226), 'pickle.dump', 'pickle.dump', (['ui_bg_recs', 'f'], {}), '(ui_bg_recs, f)\n', (2211, 2226), False, 'import pickle\n'), ((2580, 2608), 'ibcf.matrix_functions.get_sparse_matrix_info', 'get_sparse_matrix_info', (['tr_m'], {}), '(tr_m)\n', (2602, 2608), False, 'from ibcf.matrix_functions import get_sparse_matrix_info\n'), ((3014, 3028), 'sklearn.preprocessing.binarize', 'binarize', (['tr_m'], {}), '(tr_m)\n', (3022, 3028), False, 'from sklearn.preprocessing import binarize\n'), ((3256, 3270), 'sklearn.preprocessing.binarize', 'binarize', (['tr_m'], {}), '(tr_m)\n', (3264, 3270), False, 'from sklearn.preprocessing import 
binarize\n'), ((788, 812), 'numpy.argsort', 'np.argsort', (['rec_row.data'], {}), '(rec_row.data)\n', (798, 812), True, 'import numpy as np\n'), ((1503, 1527), 'numpy.argsort', 'np.argsort', (['rec_row.data'], {}), '(rec_row.data)\n', (1513, 1527), True, 'import numpy as np\n'), ((2729, 2758), 'pandas.read_csv', 'pd.read_csv', (['args.testing_csv'], {}), '(args.testing_csv)\n', (2740, 2758), True, 'import pandas as pd\n')] |
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from tqdm import tqdm
from utils import Jaccard, nlp
from consts import JACCARD_SIM
class ContentBasedFiltering:
def __init__(self, meta_df):
"""
Args:
meta_df (pd.DataFrame):
"""
assert isinstance(meta_df, type(pd.DataFrame()))
self.__meta_df = meta_df.reset_index(drop=True)
self.__movieId_to_index = (
self.__meta_df["movieId"].reset_index().set_index("movieId").squeeze()
)
self.__movieId_to_title = (
self.__meta_df[["title", "movieId"]].set_index("movieId").squeeze()
)
self.__index_to_movieId = self.meta_df["movieId"]
# from uni-gram to tri-gram
self.__tfidf = TfidfVectorizer(
analyzer="word", stop_words="english", ngram_range=(1, 3)
)
self.__tfidf_matrix = None
self.__cosine_sim = None
self.__jaccard_sim = None
@property
def meta_df(self):
return self.__meta_df
@property
def movieId_to_index(self):
return self.__movieId_to_index
@property
def movieId_to_title(self):
return self.__movieId_to_title
@property
def index_to_movieId(self):
return self.__index_to_movieId
@property
def tfidf(self):
return self.__tfidf
@property
def tfidf_matrix(self):
return self.__tfidf_matrix
@property
def cosine_sim(self):
return self.__cosine_sim
@property
def jaccard_sim(self):
return self.__jaccard_sim
def compute_cosine_similarity(self):
"""
Compute cosine similarity based on movie description, including overview and tagline.
"""
if self.cosine_sim is None:
self.__meta_df["description"] = (
self.__meta_df["overview"] + " " + self.__meta_df["tagline"]
)
self.__meta_df.loc[:, "description"] = self.__meta_df.loc[
:, "description"
].apply(nlp)
# tfidf matrix where row represents each movie and column represents words
self.__tfidf_matrix = self.__tfidf.fit_transform(
self.__meta_df["description"]
)
self.__cosine_sim = linear_kernel(self.__tfidf_matrix)
def compute_jaccard_similarity(self, fname=JACCARD_SIM):
"""
Compute jaccard similarity based on movie meta data, including cast, keywords, genres, director
Args:
fname (str)
"""
if self.jaccard_sim is not None:
return None
else:
if not fname: # time-consuming
self.meta_df["items_for_jaccard"] = (
self.meta_df["cast"]
+ self.meta_df["keywords"]
+ self.meta_df["genres"]
+ self.meta_df["director"]
)
jaccard_sim = np.zeros(
(len(self.meta_df.index), len(self.meta_df.index))
)
for i1 in tqdm(range(len(self.meta_df.index))):
for i2 in range(i1 + 1, len(self.meta_df.index)):
s1 = set(self.meta_dfa_new.items_for_jaccard[i1])
s2 = set(self.meta_df.items_for_jaccard[i2])
try:
sim = Jaccard(s1, s2)
except:
print(f"Jaccard has trouble: (s1, s2) = ({s1}, {s2})")
sim = 0
jaccard_sim[i1, i2] = sim
jaccard_sim[i2, i1] = sim
self.__jaccard_sim = jaccard_sim
else:
assert isinstance(fname, str)
assert fname.endswith(".npz")
# require large memory
self.__jaccard_sim = np.load(fname)["arr_0"] # shape: (45116, 45116)
def recommend(self, movie_id, topk=40):
"""
Recommend movie based on `movie_id`
Args:
movie_id (int)
topk (int): recommend top-k similar movies
"""
assert isinstance(movie_id, int)
assert isinstance(topk, int)
assert self.cosine_sim is not None
assert self.jaccard_sim is not None
index = self.__movieId_to_index[movie_id]
sim = self.__cosine_sim[index] + self.__jaccard_sim[index]
sim = np.argsort(-sim)[1 : topk + 1] # skip the first one
sim = [str(self.__index_to_movieId[i]) for i in sim]
return sim
| [
"pandas.DataFrame",
"numpy.load",
"sklearn.metrics.pairwise.linear_kernel",
"sklearn.feature_extraction.text.TfidfVectorizer",
"utils.Jaccard",
"numpy.argsort"
] | [((859, 933), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""word"""', 'stop_words': '"""english"""', 'ngram_range': '(1, 3)'}), "(analyzer='word', stop_words='english', ngram_range=(1, 3))\n", (874, 933), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2379, 2413), 'sklearn.metrics.pairwise.linear_kernel', 'linear_kernel', (['self.__tfidf_matrix'], {}), '(self.__tfidf_matrix)\n', (2392, 2413), False, 'from sklearn.metrics.pairwise import linear_kernel\n'), ((4546, 4562), 'numpy.argsort', 'np.argsort', (['(-sim)'], {}), '(-sim)\n', (4556, 4562), True, 'import numpy as np\n'), ((413, 427), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (425, 427), True, 'import pandas as pd\n'), ((3989, 4003), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (3996, 4003), True, 'import numpy as np\n'), ((3487, 3502), 'utils.Jaccard', 'Jaccard', (['s1', 's2'], {}), '(s1, s2)\n', (3494, 3502), False, 'from utils import Jaccard, nlp\n')] |
import math
import numpy as np
from numba import cuda,types, from_dtype
from raytracer.cudaOptions import cudaOptions
# Not privatizing the quaternion functions as cuda fails inside a class.
# http://graphics.stanford.edu/courses/cs348a-17-winter/Papers/pdf -- eq#3
# also https://github.com/Unity-Technologies/Unity.Mathematics/blob/master/src/Unity.Mathematics/quaternion.cs
#-------------------------------------------------------- CUDA DEVICE FUNCTION ------------- (not user callable) ------
@cuda.jit(device=True)
def _inner(qx,qy,qz,q2x,q2y,q2z):
return qx*q2x + qy*q2y + qz*q2z
@cuda.jit(device=True)
def _innerQ(qw,qx,qy,qz,q2w,q2x,q2y,q2z):
return qw*q2w + qx*q2x + qy*q2y + qz*q2z
@cuda.jit(device=True)
def _outer(qx,qy,qz,q2x,q2y,q2z):
return ((qy*q2z) - (qz*q2y), (qz*q2x) - (qx*q2z), (qx*q2y) - (qy*q2x))
@cuda.jit(device=True)
def outer(v1,v2):
qx,qy,qz = v1
q2x,q2y,q2z = v2
return _outer(qx,qy,qz,q2x,q2y,q2z)
@cuda.jit(device=True)
def _norm(vx,vy,vz):
return math.sqrt(_inner(vx,vy,vz,vx,vy,vz))
@cuda.jit(device=True)
def _norm2(vx,vy,vz):
return _inner(vx,vy,vz,vx,vy,vz)
@cuda.jit(device=True)
def _normQ(qw,qx,qy,qz):
return math.sqrt(_innerQ(qw,qx,qy,qz,qw,qx,qy,qz))
#-------------------------------------
@cuda.jit(device=True)
def _normalizeV(vx,vy,vz):
v = _norm(vx,vy,vz)
return vx/v,vy/v,vz/v
@cuda.jit(device=True)
def _normalizeQ(qw,qx,qy,qz):
q = _normQ(qw,qx,qy,qz)
return qw/q,qx/q,qy/q,qz/q
@cuda.jit(device=True)
def normalizeV(vector):
vx,vy,vz = vector
return _normalizeV(vx,vy,vz)
@cuda.jit(device=True)
def normalizeQ(q):
qw,qx,qy,qz = q
return _normalizeQ(qw,qx,qy,qz)
#-------------------------------------
@cuda.jit(device=True)
def _addQ(qw,qx,qy,qz,q2w,q2x,q2y,q2z):
return (qw+q2w,qx+q2x,qy+q2y,qz+q2z)
@cuda.jit(device=True)
def _multQ(qw,qx,qy,qz,q2w,q2x,q2y,q2z):
A = qw*q2w - _inner(qx,qy,qz,q2x,q2y,q2z)
cx,cy,cz = _outer(qx,qy,qz,q2x,q2y,q2z)
X = qw*q2x + q2w*qx + cx
Y = qw*q2y + q2w*qy + cy
Z = qw*q2z + q2w*qz + cz
return (A,X,Y,Z)
@cuda.jit(device=True)
def addQ(q,q2):
qw,qx,qy,qz = q
q2w,q2x,q2y,q2z = q2
return _addQ(qw,qx,qy,qz,q2w,q2x,q2y,q2z)
@cuda.jit(device=True)
def multQ(q,q2):
qw,qx,qy,qz = q
q2w,q2x,q2y,q2z = q2
return _multQ(qw,qx,qy,qz,q2w,q2x,q2y,q2z)
@cuda.jit(device=True)
def dirV(vert1,vert2):
vx1,vy1,vz1 = vert1
vx2,vy2,vz2 = vert2
return _normalizeV(vx2-vx1,vy2-vy1,vz2-vz1)
#-------------------------------------
@cuda.jit(device=True)
def conjugate(qw,qx,qy,qz):
return (qw,-qx,-qy,-qz)
@cuda.jit(device=True)
def _q(angle,vector):
vx,vy,vz = normalizeV(vector)
halfangle = angle/2
cos = math.cos(halfangle)
sin = math.sin(halfangle)
return (cos,vx*sin,vy*sin,vz*sin)
#-------------------------- Rotation Functions ---------------------\
@cuda.jit(device=True)
def _rotq(vx,vy,vz,qw,qx,qy,qz):
A = (qw*qw) - _norm2(qx,qy,qz)
B = 2*_inner(qx,qy,qz,vx,vy,vz)
cx,cy,cz = _outer(qx,qy,qz,vx,vy,vz)
C = 2*qw
X = A*vx + B*qx + C*cx
Y = A*vy + B*qy + C*cy
Z = A*vz + B*qz + C*cz
return X,Y,Z
@cuda.jit(device=True)
def _screenspaceq(vx,vy,vz,qw,qx,qy,qz):
A = (qw*qw) - _norm2(qx,qy,qz)
B = 2*_inner(qx,qy,qz,vx,vy,vz)
_,cy,cz = _outer(qx,qy,qz,vx,vy,vz)
C = 2*qw
Y = A*vy + B*qy + C*cy
Z = A*vz + B*qz + C*cz
return Y,Z
@cuda.jit(device=True)
def rotvq(vx,vy,vz,q):
#qw,qx,qy,qz = normalizeQ(q)
qw,qx,qy,qz = normalizeQ(q)
x,y,z = _rotq(vx,vy,vz,qw,qx,qy,qz)
return x,y,z
@cuda.jit(device=True)
def rotq(vertex,q):
vx,vy,vz = vertex
#qw,qx,qy,qz = normalizeQ(q)
qw,qx,qy,qz = normalizeQ(q)
x,y,z = _rotq(vx,vy,vz,qw,qx,qy,qz)
return x,y,z
@cuda.jit(device=True)
def screenspaceq(vx,vy,vz,q):
qw,qx,qy,qz = q
return _screenspaceq(vx,vy,vz,qw,qx,qy,qz)
# @cuda.jit(device=True)
# def rotate(vertex,angle,axis):
# vx,vy,vz = normalizeV(vertex)
# qw,qx,qy,qz = _q(angle,axis)
# rotq(vx,vy,vz,qw,qx,qy,qz)
@cuda.jit(device=True)
def genq(phi,alpha):
#alpha = (np.pi/2) - theta
qA = _q(phi,(0,0,1)) # Z-rotation
uvector = rotq((0,1,0),qA) # Y -> Y'
qB = _q(-alpha,uvector) # Y'-rotation
return multQ(qB,qA)
@cuda.jit(device=True)
def genq_LookRotation(direction):
# needs normalized input
dx,dy,dz = direction
phi = math.atan2(dy,dx)
dxy = math.sqrt(dx*dx + dy*dy)
alpha = math.acos(dxy) #dxy/1
return genq(phi,alpha)
#-------------------------------------------------------- CUDA KERNELS ------------- (user callable) -------------------
@cuda.jit
def rotateq(vertex,q):
x,y,z = rotq(vertex,q)
vertex[0] = x
vertex[1] = y
vertex[2] = z
@cuda.jit
def _rotateQ(verts,q):
i = cuda.grid(1) # thread number
if i<len(verts):
x,y,z = rotq(verts[i],q)
verts[i,0] = x
verts[i,1] = y
verts[i,2] = z
@cuda.jit
def generateq(q,phi,alpha):
q[0:4] = genq(phi,alpha)
@cuda.jit
def generateVQ_LOR(q,vertex1,vertex2):
direction = dirV(vertex1,vertex2)
q[0:4] = genq_LookRotation(direction)
#--------------------------------------------------------
class quaternion:
@staticmethod
def rotate(verts,phi=0.0,alpha=0.0):
workers = len(verts)
blocks = math.ceil(workers / cudaOptions.maxthreadsperblock)
#print(blocks)
q = np.zeros(shape=(4,))
generateq[1,1](q,phi,alpha)
#print(q)
_rotateQ[blocks,cudaOptions.maxthreadsperblock](verts,q)
### Some tests:
# vertex = np.array([1.0,1.0,0.0])
# q = np.array([0.966,0.0,0.259,0.0])
# rotateq[1,1](vertex,q)
# print(vertex)
# x = np.array([0.0,0.0,0.0,0.0])
# generateq[1,1](x,1.0,0.0)
# print(x) | [
"math.sqrt",
"math.atan2",
"math.ceil",
"numpy.zeros",
"math.sin",
"math.acos",
"math.cos",
"numba.cuda.jit",
"numba.cuda.grid"
] | [((500, 521), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (508, 521), False, 'from numba import cuda, types, from_dtype\n'), ((593, 614), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (601, 614), False, 'from numba import cuda, types, from_dtype\n'), ((703, 724), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (711, 724), False, 'from numba import cuda, types, from_dtype\n'), ((835, 856), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (843, 856), False, 'from numba import cuda, types, from_dtype\n'), ((955, 976), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (963, 976), False, 'from numba import cuda, types, from_dtype\n'), ((1047, 1068), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (1055, 1068), False, 'from numba import cuda, types, from_dtype\n'), ((1129, 1150), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (1137, 1150), False, 'from numba import cuda, types, from_dtype\n'), ((1271, 1292), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (1279, 1292), False, 'from numba import cuda, types, from_dtype\n'), ((1371, 1392), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (1379, 1392), False, 'from numba import cuda, types, from_dtype\n'), ((1483, 1504), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (1491, 1504), False, 'from numba import cuda, types, from_dtype\n'), ((1585, 1606), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (1593, 1606), False, 'from numba import cuda, types, from_dtype\n'), ((1722, 1743), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (1730, 1743), False, 'from numba import cuda, types, from_dtype\n'), ((1826, 1847), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), 
'(device=True)\n', (1834, 1847), False, 'from numba import cuda, types, from_dtype\n'), ((2088, 2109), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (2096, 2109), False, 'from numba import cuda, types, from_dtype\n'), ((2218, 2239), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (2226, 2239), False, 'from numba import cuda, types, from_dtype\n'), ((2350, 2371), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (2358, 2371), False, 'from numba import cuda, types, from_dtype\n'), ((2531, 2552), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (2539, 2552), False, 'from numba import cuda, types, from_dtype\n'), ((2610, 2631), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (2618, 2631), False, 'from numba import cuda, types, from_dtype\n'), ((2885, 2906), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (2893, 2906), False, 'from numba import cuda, types, from_dtype\n'), ((3169, 3190), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (3177, 3190), False, 'from numba import cuda, types, from_dtype\n'), ((3427, 3448), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (3435, 3448), False, 'from numba import cuda, types, from_dtype\n'), ((3596, 3617), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (3604, 3617), False, 'from numba import cuda, types, from_dtype\n'), ((3784, 3805), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (3792, 3805), False, 'from numba import cuda, types, from_dtype\n'), ((4068, 4089), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (4076, 4089), False, 'from numba import cuda, types, from_dtype\n'), ((4289, 4310), 'numba.cuda.jit', 'cuda.jit', ([], {'device': '(True)'}), '(device=True)\n', (4297, 4310), False, 'from numba import cuda, 
types, from_dtype\n'), ((2722, 2741), 'math.cos', 'math.cos', (['halfangle'], {}), '(halfangle)\n', (2730, 2741), False, 'import math\n'), ((2752, 2771), 'math.sin', 'math.sin', (['halfangle'], {}), '(halfangle)\n', (2760, 2771), False, 'import math\n'), ((4409, 4427), 'math.atan2', 'math.atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (4419, 4427), False, 'import math\n'), ((4437, 4465), 'math.sqrt', 'math.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (4446, 4465), False, 'import math\n'), ((4474, 4488), 'math.acos', 'math.acos', (['dxy'], {}), '(dxy)\n', (4483, 4488), False, 'import math\n'), ((4801, 4813), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (4810, 4813), False, 'from numba import cuda, types, from_dtype\n'), ((5345, 5396), 'math.ceil', 'math.ceil', (['(workers / cudaOptions.maxthreadsperblock)'], {}), '(workers / cudaOptions.maxthreadsperblock)\n', (5354, 5396), False, 'import math\n'), ((5432, 5452), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4,)'}), '(shape=(4,))\n', (5440, 5452), True, 'import numpy as np\n')] |
### gcode_reader in code folder
### instructions in SETUP.txt
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##################################
# University of Wisconsin-Madison
# Author: <NAME>
##################################
"""
Gcode reader for both FDM (regular and Stratasys) and LPBF.
It supports the following functionalities
1. plot a layer in 2D, plot layers in 3D
2. list important information of path
3. animate the printing of a layer in 2D, animate the printing of layers in 3D
4. mesh the path, plot mesh, list important informations about the mesh
## below two features are under construction
5. compute closest left element and right element
6. shrink and convert FDM process plan to PBF S-Code
"""
# standard library
import argparse
import collections
from enum import Enum
import math
import os.path
import pprint
import statistics
import sys
import PRNTR
import Image_maker
# third party library
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import seaborn as sns
# Module-level handles pulled from project-local modules.
path1 = PRNTR.location  # presumably a working-directory/output path chosen by PRNTR -- TODO confirm
imagename2 = Image_maker.namer  # presumably the generated image's base name -- TODO confirm against Image_maker
def GCR():
# sns.set() # use seaborn style
# maximum element length in meshing
MAX_ELEMENT_LENGTH = 1 # FDM regular
# MAX_ELEMENT_LENGTH = 5 # FDM Stratasys
# MAX_ELEMENT_LENGTH = 10 # four-spirals scode
# MAX_ELEMENT_LENGTH = 50e-6 # LPBF
# MAX_ELEMENT_LENGTH = 100e-6 # LPBF (for plot mesh example)
# set true to keep support path
PLOT_SUPPORT = True
# set true to use one color for plot
# set false to use random color for plot
SINGLE_COLOR = False
# set true to plot scans with positive power in different color
# this is for powder bed fusion
PLOT_POWER = True
POWER_ZERO = 1
IGNORE_ZERO_POWER = True
# Element namedtuple
Element = collections.namedtuple('Element', ['x0', 'y0', 'x1', 'y1', 'z'])
# set true to add axis-label and title
FIG_INFO = False
# MARGIN RATIO
MARGIN_RATIO = 0.2
# zero tolerance for is_left check
ZERO_TOLERANCE = 1e-12
# global variables
pp = pprint.PrettyPrinter(indent=4)
### under construction
# plot polygon
HALF_WIDTH = 0.6 # FDM regular
# HALF_WIDTH = 1.5 # FDM stratasys
# HALF_WIDTH = 50e-6
## This is for research...
# FDM regular: current 0.5 mm = 500 mu, target 50 mu
# FDM stratasys: current 1.4 mm = 1400 mu, target 50 mu
# HORIZONTAL_SHRINK_RATIO = 0.0001 # tweety and octo
# HORIZONTAL_SHRINK_RATIO = (1 / 1000) * (1 / (1400 / 50)) # mobius arm
# HORIZONTAL_SHRINK_RATIO = (1 / 1000) * (1 / (1500 / 50)) # bunny
# HORIZONTAL_SHRINK_RATIO = (1 / 1000) * (1 / (600 / 25)) # bunny
HORIZONTAL_SHRINK_RATIO = (1 / 1000) * (1 / (600 / 25)) # wrench
DELTA_Z = 2e-5
LASER_POWER = 195
LASER_SPEED = 0.8
TRAVEL_SPEED = 0.8
def axisEqual3D(ax):
"""set 3d axis equal."""
extents = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
sz = extents[:,1] - extents[:,0]
centers = np.mean(extents, axis=1)
maxsize = max(abs(sz))
r = maxsize/4
for ctr, dim in zip(centers, 'xyz'):
getattr(ax, 'set_{}lim'.format(dim))(ctr - r, ctr + r)
def save_figure(fig, filename, dpi):
"""
#save figure to a file
def Args():
fig: figure
filename: outfilename
#dpi: dpi of the figure
"""
_, ext = filename.rsplit('.', 1)
fig.savefig(filename, format=ext, dpi=dpi, bbox_inches='tight')
print('saving to {:s} with {:d} DPI'.format(filename, dpi))
def create_axis(figsize=(8, 8), projection='2d'):
"""
create axis based on figure size and projection
returns fig, ax
Args:
figsize: size of the figure
projection: dimension of figure
Returns:
fig, ax
"""
projection = projection.lower()
if projection not in ['2d', '3d']:
raise ValueError
if projection == '2d':
fig, ax = plt.subplots(figsize=figsize)
else: # '3d'
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
return fig, ax
def create_movie_writer(title='Movie Writer', fps=15):
"""
create ffmpeg writer
Args:
title: title of the movie writer
fps: frames per second
Returns:
movie writer
"""
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title=title, artist='Matplotlib',
comment='Movie Support')
writer = FFMpegWriter(fps=15, metadata=metadata)
return writer
def add_margin_to_axis_limits(min_v, max_v, margin_ratio=MARGIN_RATIO):
"""
compute new min_v and max_v based on margin
Args:
min_v: minimum value
max_v: maximum value
margin_ratio:
Returns:
new_min_v, new_max_v
"""
dv = (max_v - min_v) * margin_ratio
return (min_v - dv, max_v + dv)
class LayerError(Exception):
""" layer number error """
pass
class GcodeType(Enum):
""" enum of GcodeType """
FDM_REGULAR = 1
FDM_STRATASYS = 2
LPBF_REGULAR = 3
LPBF_SCODE = 4
@classmethod
def has_value(cls, value):
return any(value == item.value for item in cls)
class GcodeReader:
""" Gcode reader class """
def __init__(self, filename, filetype=GcodeType.FDM_REGULAR):
if not os.path.exists(filename):
print("{} does not exist!".format(filename))
sys.exit(1)
self.filename = filename
self.filetype = filetype
# print(self.filetype)
self.n_segs = 0 # number of line segments
self.segs = None # list of line segments [(x0, y0, x1, y1, z)]
self.n_layers = 0 # number of layers
# seg_index_bars and subpath_index_bars have the same format
# e.g. ith layer has segment indexes [seg_index_bars[i-1],
# seg_index_bars[i])
self.seg_index_bars = []
self.subpath_index_bars = []
self.summary = None
self.lengths = None
self.subpaths = None
self.xyzlimits = None
self.elements = None
self.elements_index_bars = []
# read file to populate variables
self._read()
        def mesh(self, max_length):
            """Slice every scan segment into elements no longer than max_length.

            Populates self.elements (Element namedtuples) and
            self.elements_index_bars (layer i owns element indexes
            [bars[i-1], bars[i])).  Segments whose power is below POWER_ZERO
            are skipped.
            """
            self.elements = []
            self.elements_index_bars = []
            bar = 0
            n_eles = 0
            # FDM files carry no power column: fake an always-on power so no
            # segment is filtered out by the POWER_ZERO check below
            if not hasattr(self, 'powers'):
                self.powers = [POWER_ZERO + 10] * len(self.segs)
            for i, (x0, y0, x1, y1, z) in enumerate(self.segs):
                # crossing a layer boundary: record the running element count
                if i == self.seg_index_bars[bar]:
                    bar += 1
                    self.elements_index_bars.append(n_eles)
                power = self.powers[i]
                if power < POWER_ZERO:
                    continue
                length = np.hypot(x0 - x1, y0 - y1)
                # ceil so that every slice is at most max_length long
                n_slices = int(np.ceil(length / max_length))
                n_eles += n_slices
                dx = (x1 - x0) / n_slices
                dy = (y1 - y0) / n_slices
                # emit n_slices - 1 equal slices, advancing (x0, y0) each time
                for _ in range(n_slices - 1):
                    # self.elements.append((x0, y0, x0 + dx, y0 + dy, z))
                    self.elements.append(Element(x0, y0, x0 + dx, y0 + dy, z))
                    x0, y0 = x0 + dx, y0 + dy
                # final slice ends exactly at the segment end point
                # self.elements.append((x0, y0, x1, y1, z))
                self.elements.append(Element(x0, y0, x1, y1, z))
            self.elements_index_bars.append(n_eles)
            # print(self.elements_index_bars)
            print("Meshing finished, {:d} elements generated".
                format(len(self.elements)))
def plot_mesh_layer(self, layernum, ax=None):
""" plot mesh in one layer """
if not self.elements:
self.mesh(max_length=MAX_ELEMENT_LENGTH)
fig, ax = self.plot_layer(layer=layernum)
# if not ax:
# fig, ax = create_axis(projection='2d')
left, right = self.elements_index_bars[layernum - 1:layernum + 1]
print(left, right)
for x0, y0, x1, y1, _ in self.elements[left:right]:
# ax.plot([x0, x1], [y0, y1], 'b-')
# ax.scatter(0.5 * (x0 + x1), 0.5 * (y0 + y1), s=4, color='r')
ax.plot([0.5 * (x0 + x1)], [0.5 * (y0 + y1)], 'ro', markersize=4)
return fig, ax
        def convert_to_scode(self):
            """Shrink the path horizontally and write it as a PBF s-code file.

            The output shares the input's base name with an ``.scode``
            extension.  X/Y are scaled by HORIZONTAL_SHRINK_RATIO, layers are
            re-spaced by DELTA_Z, and a zero-power travel line is emitted
            whenever consecutive segments are not connected.
            """
            name, _ = self.filename.rsplit('.', 1)
            outpath = "{}.scode".format(name)
            old_z = -np.inf
            # start one step below zero so the first layer lands at z = 0
            z = -DELTA_Z
            old_x0 = old_y0 = old_x1 = old_y1 = -np.inf
            with open(outpath, 'w') as out_f:
                out_f.write('# x1 y1 x2 y2 z power speed \n')
                for x0, y0, x1, y1, cur_z in self.segs:
                    x0 *= HORIZONTAL_SHRINK_RATIO
                    y0 *= HORIZONTAL_SHRINK_RATIO
                    x1 *= HORIZONTAL_SHRINK_RATIO
                    y1 *= HORIZONTAL_SHRINK_RATIO
                    # emit a power-off travel move when this segment does not
                    # start where the previous one ended or the layer changed
                    if old_x0 != -np.inf and (old_x1 != x0 or old_y1 != y0 or cur_z != old_z):
                        out_f.write("{:.8f} {:.8f} {:.8f} {:.8f} {:.8f} {:d} {:.4f}\n".format(old_x1, old_y1, x0, y0, z, 0, TRAVEL_SPEED))
                    # a rising source z advances the target height by DELTA_Z
                    if cur_z > old_z:
                        z += DELTA_Z
                        old_z = cur_z
                    old_x0 = x0
                    old_y0 = y0
                    old_x1 = x1
                    old_y1 = y1
                    # check if two segs are connected
                    out_f.write("{:.8f} {:.8f} {:.8f} {:.8f} {:.8f} {:d} {:.4f}\n".format(x0, y0, x1, y1, z, LASER_POWER, LASER_SPEED))
            print('Save path to s-code file {}'.format(outpath))
def plot_mesh(self, ax=None):
""" plot mesh """
if not self.elements:
self.mesh()
if not ax:
fig, ax = create_axis(projection='3d')
for x0, y0, x1, y1, z in self.elements:
ax.plot([x0, x1], [y0, y1], [z, z], 'b-')
ax.scatter(0.5 * (x0 + x1), 0.5 * (y0 + y1), z, 'r', s=4,
color='r')
return fig, ax
def _read(self):
"""
read the file and populate self.segs, self.n_segs and
self.seg_index_bars
"""
if self.filetype == GcodeType.FDM_REGULAR:
self._read_fdm_regular()
elif self.filetype == GcodeType.FDM_STRATASYS:
self._read_fdm_stratasys()
elif self.filetype == GcodeType.LPBF_REGULAR:
self._read_lpbf_regular()
elif self.filetype == GcodeType.LPBF_SCODE:
self._read_lpbf_scode()
else:
print("file type is not supported")
sys.exit(1)
self.xyzlimits = self._compute_xyzlimits(self.segs)
def _compute_xyzlimits(self, seg_list):
""" compute axis limits of a segments list """
xmin, xmax = float('inf'), -float('inf')
ymin, ymax = float('inf'), -float('inf')
zmin, zmax = float('inf'), -float('inf')
for x0, y0, x1, y1, z in seg_list:
xmin = min(x0, x1) if min(x0, x1) < xmin else xmin
ymin = min(y0, y1) if min(y0, y1) < ymin else ymin
zmin = z if z < zmin else zmin
xmax = max(x0, x1) if max(x0, x1) > xmax else xmax
ymax = max(y0, y1) if max(y0, y1) > ymax else ymax
zmax = z if z > zmax else zmax
return (xmin, xmax, ymin, ymax, zmin, zmax)
        def _read_lpbf_regular(self):
            """Parse a regular LPBF G-code file into line segments.

            Keeps lines of the form ``N.. G1 X.. Y.. Z.. F.. L..`` (L is the
            laser power).  Fills self.segs, self.powers, self.n_layers and
            self.seg_index_bars.
            """
            with open(self.filename, 'r') as infile:
                # read nonempty lines (readlines() is evaluated eagerly, so
                # the generators still work after the file is closed)
                lines = (line.strip() for line in infile.readlines()
                        if line.strip())
                # only keep line that starts with 'N'
                lines = (line for line in lines if line.startswith('N'))
            # pp.pprint(lines) # for debug
            self.segs = []
            self.powers = []
            temp = -float('inf')
            # running values of the last seen N, G, X, Y, Z, F, L words
            ngxyzfl = [temp, temp, temp, temp, temp, temp, temp]
            d = dict(zip(['N', 'G', 'X', 'Y', 'Z', 'F', 'L'], range(7)))
            seg_count = 0
            for line in lines:
                old_ngxyzfl = ngxyzfl[:]
                tokens = line.split()
                for token in tokens:
                    ngxyzfl[d[token[0]]] = float(token[1:])
                # a rising Z opens a new layer
                if ngxyzfl[d['Z']] > old_ngxyzfl[d['Z']]:
                    self.n_layers += 1
                    self.seg_index_bars.append(seg_count)
                # keep G1 moves that change X/Y at constant Z with power on
                if (ngxyzfl[1] == 1 and ngxyzfl[2:4] != old_ngxyzfl[2:4]
                        and ngxyzfl[4] == old_ngxyzfl[4]
                        and ngxyzfl[5] > 0):
                    x0, y0, z = old_ngxyzfl[2:5]
                    x1, y1 = ngxyzfl[2:4]
                    self.segs.append((x0, y0, x1, y1, z))
                    self.powers.append(ngxyzfl[-1])
                    seg_count += 1
            self.n_segs = len(self.segs)
            self.segs = np.array(self.segs)
            self.seg_index_bars.append(self.n_segs)
            # print(self.n_layers)
            # print(self.powers)
            assert(len(self.seg_index_bars) - self.n_layers == 1)
        def _read_lpbf_scode(self):
            """Parse an LPBF s-code file into line segments.

            Each data line is ``x0 y0 x1 y1 z power speed``; lines starting
            with '#' are comments.  Fills self.segs, self.powers,
            self.n_layers and self.seg_index_bars.
            """
            with open(self.filename, 'r') as infile:
                # read nonempty lines
                lines = (line.strip() for line in infile.readlines()
                        if line.strip())
                # only keep line that not starts with '#'
                lines = (line for line in lines if not line.startswith('#'))
            # pp.pprint(lines) # for debug
            self.segs = []
            self.powers = []
            seg_count = 0
            old_z = -np.inf
            for line in lines:
                x0, y0, x1, y1, z, power, speed = map(float, line.split())
                # a rising z opens a new layer
                if z > old_z:
                    self.n_layers += 1
                    self.seg_index_bars.append(seg_count)
                    old_z = z
                self.segs.append((x0, y0, x1, y1, z))
                self.powers.append(power)
                seg_count += 1
            self.n_segs = len(self.segs)
            self.segs = np.array(self.segs)
            # print(self.segs)
            self.seg_index_bars.append(self.n_segs)
            assert(len(self.seg_index_bars) - self.n_layers == 1)
        def _read_fdm_regular(self):
            """Parse a regular FDM G-code file into extrusion line segments.

            Keeps G1 moves that change X/Y at constant Z while E (extrusion)
            increases; a move at a new maximum Z opens a new layer.  Fills
            self.segs, self.n_layers and self.seg_index_bars.
            """
            with open(self.filename, 'r') as infile:
                # read nonempty lines
                lines = (line.strip() for line in infile.readlines()
                        if line.strip())
                # only keep line that starts with 'G'
                # lines = (line for line in lines if line.startswith('G'))
                new_lines = []
                for line in lines:
                    if line.startswith('G'):
                        # strip a trailing ';' comment if present
                        idx = line.find(';')
                        if idx != -1:
                            line = line[:idx]
                        new_lines.append(line)
                lines = new_lines
            # pp.pprint(lines) # for debug
            self.segs = []
            temp = -float('inf')
            # running values of the last seen G, X, Y, Z, E, F words
            gxyzef = [temp, temp, temp, temp, temp, temp]
            d = dict(zip(['G', 'X', 'Y', 'Z', 'E', 'F'], range(6)))
            seg_count = 0
            mx_z = -math.inf
            for line in lines:
                old_gxyzef = gxyzef[:]
                for token in line.split():
                    gxyzef[d[token[0]]] = float(token[1:])
                """
                # if gxyzef[3] > old_gxyzef[3]: # z value
                # it may lift z in the beginning or during the printing process
                if gxyzef[4] > old_gxyzef[4] and gxyzef[3] > mx_z:
                    mx_z = gxyzef[3]
                    # print(gxyzef[3], old_gxyzef[3])
                    self.n_layers += 1
                    self.seg_index_bars.append(seg_count)
                """
                # G1 move in the XY plane with extrusion: a real print move
                if (gxyzef[0] == 1 and gxyzef[1:3] != old_gxyzef[1:3]
                        and gxyzef[3] == old_gxyzef[3]
                        and gxyzef[4] > old_gxyzef[4]):
                    # update layer here
                    # print(gxyzef[3], mx_z)
                    if gxyzef[3] > mx_z:
                        mx_z = gxyzef[3]
                        self.n_layers += 1
                        self.seg_index_bars.append(seg_count)
                    x0, y0, z = old_gxyzef[1:4]
                    x1, y1 = gxyzef[1:3]
                    self.segs.append((x0, y0, x1, y1, z))
                    seg_count += 1
            self.n_segs = len(self.segs)
            self.segs = np.array(self.segs)
            self.seg_index_bars.append(self.n_segs)
            assert(len(self.seg_index_bars) - self.n_layers == 1)
def _read_fdm_stratasys(self):
""" read stratasys fdm G-code file """
self.areas = []
self.is_supports = []
self.styles = []
self.deltTs = []
self.segs = []
temp = -float('inf')
# x, y, z, area, deltaT, is_support, style
xyzATPS = [temp, temp, temp, temp, temp, False, '']
seg_count = 0
with open(self.filename, 'r') as in_file:
lines = in_file.readlines()
# means position denoted by the line is the start of subpath
is_start = True
for line in lines:
# filter out supports path
if not PLOT_SUPPORT and 'True' in line:
continue
if line.startswith('#'):
continue
if not line.strip(): # skip empty line
start = True
continue
old_xyzATPS = xyzATPS[:]
tokens = line.split()
# print(tokens)
xyzATPS[:5] = [float(token) for token in tokens[:5]]
xyzATPS[5] = bool(tokens[5])
xyzATPS[6] = tokens[6]
if xyzATPS[2] != old_xyzATPS[2]: # z value
self.seg_index_bars.append(seg_count)
self.n_layers += 1
elif not start:
# make sure is_support and style do not change
assert(xyzATPS[5:] == old_xyzATPS[5:])
x0, y0 = old_xyzATPS[:2]
x1, y1, z = xyzATPS[:3]
self.segs.append((x0, y0, x1, y1, z))
seg_count += 1
self.areas.append(xyzATPS[3])
self.deltTs.append(xyzATPS[4])
self.is_supports.append(xyzATPS[5])
self.styles.append(xyzATPS[6])
start = False
self.n_segs = len(self.segs)
self.segs = np.array(self.segs)
self.seg_index_bars.append(self.n_segs)
# print(self.seg_index_bars)
        def _compute_subpaths(self):
            """Group contiguous segments into subpaths for plotting.

            A subpath is a tuple (xs, ys, zs) of coordinate lists covering a
            run of connected segments; a connectivity break starts a new
            subpath, and a new maximum z records a layer boundary in
            self.subpath_index_bars.  Results are cached in self.subpaths.
            """
            if not self.subpaths:
                self.subpaths = []
                self.subpath_index_bars = [0]
                x0, y0, x1, y1, z = self.segs[0, :]
                xs, ys, zs = [x0, x1], [y0, y1], [z, z]
                mx_z = zs[-1]
                for x0, y0, x1, y1, z in self.segs[1:, :]:
                    # a segment that does not start at the current end point
                    # (or sits at a different z) begins a new subpath
                    if x0 != xs[-1] or y0 != ys[-1] or z != zs[-1]:
                        self.subpaths.append((xs, ys, zs))
                        # if z != zs[-1]:
                        if z > mx_z:
                            mx_z = z
                            self.subpath_index_bars.append(len(self.subpaths))
                        xs, ys, zs = [x0, x1], [y0, y1], [z, z]
                    else:
                        # connected: extend the current subpath
                        xs.append(x1)
                        ys.append(y1)
                        zs.append(z)
                if len(xs) != 0:
                    self.subpaths.append((xs, ys, zs))
                self.subpath_index_bars.append(len(self.subpaths))
                # print(self.subpath_index_bars)
                # print(self.segs)
def _compute_center_distance(self, i, j):
"""compute center distance between element i and j."""
n = len(self.elements)
assert(i < n and j < n)
elements = self.elements
ax = 0.5 * (elements[i].x0 + elements[i].x1)
ay = 0.5 * (elements[i].y0 + elements[i].y1)
bx = 0.5 * (elements[j].x0 + elements[j].x1)
by = 0.5 * (elements[j].y0 + elements[j].y1)
return math.sqrt((ax - bx) ** 2 + (ay - by) ** 2)
def _compute_parallel_distance(self, i, j):
"""compute the parallel distance between element i and j."""
n = len(self.elements)
assert(i < n and j < n)
elements = self.elements
x = 0.5 * (elements[i].x0 + elements[i].x1)
y = 0.5 * (elements[i].y0 + elements[i].y1)
ax, ay, bx, by, _ = elements[j]
dx = ax - bx
dy = ay - by
deno = math.sqrt(dx * dx + dy * dy)
nume = abs((by - ay) * x - (bx - ax) * y + bx * ay - by * ax)
return nume / deno
def _is_element_nearly_parallel(self, i, j, threshold):
"""check if element i and element j are nearly parallel."""
n = len(self.elements)
assert(i < n and j < n)
elements = self.elements
ax, ay, bx, by, _ = elements[i]
cx, cy, dx, dy, _ = elements[j]
dx1 = bx - ax
dy1 = by - ay
dx2 = dx - cx
dy2 = dy - cy
cos_theta = abs((dx1 * dx2 + dy1 * dy2) / (math.sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2))))
return True if 1 - cos_theta < threshold else False
def _is_element_left(self, i, j):
"""check if element j is on the left of element i."""
n = len(self.elements)
assert(i < n and j < n)
assert(self.elements[i].z == self.elements[j].z)
elements = self.elements
ax, ay, bx, by, _ = elements[i]
cx = 0.5 * (elements[j].x0 + elements[j].x1)
cy = 0.5 * (elements[j].y0 + elements[j].y1)
cross_product = (bx - ax) * (cy - ay) - (cx - ax) * (by - ay)
if abs(cross_product) < ZERO_TOLERANCE:
return 0
else:
return 1 if cross_product > 0 else -1
def compute_nearest_neighbors(self, layer=0):
    """For each element of `layer`, find the nearest nearly-parallel
    element on its left and on its right.

    :return: two lists of (element index, distance) pairs, one entry per
        element; the index is -1 (and the distance math.inf) when no
        neighbor qualifies.
    """
    if not self.elements:
        self.mesh(max_length=MAX_ELEMENT_LENGTH)
    start_idx, end_idx = self.elements_index_bars[layer - 1:layer + 1]
    print(start_idx, end_idx)
    left_neis, right_neis = [], []
    for i in range(start_idx, end_idx):
        best_left = (-1, math.inf)
        best_right = (-1, math.inf)
        for j in range(start_idx, end_idx):
            if j == i:
                continue
            # Candidate neighbors must be nearly parallel and close enough.
            if not (self._is_element_nearly_parallel(i, j, 0.0001) and
                    self._compute_center_distance(i, j) < 2.0 * HALF_WIDTH * 2):
                continue
            side = self._is_element_left(i, j)
            distance = self._compute_parallel_distance(i, j)
            # Elements closer than this are treated as overlapping, not neighbors.
            if distance < 0.4 * HALF_WIDTH * 2:
                continue
            if side == 1 and distance < best_left[1]:
                best_left = (j, distance)
            elif side == -1 and distance < best_right[1]:
                best_right = (j, distance)
        left_neis.append(best_left)
        right_neis.append(best_right)
    print("Finished computing left and right neighbors.")
    return left_neis, right_neis
def plot_neighbors_layer(self, layer=0):
    """Plot each element's nearest parallel neighbors in one layer, print
    distance statistics, and return a box plot of the left distances.

    Note: the figure returned is the box-plot figure (fig2, ax2), not the
    mesh figure.
    """
    left_neis, right_neis = self.compute_nearest_neighbors(layer)
    #"""
    fig, ax = self.plot_mesh_layer(layer)
    # Element index range [left, right) belonging to this layer.
    left, right = self.elements_index_bars[layer - 1:layer + 1]
    print(left, right)
    es = self.elements
    for idx, (x0, y0, x1, y1, _) in enumerate(self.elements[left:right]):
        # Midpoint of the current element.
        xc = 0.5 * (x0 + x1)
        yc = 0.5 * (y0 + y1)
        # ax.plot([0.5 * (x0 + x1)], [0.5 * (y0 + y1)], 'ro', markersize=1.5)
        left_idx, left_mn = left_neis[idx]
        if left_idx != -1:
            # Red line from this midpoint to the left neighbor's midpoint.
            lx = 0.5 * (es[left_idx].x0 + es[left_idx].x1)
            ly = 0.5 * (es[left_idx].y0 + es[left_idx].y1)
            # print(left_mn, math.sqrt((lx - xc) ** 2 + (ly - yc) ** 2),self._compute_parallel_distance(idx, left_idx))
            ax.plot([xc, lx], [yc, ly], 'r-')
        right_idx, right_mn = right_neis[idx]
        if right_idx != -1:
            # Same for the right neighbor.
            rx = 0.5 * (es[right_idx].x0 + es[right_idx].x1)
            ry = 0.5 * (es[right_idx].y0 + es[right_idx].y1)
            # print(left_mn, math.sqrt((lx - xc) ** 2 + (ly - yc) ** 2),self._compute_parallel_distance(idx, left_idx))
            ax.plot([xc, rx], [yc, ry], 'r-')
    #"""
    # plot histogram
    # Entries with idx == -1 have no qualifying neighbor and are excluded.
    left_mns = [mn for idx, mn in left_neis if idx != -1]
    print("left median = {}".format(statistics.median(left_mns)))
    print("left mean = {}".format(statistics.mean(left_mns)))
    print("left min = {}".format(min(left_mns)))
    print("left max = {}".format(max(left_mns)))
    right_mns = [mn for idx, mn in right_neis if idx != -1]
    print("right median = {}".format(statistics.median(right_mns)))
    print("right mean = {}".format(statistics.mean(right_mns)))
    print("right min = {}".format(min(right_mns)))
    print("right max = {}".format(max(right_mns)))
    fig2, ax2 = plt.subplots(figsize=(8, 8))
    ax2.boxplot(left_mns)
    # return fig, ax
    return fig2, ax2
def plot_polygon_layer(self, layer):
    """Plot each element of one layer as a rectangle whose half-widths
    are clipped to half the distance to the nearest parallel neighbor on
    each side (capped at HALF_WIDTH)."""
    left_neis, right_neis = self.compute_nearest_neighbors(layer)
    fig, ax = self.plot_mesh_layer(layer)
    left, right = self.elements_index_bars[layer - 1:layer + 1]
    # print(left, right)
    es = self.elements
    for idx, (sx, sy, ex, ey, _) in enumerate(self.elements[left:right]):
        # Canonicalize the segment direction; remember whether the endpoints
        # were swapped so the left/right widths can be swapped back below.
        reverse = False
        if sx > ex or ey < sy:
            sx, sy, ex, ey = ex, ey, sx, sy
            reverse = True
        dx = ex - sx
        dy = ey - sy
        theta = np.arctan2(dy, dx)
        # beta points normal (90 degrees counter-clockwise) to the segment.
        beta = theta + np.pi / 2.0
        lw = HALF_WIDTH
        left_idx, left_mn = left_neis[idx]
        if left_mn / 2 < lw:
            lw = left_mn / 2
        rw = HALF_WIDTH
        right_idx, right_mn = right_neis[idx]
        if right_mn / 2 < rw:
            rw = right_mn / 2
        if reverse:
            lw, rw = rw, lw
        # Four rectangle corners: both endpoints offset by rw to one side
        # of the segment and by lw to the other.
        x1 = sx - rw * np.cos(beta)
        y1 = sy - rw * np.sin(beta)
        x2 = ex - rw * np.cos(beta)
        y2 = ey - rw * np.sin(beta)
        x3 = ex + lw * np.cos(beta)
        y3 = ey + lw * np.sin(beta)
        x4 = sx + lw * np.cos(beta)
        y4 = sy + lw * np.sin(beta)
        ax.plot([x1, x2, x3, x4, x1], [y1, y2, y3, y4, y1], 'r-')
    return fig, ax
def plot(self, color='blue', ax=None):
    """Plot the whole part in 3D.

    :param color: line color used when SINGLE_COLOR is enabled
    :param ax: existing 3D axis to draw on; a new figure/axis pair is
        created when omitted
    :return: (fig, ax)
    """
    # BUG FIX: previously `fig` was only assigned when `ax` was None, so
    # passing an existing axis made `return fig, ax` raise UnboundLocalError.
    if ax is None:
        fig, ax = create_axis(projection='3d')
    else:
        fig = ax.get_figure()
    assert self.n_segs > 0
    self._compute_subpaths()
    for xs, ys, zs in self.subpaths:
        if SINGLE_COLOR:
            ax.plot(xs, ys, zs, color=color)
        else:
            ax.plot(xs, ys, zs)
    xmin, xmax, ymin, ymax, _, _ = self.xyzlimits
    # ax.set_xlim([xmin, xmax])
    # ax.set_ylim([ymin, ymax])
    # Expand the limits slightly so the part is not clipped at the border.
    ax.set_xlim(add_margin_to_axis_limits(xmin, xmax))
    ax.set_ylim(add_margin_to_axis_limits(ymin, ymax))
    return fig, ax
def plot_layers(self, min_layer, max_layer, ax=None):
    """Plot the layers in [min_layer, max_layer) in 3D.

    :param ax: existing 3D axis to draw on; created when omitted
    :raises LayerError: if the layer range is invalid
    :return: (fig, ax)
    """
    if (min_layer >= max_layer or min_layer < 1 or max_layer >
            self.n_layers + 1):
        raise LayerError("Layer number is invalid!")
    self._compute_subpaths()
    # BUG FIX: `fig` used to be unassigned when a caller supplied `ax`,
    # making the final return raise UnboundLocalError.
    if ax is None:
        fig, ax = create_axis(projection='3d')
    else:
        fig = ax.get_figure()
    # Subpath index range covering the requested layers.
    left, right = (self.subpath_index_bars[min_layer - 1],
                   self.subpath_index_bars[max_layer - 1])
    for xs, ys, zs in self.subpaths[left:right]:
        ax.plot(xs, ys, zs)
    return fig, ax
def plot_layer(self, layer=1, ax=None):
    """Plot a single layer in 2D.

    :param layer: 1-based layer index in [1, n_layers]
    :param ax: existing 2D axis to draw on; created when omitted
    :raises LayerError: if `layer` is out of range
    :return: (fig, ax)
    """
    if layer < 1 or layer > self.n_layers:
        raise LayerError("Layer number is invalid!")
    self._compute_subpaths()
    if not hasattr(self, 'powers'):
        # Fallback: treat every segment as printed with nonzero power.
        self.powers = [POWER_ZERO + 10] * len(self.segs)
    # BUG FIX: `fig` used to be unassigned when a caller supplied `ax`.
    if ax is None:
        fig, ax = create_axis(projection='2d')
    else:
        fig = ax.get_figure()
    if not PLOT_POWER:
        left, right = (self.subpath_index_bars[layer - 1],
                       self.subpath_index_bars[layer])
        for xs, ys, _ in self.subpaths[left:right]:
            ax.plot(xs, ys)
    else:
        left, right = (self.seg_index_bars[layer - 1],
                       self.seg_index_bars[layer])
        # BUG FIX: previously this zipped over *all* segments/powers even
        # though `left`/`right` for the requested layer were computed;
        # only this layer's slice is drawn now.
        for (x1, y1, x2, y2, z), power in zip(self.segs[left:right],
                                              self.powers[left:right]):
            if power > POWER_ZERO:
                ax.plot([x1, x2], [y1, y2], 'r-')
            elif not IGNORE_ZERO_POWER:
                ax.plot([x1, x2], [y1, y2], 'b-')
    ax.axis('equal')
    return fig, ax
def describe_mesh(self, max_length):
    """Print summary statistics of the meshed elements: the element
    length distribution and the element count per layer."""
    if not self.elements:
        self.mesh(max_length)
    self.mesh_lengths = [np.hypot(x1 - x0, y1 - y0)
                         for x0, y0, x1, y1, _ in self.elements]
    series = pd.Series(self.mesh_lengths)
    print('1. Element length information:')
    print(series.describe())
    print('2. Number of layers: {:d}'.format(self.n_layers))
    # Per-layer element counts are differences of the cumulative index bars.
    bars = np.array(self.elements_index_bars)
    data = {'# elements': bars[1:] - bars[:-1],
            'layer': np.arange(1, self.n_layers + 1),
            }
    df = pd.DataFrame(data).set_index('layer')
    print(df)
def describe(self):
    """Print basic information about the process plan: segment length
    statistics, per-layer segment/subpath counts, total path and travel
    lengths, power range (LPBF only) and the bounding box."""
    if not self.summary:
        # Cache segment lengths and their pandas summary on first call.
        self.lengths = [np.hypot(x1 - x0, y1 - y0) for x0, y0, x1, y1, _
                        in self.segs]
        series = pd.Series(self.lengths)
        self.summary = series.describe()
    print('1. Line segments information: ')
    print(self.summary)
    print('2. Number of layers: {:d}'.format(self.n_layers))
    self._compute_subpaths()
    assert(len(self.seg_index_bars) == len(self.subpath_index_bars))
    """
    try:
        assert(len(self.seg_index_bars) == len(self.subpath_index_bars))
    except:
        print(len(self.seg_index_bars))
        print(len(self.subpath_index_bars))
        print(self.n_layers)
    """
    # Per-layer counts derived from the cumulative index bars.
    data = {'# segments': np.array(self.seg_index_bars[1:]) -
            np.array(self.seg_index_bars[:-1]),
            'layer': np.arange(1, self.n_layers + 1),
            '# subpaths': np.array(self.subpath_index_bars[1:]) -
            np.array(self.subpath_index_bars[:-1]),
            }
    df = pd.DataFrame(data)
    df = df.set_index('layer')
    print(df)
    print('3. Other information: ')
    print('Total path length equals {:0.4f}.'.format(sum(self.lengths)))
    # compute total travel lengths
    # Travel = Manhattan distance between the end of one subpath and the
    # start of the next (moves without printing).
    travels = []
    for i in range(len(self.subpaths) - 1):
        xsi, ysi, zsi = self.subpaths[i]
        xsj, ysj, zsj = self.subpaths[i + 1]
        travels.append(abs(xsj[0] - xsi[-1]) + abs(ysj[0] - ysi[-1])
                       + abs(zsj[0] - zsi[-1]))
    print("Total travel length equals {:0.4f}.".format(sum(travels)))
    if self.filetype == GcodeType.LPBF_REGULAR or self.filetype == GcodeType.LPBF_SCODE:
        print("Laser power range [{}, {}]".format(
            min(self.powers), max(self.powers)))
    print("Number of nozzle travels equals {:d}.".format(
        len(self.subpaths)))
    print("Number of subpaths equals {:d}.".format(len(self.subpaths)))
    print("X, Y and Z limits: [{:0.2f}, {:0.2f}] X [{:0.2f}, {:0.2f}] X [{:0.2f}, {:0.2f}]".format(
        *self.xyzlimits))
def animate_layer(self, layer=1, animation_time=5, outfile=None):
    """Animate the printing of one layer in 2D.

    Segments are drawn in order; the pause per segment is proportional to
    its Manhattan length so the whole animation lasts roughly
    `animation_time` seconds.  If `outfile` is given, frames are also
    written to a movie file.

    :raises LayerError: if `layer` is out of range
    """
    if layer < 1 or layer > self.n_layers:
        raise LayerError("Layer number is invalid!")
    fig, ax = create_axis(projection='2d')
    if outfile:
        writer = create_movie_writer()
        writer.setup(fig, outfile=outfile, dpi=100)
    xmin, xmax, ymin, ymax, _, _ = self.xyzlimits
    # ax.set_xlim([xmin, xmax])
    # ax.set_ylim([ymin, ymax])
    ax.set_xlim(add_margin_to_axis_limits(xmin, xmax))
    ax.set_ylim(add_margin_to_axis_limits(ymin, ymax))
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title("{:s}(layer = {:d})".format(self.filename, layer))
    if not FIG_INFO:
        # Strip all decorations when figure annotations are disabled.
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
        ax.set_title("")
    # ax.set_title(self.fila)
    left, right = (self.seg_index_bars[layer - 1],
                   self.seg_index_bars[layer])
    seg_lst = self.segs[left: right]
    lens = np.array([abs(x0 - x1) + abs(y0 - y1) for x0, y0, x1, y1, z in
                     seg_lst])
    # Allocate the animation time proportionally to segment length.
    times = lens / lens.sum() * animation_time
    # print(times.sum())
    for time, (x0, y0, x1, y1, _) in zip(times, seg_lst):
        ax.plot([x0, x1], [y0, y1], 'b-')
        plt.pause(time)
        if outfile:
            writer.grab_frame()
        plt.draw()
    if outfile:
        writer.finish()
        print('Creating movie {:s}'.format(outfile))
    plt.show()
def animate_layers(self, min_layer, max_layer=None, outfile=None):
    """Animate the print process of layers [min_layer, max_layer) in 3D,
    one subpath at a time, implemented with plt.pause() and plt.draw().

    :param max_layer: exclusive upper layer; defaults to all layers
    :param outfile: optional path of a movie file to write
    :raises LayerError: if the layer range is invalid
    """
    if max_layer is None:
        max_layer = self.n_layers + 1
    if (min_layer >= max_layer or min_layer < 1 or max_layer >
            self.n_layers + 1):
        raise LayerError("Layer number is invalid!")
    # Subpath index range covering the requested layers.
    left, right = (self.subpath_index_bars[min_layer - 1],
                   self.subpath_index_bars[max_layer - 1])
    fig, ax = create_axis(projection='3d')
    if outfile:
        writer = create_movie_writer()
        writer.setup(fig, outfile=outfile, dpi=100)
    xmin, xmax, ymin, ymax, zmin, zmax = self.xyzlimits
    ax.set_xlim([xmin, xmax])
    ax.set_ylim([ymin, ymax])
    # A single-layer part has zmax == zmin; setting that z-range would fail.
    if zmax > zmin:
        ax.set_zlim([zmin, zmax])
    for sub_path in self.subpaths[left:right]:
        xs, ys, zs = sub_path
        ax.plot(xs, ys, zs)
        if outfile:
            writer.grab_frame()
        plt.pause(0.1)
        plt.draw()
    if outfile:
        writer.finish()
        print('Creating movie {:s}'.format(outfile))
    plt.show()
def get_parser():
    """Build and return the command line argument parser for the G-code reader."""
    parser = argparse.ArgumentParser(description='Gcode Reader')
    add = parser.add_argument
    # Positional argument: path of the input file.
    add(dest='gcode_file', action='store',
        help='specify path of the input gcode file')
    add('-t', '--type', dest='filetype', help="""File Type
1: Regular FDM; 2: Stratasys FDM; 3: Regular LPBF; 4: Scode LPBF""",
        required=True, type=int, action='store')
    add('-l', '--layer', dest='plot_layer_idx', action='store',
        type=int, help='plot a layer in 2D')
    add('-a', '--animation', dest='ani_layer_idx',
        action='store', type=int, help='animate printing of a layer in 2D')
    add('-m', '--mesh', dest='mesh_layer_idx', action='store',
        type=int, help='plot the mesh of a layer in 2D')
    add('-p', '--plot', dest='plot3d', action='store_true',
        help='plot the whole part')
    add('-s', '--save', dest='outfile', action='store',
        help='specify the path of output file')
    # The options below are still under construction.
    add('-conv', '--convert', dest='convert', action='store_true',
        help='convert FDM path to LPBF scode')
    add('-nei', '--neighbor', dest='neighbor_layer_idx',
        action='store', default=-1, type=int, help='plot nearest neighbor of each element in one layer')
    add('-poly', '--polygon', dest='polygon_layer_idx',
        action='store', default=-1, type=int, help='plot element polygon in one layer')
    return parser
def command_line_runner():
    """Command line entry point: parse arguments, print plan statistics
    and dispatch the requested plot/animation/conversion actions."""
    # 1. parse arguments
    parser = get_parser()
    args = parser.parse_args()
    # pp.pprint(args)
    # 2. handle Gcode file type
    if not GcodeType.has_value(args.filetype):
        print('Invalid G-code file type: {:d}'.format(args.filetype))
        print('Valid types are listed below')
        for gcode_type in GcodeType:
            print('{:s} : {:d}'.format(gcode_type.name, gcode_type.value))
        sys.exit(1)
    else:
        filetype = GcodeType(args.filetype)
    # construct Gcode Reader object
    gcode_reader = GcodeReader(filename=args.gcode_file, filetype=filetype)
    # 3. print out some statistic information to standard output
    gcode_reader.describe()
    ## describe meshing results
    # gcode_reader.describe_mesh(max_length=MAX_ELEMENT_LENGTH)
    # 4. plot the part in 3D, plot a layer in 2D, animate the printing process
    # of single layer in 2D, mesh and plot a layer in 2D
    if args.plot3d:
        fig, ax = gcode_reader.plot()
    else:
        if args.plot_layer_idx:
            fig, ax = gcode_reader.plot_layer(layer=args.plot_layer_idx)
        elif args.ani_layer_idx:
            gcode_reader.animate_layer(layer=args.ani_layer_idx)
        elif args.mesh_layer_idx:
            fig, ax = gcode_reader.plot_mesh_layer(layernum=args.mesh_layer_idx)
    ## Below part is under construction
    # 5. convert FDM G-Code to PBF S-Code
    if args.convert:
        gcode_reader.convert_to_scode()
    # 6. plot contact graph in 2D
    if args.neighbor_layer_idx != -1:
        # gcode_reader.compute_nearest_neighbors()
        fig, ax = gcode_reader.plot_neighbors_layer(layer=args.neighbor_layer_idx)
    # 7. plot mesh (representing elements using polygons)
    if args.polygon_layer_idx != -1:
        fig, ax = gcode_reader.plot_polygon_layer(layer=args.polygon_layer_idx)
    # specify title and x, y label, set axis
    if args.plot3d or args.plot_layer_idx or args.mesh_layer_idx:
        if args.plot3d:
            axisEqual3D(ax)
        if FIG_INFO:
            _, filename = args.gcode_file.rsplit(os.path.sep, 1)
            ax.set_title(filename)
            ax.set_xlabel('x')
            ax.set_ylabel('y')
            if ax.name == '3d':
                ax.set_zlabel('z')
        else:
            ax.set_xticks([])
            ax.set_yticks([])
            ax.axis('off')
        # save figure to file
        if args.outfile:
            save_figure(fig, args.outfile, dpi=100)
    #plt.show()
    # NOTE(review): `path1` and `imagename2` are not defined anywhere in this
    # file chunk -- confirm they exist as module-level globals, otherwise this
    # unconditional savefig raises NameError.
    plt.savefig("{}/files/Layer/ideal/{}.jpg".format(path1, imagename2))
    # plt.savefig()
# Script entry point: run the CLI when executed directly.
if __name__ == "__main__":
    print("Gcode Reader")
    command_line_runner()
if __name__ == '__main__':
GCR() | [
"numpy.arctan2",
"argparse.ArgumentParser",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.sin",
"pandas.DataFrame",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.ceil",
"math.sqrt",
"statistics.median"... | [((1879, 1943), 'collections.namedtuple', 'collections.namedtuple', (['"""Element"""', "['x0', 'y0', 'x1', 'y1', 'z']"], {}), "('Element', ['x0', 'y0', 'x1', 'y1', 'z'])\n", (1901, 1943), False, 'import collections\n'), ((2152, 2182), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (2172, 2182), False, 'import pprint\n'), ((3113, 3137), 'numpy.mean', 'np.mean', (['extents'], {'axis': '(1)'}), '(extents, axis=1)\n', (3120, 3137), True, 'import numpy as np\n'), ((38981, 39032), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Gcode Reader"""'}), "(description='Gcode Reader')\n", (39004, 39032), False, 'import argparse\n'), ((4160, 4189), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4172, 4189), True, 'import matplotlib.pyplot as plt\n'), ((4230, 4257), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4240, 4257), True, 'import matplotlib.pyplot as plt\n'), ((13680, 13699), 'numpy.array', 'np.array', (['self.segs'], {}), '(self.segs)\n', (13688, 13699), True, 'import numpy as np\n'), ((14903, 14922), 'numpy.array', 'np.array', (['self.segs'], {}), '(self.segs)\n', (14911, 14922), True, 'import numpy as np\n'), ((17400, 17419), 'numpy.array', 'np.array', (['self.segs'], {}), '(self.segs)\n', (17408, 17419), True, 'import numpy as np\n'), ((21563, 21605), 'math.sqrt', 'math.sqrt', (['((ax - bx) ** 2 + (ay - by) ** 2)'], {}), '((ax - bx) ** 2 + (ay - by) ** 2)\n', (21572, 21605), False, 'import math\n'), ((22066, 22094), 'math.sqrt', 'math.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (22075, 22094), False, 'import math\n'), ((27541, 27569), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (27553, 27569), True, 'import matplotlib.pyplot as plt\n'), ((32714, 32742), 'pandas.Series', 'pd.Series', 
(['self.mesh_lengths'], {}), '(self.mesh_lengths)\n', (32723, 32742), True, 'import pandas as pd\n'), ((33138, 33156), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (33150, 33156), True, 'import pandas as pd\n'), ((34478, 34496), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (34490, 34496), True, 'import pandas as pd\n'), ((37455, 37465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37463, 37465), True, 'import matplotlib.pyplot as plt\n'), ((38887, 38897), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38895, 38897), True, 'import matplotlib.pyplot as plt\n'), ((41274, 41285), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (41282, 41285), False, 'import sys\n'), ((5837, 5848), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5845, 5848), False, 'import sys\n'), ((7326, 7352), 'numpy.hypot', 'np.hypot', (['(x0 - x1)', '(y0 - y1)'], {}), '(x0 - x1, y0 - y1)\n', (7334, 7352), True, 'import numpy as np\n'), ((19700, 19719), 'numpy.array', 'np.array', (['self.segs'], {}), '(self.segs)\n', (19708, 19719), True, 'import numpy as np\n'), ((28345, 28363), 'numpy.arctan2', 'np.arctan2', (['dy', 'dx'], {}), '(dy, dx)\n', (28355, 28363), True, 'import numpy as np\n'), ((32593, 32619), 'numpy.hypot', 'np.hypot', (['(x1 - x0)', '(y1 - y0)'], {}), '(x1 - x0, y1 - y0)\n', (32601, 32619), True, 'import numpy as np\n'), ((33066, 33097), 'numpy.arange', 'np.arange', (['(1)', '(self.n_layers + 1)'], {}), '(1, self.n_layers + 1)\n', (33075, 33097), True, 'import numpy as np\n'), ((33490, 33513), 'pandas.Series', 'pd.Series', (['self.lengths'], {}), '(self.lengths)\n', (33499, 33513), True, 'import pandas as pd\n'), ((34272, 34303), 'numpy.arange', 'np.arange', (['(1)', '(self.n_layers + 1)'], {}), '(1, self.n_layers + 1)\n', (34281, 34303), True, 'import numpy as np\n'), ((37215, 37230), 'matplotlib.pyplot.pause', 'plt.pause', (['time'], {}), '(time)\n', (37224, 37230), True, 'import matplotlib.pyplot as plt\n'), ((37315, 
37325), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (37323, 37325), True, 'import matplotlib.pyplot as plt\n'), ((38716, 38730), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (38725, 38730), True, 'import matplotlib.pyplot as plt\n'), ((38747, 38757), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (38755, 38757), True, 'import matplotlib.pyplot as plt\n'), ((7384, 7412), 'numpy.ceil', 'np.ceil', (['(length / max_length)'], {}), '(length / max_length)\n', (7391, 7412), True, 'import numpy as np\n'), ((22693, 22753), 'math.sqrt', 'math.sqrt', (['((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2))'], {}), '((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2))\n', (22702, 22753), False, 'import math\n'), ((26969, 26996), 'statistics.median', 'statistics.median', (['left_mns'], {}), '(left_mns)\n', (26986, 26996), False, 'import statistics\n'), ((27041, 27066), 'statistics.mean', 'statistics.mean', (['left_mns'], {}), '(left_mns)\n', (27056, 27066), False, 'import statistics\n'), ((27296, 27324), 'statistics.median', 'statistics.median', (['right_mns'], {}), '(right_mns)\n', (27313, 27324), False, 'import statistics\n'), ((27370, 27396), 'statistics.mean', 'statistics.mean', (['right_mns'], {}), '(right_mns)\n', (27385, 27396), False, 'import statistics\n'), ((32935, 32973), 'numpy.array', 'np.array', (['self.elements_index_bars[1:]'], {}), '(self.elements_index_bars[1:])\n', (32943, 32973), True, 'import numpy as np\n'), ((32996, 33035), 'numpy.array', 'np.array', (['self.elements_index_bars[:-1]'], {}), '(self.elements_index_bars[:-1])\n', (33004, 33035), True, 'import numpy as np\n'), ((33370, 33396), 'numpy.hypot', 'np.hypot', (['(x1 - x0)', '(y1 - y0)'], {}), '(x1 - x0, y1 - y0)\n', (33378, 33396), True, 'import numpy as np\n'), ((34151, 34184), 'numpy.array', 'np.array', (['self.seg_index_bars[1:]'], {}), '(self.seg_index_bars[1:])\n', (34159, 34184), True, 'import numpy as np\n'), ((34207, 34241), 'numpy.array', 
'np.array', (['self.seg_index_bars[:-1]'], {}), '(self.seg_index_bars[:-1])\n', (34215, 34241), True, 'import numpy as np\n'), ((34339, 34376), 'numpy.array', 'np.array', (['self.subpath_index_bars[1:]'], {}), '(self.subpath_index_bars[1:])\n', (34347, 34376), True, 'import numpy as np\n'), ((34399, 34437), 'numpy.array', 'np.array', (['self.subpath_index_bars[:-1]'], {}), '(self.subpath_index_bars[:-1])\n', (34407, 34437), True, 'import numpy as np\n'), ((28821, 28833), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (28827, 28833), True, 'import numpy as np\n'), ((28865, 28877), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (28871, 28877), True, 'import numpy as np\n'), ((28909, 28921), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (28915, 28921), True, 'import numpy as np\n'), ((28953, 28965), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (28959, 28965), True, 'import numpy as np\n'), ((28997, 29009), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (29003, 29009), True, 'import numpy as np\n'), ((29041, 29053), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (29047, 29053), True, 'import numpy as np\n'), ((29085, 29097), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (29091, 29097), True, 'import numpy as np\n'), ((29129, 29141), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (29135, 29141), True, 'import numpy as np\n'), ((11326, 11337), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (11334, 11337), False, 'import sys\n')] |
import json
import argparse
import json
import numpy as np
import torch
import torch.optim as optim
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score
from torch.nn import functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from dnn import DNNNet
# Training configuration.
USE_CUDA = True        # prefer GPU when one is available
BATCH_SIZE = 32
EPOCHS = 5
MAX_LEN = 60           # token sequences are padded/truncated to this length
WORD_EMBED = 128
THOUGHT_SIZE = 256
# Select the compute device once at import time.
device = torch.device("cuda" if torch.cuda.is_available() and USE_CUDA else "cpu")
# torch.manual_seed(1357)
class DataSet:
    """Torch-style dataset wrapping tokenized lines and binary labels.

    Each sample is a LongTensor of token ids padded/truncated to MAX_LEN,
    paired with a one-hot FloatTensor target of size 2 built from y.
    """

    def __init__(self, X, y):
        # X: iterable of (token_id_list, <ignored>) pairs; y: 0/1 labels.
        self.X = []
        for line, _ in X:
            # BUG FIX: the previous code used `line += [0] * ...`, which
            # padded the *caller's* list in place; work on a copy instead.
            padded = list(line[:MAX_LEN])
            if len(padded) < MAX_LEN:
                padded.extend([0] * (MAX_LEN - len(padded)))
            self.X.append(torch.LongTensor(padded).to(device))
        self.y = y

    def __len__(self):
        return len(self.y)

    def __getitem__(self, index):
        # One-hot encode the binary label.
        target = [0, 0]
        target[self.y[index]] = 1
        return self.X[index], torch.FloatTensor(np.asarray(target)).to(device)
def train():
    """Train the DNN classifier on data/classify.tag.json and report test
    accuracy / precision / recall / F1 after every epoch."""
    # if not os.path.exists('./classifier'):
    #     os.mkdir('./classifier')
    print("Load data...")
    dataset = json.load(open('data/classify.tag.json', 'r'))
    train_dataset = DataSet(dataset['X_train'], dataset['y_train'])
    test_dataset = DataSet(dataset['X_test'], dataset['y_test'])
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)
    # Build model.
    dnn_model = DNNNet(thought_size=THOUGHT_SIZE)
    dnn_model.to(device)
    print(dnn_model)
    optimizer = optim.Adam(dnn_model.parameters(), lr=1e-3)
    for epoch in range(1, EPOCHS + 1):
        dnn_model.train()
        train_loss = 0
        total = 0
        correct = 0
        with tqdm(total=len(train_loader)) as bar:
            for batch_idx, (batch_xs, target) in enumerate(train_loader):
                pred = dnn_model(batch_xs)
                optimizer.zero_grad()
                # Targets are one-hot vectors, so use BCE-with-logits.
                loss = F.binary_cross_entropy_with_logits(pred, target)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()
                # Convert one-hot targets / logits to class indices for accuracy.
                target = torch.argmax(target, -1, keepdim=False)
                batch_pred = torch.argmax(pred, -1, keepdim=False)
                correct += (target == batch_pred).sum().item()
                total += target.shape[0]
                bar.update()
                bar.set_postfix_str('loss:%.4f | avg_loss:%.4f acc:%.4f' %
                                    (loss.item(), train_loss / (batch_idx + 1), correct / total))
        # checkpoint_path = os.path.join('./classifier', "classifier.ckpt")
        # torch.save(dnn_model, checkpoint_path)
        # Test model.
        dnn_model.eval()
        total_target = list()
        total_pred = list()
        for batch_xs, target in test_loader:
            target = torch.argmax(target, -1, keepdim=False)
            batch_out = dnn_model(batch_xs)
            batch_pred = torch.argmax(batch_out, -1, keepdim=False)
            total_target.extend(target.data.cpu().numpy())
            total_pred.extend(batch_pred.data.cpu().numpy())
        accuracy = accuracy_score(total_target, total_pred)
        precision = precision_score(total_target, total_pred)
        recall = recall_score(total_target, total_pred)
        f1 = f1_score(total_target, total_pred)
        print("Epoch %d, Test accuracy %.4f, precision %.4f, recall %.4f, f1 %.4f" %
              (epoch, accuracy, precision, recall, f1))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", type=str, default="preprocess", choices=["train", "test"])
    parser.add_argument("--epoch-idx", type=int, default=1)
    args = parser.parse_args()
    # NOTE(review): `args` is never consulted below -- train() runs regardless
    # of --mode / --epoch-idx, and the default "preprocess" is not a member of
    # `choices` (argparse does not validate defaults). Confirm whether a
    # dispatch on args.mode was intended.
    train()
| [
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.LongTensor",
"torch.argmax",
"dnn.DNNNet",
"sklearn.metrics.accuracy_score",
"numpy.asarray",
"sklearn.metrics.recall_score",
"torch.nn.functional.binary_cross_entropy_with_logits",
"sklearn.metrics.f1_score",
"torch.cuda.is_availa... | [((1366, 1428), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)'}), '(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n', (1376, 1428), False, 'from torch.utils.data import DataLoader\n'), ((1447, 1494), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'BATCH_SIZE'}), '(test_dataset, batch_size=BATCH_SIZE)\n', (1457, 1494), False, 'from torch.utils.data import DataLoader\n'), ((1531, 1564), 'dnn.DNNNet', 'DNNNet', ([], {'thought_size': 'THOUGHT_SIZE'}), '(thought_size=THOUGHT_SIZE)\n', (1537, 1564), False, 'from dnn import DNNNet\n'), ((3606, 3631), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3629, 3631), False, 'import argparse\n'), ((3215, 3255), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['total_target', 'total_pred'], {}), '(total_target, total_pred)\n', (3229, 3255), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score\n'), ((3276, 3317), 'sklearn.metrics.precision_score', 'precision_score', (['total_target', 'total_pred'], {}), '(total_target, total_pred)\n', (3291, 3317), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score\n'), ((3335, 3373), 'sklearn.metrics.recall_score', 'recall_score', (['total_target', 'total_pred'], {}), '(total_target, total_pred)\n', (3347, 3373), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score\n'), ((3387, 3421), 'sklearn.metrics.f1_score', 'f1_score', (['total_target', 'total_pred'], {}), '(total_target, total_pred)\n', (3395, 3421), False, 'from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score\n'), ((434, 459), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (457, 459), False, 'import torch\n'), ((2922, 2961), 'torch.argmax', 'torch.argmax', (['target', '(-1)'], {'keepdim': '(False)'}), 
'(target, -1, keepdim=False)\n', (2934, 2961), False, 'import torch\n'), ((3031, 3073), 'torch.argmax', 'torch.argmax', (['batch_out', '(-1)'], {'keepdim': '(False)'}), '(batch_out, -1, keepdim=False)\n', (3043, 3073), False, 'import torch\n'), ((2027, 2075), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['pred', 'target'], {}), '(pred, target)\n', (2061, 2075), True, 'from torch.nn import functional as F\n'), ((2209, 2248), 'torch.argmax', 'torch.argmax', (['target', '(-1)'], {'keepdim': '(False)'}), '(target, -1, keepdim=False)\n', (2221, 2248), False, 'import torch\n'), ((2278, 2315), 'torch.argmax', 'torch.argmax', (['pred', '(-1)'], {'keepdim': '(False)'}), '(pred, -1, keepdim=False)\n', (2290, 2315), False, 'import torch\n'), ((754, 776), 'torch.LongTensor', 'torch.LongTensor', (['line'], {}), '(line)\n', (770, 776), False, 'import torch\n'), ((1000, 1018), 'numpy.asarray', 'np.asarray', (['target'], {}), '(target)\n', (1010, 1018), True, 'import numpy as np\n')] |
import os
import numpy as np
def initialize_pyrngs():
    """Create one gslrandom PyRNG per OpenMP thread, each randomly seeded.

    :return: list of PyRNG instances, one per thread
    """
    from gslrandom import PyRNG, get_omp_num_threads
    if "OMP_NUM_THREADS" in os.environ:
        # BUG FIX: environment variables are strings; the old code then
        # evaluated `str > 0` below, which raises TypeError on Python 3
        # (and np.random.randint would reject a str size anyway).
        num_threads = int(os.environ["OMP_NUM_THREADS"])
    else:
        num_threads = get_omp_num_threads()
    assert num_threads > 0

    # Choose random seeds
    seeds = np.random.randint(2**16, size=num_threads)
    return [PyRNG(seed) for seed in seeds]
def convert_discrete_to_continuous(S, dt):
    """Convert a binned count matrix S (num_bins x K) into continuous event
    times with uniform random jitter inside each bin.

    Returns (event times, process labels, total duration T)."""
    from pybasicbayes.util.general import ibincount
    T = S.shape[0] * dt
    # Expand counts into per-event bin indices, one process (column) at a time,
    # then jitter each event uniformly within its bin.
    S_ct = dt * np.concatenate([ibincount(col) for col in S.T]).astype(float)
    S_ct += dt * np.random.rand(*S_ct.shape)
    assert np.all(S_ct < T)
    C_ct = np.concatenate([k * np.ones(col.sum())
                           for k, col in enumerate(S.T)]).astype(int)
    # Sort events chronologically, keeping labels aligned.
    order = np.argsort(S_ct)
    return S_ct[order], C_ct[order], T
def convert_continuous_to_discrete(S, C, dt, T_min, T_max):
    """Bin continuous event times back into a (num_bins, K) count matrix.

    :param S: array of event times
    :param C: integer process label per event (0..K-1)
    :param dt: bin width
    :param T_min: start of the time window
    :param T_max: end of the time window (always used as the last bin edge)
    :return: integer ndarray of shape (num_bins, K)
    """
    bins = np.arange(T_min, T_max, dt)
    if bins[-1] != T_max:
        bins = np.hstack((bins, [T_max]))
    T = bins.size - 1
    K = C.max() + 1
    S_dt = np.zeros((T, K))
    for k in range(K):
        S_dt[:, k] = np.histogram(S[C == k], bins)[0]
    # Every event must land in exactly one bin.
    assert S_dt.sum() == len(S)
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the equivalent platform-default integer dtype.
    return S_dt.astype(int)
def get_unique_file_name(filedir, filename):
    """Return a unique file name inside `filedir`.

    If any entries matching `filename*` already exist, append `.x` where
    x is one more than the number of matching entries; otherwise return
    `filename` unchanged.
    """
    import fnmatch
    existing = os.listdir(filedir)
    n_conflicts = len(fnmatch.filter(existing, "%s*" % filename))
    if n_conflicts == 0:
        return filename
    return "%s.%d" % (filename, n_conflicts + 1)
def logistic(x, lam_max=1.0):
    """Logistic (sigmoid) function scaled to saturate at `lam_max`."""
    return lam_max / (1.0 + np.exp(-x))
def logit(x, lam_max=1.0):
    """Inverse of `logistic`: the log-odds of x relative to lam_max."""
    p = x / lam_max
    return np.log(p) - np.log(1 - p)
def sample_nig(mu0, lmbda0, alpha0, beta0):
    """Draw a sample (mu, tau) from a normal-gamma prior.

    tau ~ Gamma(alpha0, scale=1/beta0) and, given tau,
    mu ~ Normal(mu0, var=1/(lmbda0 * tau)).  All four parameters are
    broadcast against each other; both outputs share the broadcast shape.
    """
    mu0, lmbda0, alpha0, beta0 = np.broadcast_arrays(mu0, lmbda0, alpha0, beta0)
    shape = mu0.shape
    assert lmbda0.shape == alpha0.shape == beta0.shape == shape
    # Draw the precision first, then the mean conditioned on it; the
    # np.array() wrapper keeps scalar draws reshapeable.
    tau = np.array(np.random.gamma(alpha0, 1. / beta0)).reshape(shape)
    mu = np.array(np.random.normal(mu0, np.sqrt(1. / (lmbda0 * tau)))).reshape(shape)
    return mu, tau
| [
"fnmatch.filter",
"gslrandom.PyRNG",
"numpy.log",
"numpy.zeros",
"gslrandom.get_omp_num_threads",
"numpy.hstack",
"numpy.argsort",
"numpy.random.gamma",
"numpy.histogram",
"numpy.random.randint",
"numpy.arange",
"numpy.exp",
"pybasicbayes.util.general.ibincount",
"numpy.random.rand",
"nu... | [((320, 364), 'numpy.random.randint', 'np.random.randint', (['(2 ** 16)'], {'size': 'num_threads'}), '(2 ** 16, size=num_threads)\n', (337, 364), True, 'import numpy as np\n'), ((693, 709), 'numpy.all', 'np.all', (['(S_ct < T)'], {}), '(S_ct < T)\n', (699, 709), True, 'import numpy as np\n'), ((830, 846), 'numpy.argsort', 'np.argsort', (['S_ct'], {}), '(S_ct)\n', (840, 846), True, 'import numpy as np\n'), ((988, 1015), 'numpy.arange', 'np.arange', (['T_min', 'T_max', 'dt'], {}), '(T_min, T_max, dt)\n', (997, 1015), True, 'import numpy as np\n'), ((1136, 1152), 'numpy.zeros', 'np.zeros', (['(T, K)'], {}), '((T, K))\n', (1144, 1152), True, 'import numpy as np\n'), ((1531, 1550), 'os.listdir', 'os.listdir', (['filedir'], {}), '(filedir)\n', (1541, 1550), False, 'import os\n'), ((1567, 1607), 'fnmatch.filter', 'fnmatch.filter', (['fnames', "('%s*' % filename)"], {}), "(fnames, '%s*' % filename)\n", (1581, 1607), False, 'import fnmatch\n'), ((2024, 2071), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['mu0', 'lmbda0', 'alpha0', 'beta0'], {}), '(mu0, lmbda0, alpha0, beta0)\n', (2043, 2071), True, 'import numpy as np\n'), ((232, 253), 'gslrandom.get_omp_num_threads', 'get_omp_num_threads', ([], {}), '()\n', (251, 253), False, 'from gslrandom import PyRNG, get_omp_num_threads\n'), ((375, 386), 'gslrandom.PyRNG', 'PyRNG', (['seed'], {}), '(seed)\n', (380, 386), False, 'from gslrandom import PyRNG, get_omp_num_threads\n'), ((654, 681), 'numpy.random.rand', 'np.random.rand', (['*S_ct.shape'], {}), '(*S_ct.shape)\n', (668, 681), True, 'import numpy as np\n'), ((1057, 1083), 'numpy.hstack', 'np.hstack', (['(bins, [T_max])'], {}), '((bins, [T_max]))\n', (1066, 1083), True, 'import numpy as np\n'), ((1906, 1925), 'numpy.log', 'np.log', (['(x / lam_max)'], {}), '(x / lam_max)\n', (1912, 1925), True, 'import numpy as np\n'), ((1924, 1947), 'numpy.log', 'np.log', (['(1 - x / lam_max)'], {}), '(1 - x / lam_max)\n', (1930, 1947), True, 'import numpy as np\n'), ((1195, 
1224), 'numpy.histogram', 'np.histogram', (['S[C == k]', 'bins'], {}), '(S[C == k], bins)\n', (1207, 1224), True, 'import numpy as np\n'), ((1856, 1866), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1862, 1866), True, 'import numpy as np\n'), ((2173, 2209), 'numpy.random.gamma', 'np.random.gamma', (['alpha0', '(1.0 / beta0)'], {}), '(alpha0, 1.0 / beta0)\n', (2188, 2209), True, 'import numpy as np\n'), ((2261, 2290), 'numpy.sqrt', 'np.sqrt', (['(1.0 / (lmbda0 * tau))'], {}), '(1.0 / (lmbda0 * tau))\n', (2268, 2290), True, 'import numpy as np\n'), ((593, 606), 'pybasicbayes.util.general.ibincount', 'ibincount', (['Sk'], {}), '(Sk)\n', (602, 606), False, 'from pybasicbayes.util.general import ibincount\n')] |
"""The WaveBlocks Project
This file contains a tiny wrapper to wrap
numpy ndarrays into Grid instances.
@author: <NAME>
@copyright: Copyright (C) 2012, 2013, 2014 <NAME>
@license: Modified BSD License
"""
from numpy import atleast_1d, abs, product
from WaveBlocksND.AbstractGrid import AbstractGrid
__all__ = ["GridWrapper"]
class GridWrapper(AbstractGrid):
r"""This class constructs a thin layer around an ``ndarray`` and wraps
it as :py:class:`Grid` subclass for API compatibility. The array must
have a shape of :math:`(D, N)` with :math:`N` the overall number of nodes.
"""
# TODO: Rather than using this class, one should try to eliminate the
# cases where it is used now.
def __init__(self, anarray):
# Shape is (D, #nodes)
self._data = anarray
self._dimension = self._data.shape[0]
# Compute some additional data
# TODO: Note that these values are only correct for closed, aperiodic grids
self._limits = [(anarray[d, 0], anarray[d, -1]) for d in range(self._dimension)]
self._extensions = [abs(l[-1] - l[0]) for l in self._limits]
def get_number_nodes(self, overall=False):
r"""Returns the number of grid nodes.
:param overall: Compute the product :math:`N = \prod_i^D N_i` of the
number :math:`N_i` of grid nodes along each dimension
:math:`i` specified.
:type overall: Boolean, default is ``False``
:return: A list of :math:`N_i` values or a single value :math:`N`.
"""
if overall is False:
return self._data.shape[1:]
else:
return product(self._data.shape[1:])
def get_nodes(self, flat=True, split=False):
r"""Returns all grid nodes.
:param flat: Whether to return the grid with a `hypercubic`
:math:`(D, N_1, ..., N_D)` shape or a `flat`
:math:`(D, \prod_i^D N_i)` shape. Note that the
hypercubic shape is not implemented!
:type flat: Boolean, default is ``True``.
:param split: Whether to return the different components, one for each
dimension inside a single ndarray or a list with ndarrays,
with one item per dimension.
:type split: Boolean, default is ``False``.
:return: Depends of the optional arguments.
"""
if flat is False:
raise NotImplementedError("Grid wrapping for hypercubic storage.")
if split is True:
return [self._data[i, ...] for i in range(self._data.shape[0])]
else:
return self._data
def get_limits(self, axes=None):
r"""Returns the limits of the bounding box.
:param axes: The axes for which we want to get the limits.
:type axes: A single integer or a list of integers. If set
to ``None`` (default) we return the limits for all axes.
:return: A list of :math:`(min_i, max_i)` ndarrays.
"""
if axes is None:
axes = range(self._dimension)
return [self._limits[i] for i in atleast_1d(axes)]
def get_extensions(self, axes=None):
r"""Returns the extensions (length of the edges) of the bounding box.
:param axes: The axes for which we want to get the extensions.
:type axes: A single integer or a list of integers. If set
to ``None`` (default) we return the extensions for all axes.
:return: A list of :math:`|max_i-min_i|` values.
"""
if axes is None:
axes = range(self._dimension)
return [self._extensions[i] for i in atleast_1d(axes)]
| [
"numpy.product",
"numpy.abs",
"numpy.atleast_1d"
] | [((1095, 1112), 'numpy.abs', 'abs', (['(l[-1] - l[0])'], {}), '(l[-1] - l[0])\n', (1098, 1112), False, 'from numpy import atleast_1d, abs, product\n'), ((1674, 1703), 'numpy.product', 'product', (['self._data.shape[1:]'], {}), '(self._data.shape[1:])\n', (1681, 1703), False, 'from numpy import atleast_1d, abs, product\n'), ((3166, 3182), 'numpy.atleast_1d', 'atleast_1d', (['axes'], {}), '(axes)\n', (3176, 3182), False, 'from numpy import atleast_1d, abs, product\n'), ((3707, 3723), 'numpy.atleast_1d', 'atleast_1d', (['axes'], {}), '(axes)\n', (3717, 3723), False, 'from numpy import atleast_1d, abs, product\n')] |
#!/usr/bin/hfo_env python3
# encoding utf-8
from datetime import date, datetime as dt
import os
import numpy as np
from matias_hfo import settings
from matias_hfo.agents.utils import ServerDownError, NoActionPlayedError
def mkdir(name: str, idx: int = None, **kwargs):
today = date.today()
name_dir = ""
# Train type:
name_dir += str(name) + "_"
# Extra arguments:
for key, value in kwargs.items():
name_dir += str(value) + str(key) + "_"
# Date:
name_dir += today.isoformat()
if isinstance(idx, int):
name_dir += "_" + str(idx)
path = os.path.join(settings.MODELS_DIR, name_dir)
try:
os.mkdir(path)
except FileExistsError:
if idx is None:
idx = 1
else:
idx += 1
mkdir(name, idx=idx, **kwargs)
return path
def save_model(q_table: str, directory: str, file_name: str):
file_path = os.path.join(directory, file_name)
np.save(file_path, q_table)
def print_transiction(arr: tuple, actions_instance, simplex=False):
""" Transcition array format
(observation space, action, reward, new observation space, done) """
def round_list(lista):
return [round(el, 2) for el in lista]
if simplex:
print("+ R:{}; Act:{} D?:{}; {};".format(
arr[2],
actions_instance.actions[arr[1]],
arr[4],
round_list(arr[0].tolist())
))
else:
print("+ D?:{}; {} -> {}; R:{}; Act:{};".format(
arr[4],
round_list(arr[0].tolist()),
round_list(arr[3].tolist()),
arr[2],
actions_instance.actions[arr[1]]
))
def check_same_model(model1, model2):
obs1 = np.array([-0.38, -0.0, -0.16, -0.89, -0.11, -1.0, -0.92, 0.2, 0.0])
obs2 = np.array([-0.28, 0.34, -0.03, -0.88, -0.13, -1.0, -0.86, -0.12, 0.0])
obs3 = np.array([0.49, 0.16, -0.23, -0.65, -0.72, 1.0, -0.61, -0.62, 0.0])
for obs in [obs1, obs2, obs3]:
state = obs[np.newaxis, :]
qs1 = model1.predict(state)
qs2 = model2.predict(state)
if qs1.all() != qs2.all():
print("Different Models: ", qs1, qs2)
return False
return True
| [
"os.mkdir",
"numpy.save",
"datetime.date.today",
"numpy.array",
"os.path.join"
] | [((293, 305), 'datetime.date.today', 'date.today', ([], {}), '()\n', (303, 305), False, 'from datetime import date, datetime as dt\n'), ((614, 657), 'os.path.join', 'os.path.join', (['settings.MODELS_DIR', 'name_dir'], {}), '(settings.MODELS_DIR, name_dir)\n', (626, 657), False, 'import os\n'), ((932, 966), 'os.path.join', 'os.path.join', (['directory', 'file_name'], {}), '(directory, file_name)\n', (944, 966), False, 'import os\n'), ((971, 998), 'numpy.save', 'np.save', (['file_path', 'q_table'], {}), '(file_path, q_table)\n', (978, 998), True, 'import numpy as np\n'), ((1747, 1814), 'numpy.array', 'np.array', (['[-0.38, -0.0, -0.16, -0.89, -0.11, -1.0, -0.92, 0.2, 0.0]'], {}), '([-0.38, -0.0, -0.16, -0.89, -0.11, -1.0, -0.92, 0.2, 0.0])\n', (1755, 1814), True, 'import numpy as np\n'), ((1826, 1895), 'numpy.array', 'np.array', (['[-0.28, 0.34, -0.03, -0.88, -0.13, -1.0, -0.86, -0.12, 0.0]'], {}), '([-0.28, 0.34, -0.03, -0.88, -0.13, -1.0, -0.86, -0.12, 0.0])\n', (1834, 1895), True, 'import numpy as np\n'), ((1907, 1974), 'numpy.array', 'np.array', (['[0.49, 0.16, -0.23, -0.65, -0.72, 1.0, -0.61, -0.62, 0.0]'], {}), '([0.49, 0.16, -0.23, -0.65, -0.72, 1.0, -0.61, -0.62, 0.0])\n', (1915, 1974), True, 'import numpy as np\n'), ((675, 689), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (683, 689), False, 'import os\n')] |
"""Generate case files"""
from argparse import ArgumentParser
from datetime import datetime
import json
import os
import numpy as np
from tqdm import trange
from tti_explorer import sensitivity
from tti_explorer.utils import ROOT_DIR
from tti_explorer.case_generator import get_generator_configs, CaseGenerator
def load_csv(pth):
return np.loadtxt(pth, dtype=int, skiprows=1, delimiter=",")
def get_output_file_name(config_name, target, i, seed):
if target is not None:
return f"{config_name}_{target}{i}_seed{seed}.json"
else:
return f"{config_name}_seed{seed}.json"
if __name__ == "__main__":
parser = ArgumentParser(description="Generate JSON files of cases and contacts")
parser.add_argument(
"config_name",
type=str,
help="Name for config of cases and contacts. Will pull from config.py.",
)
parser.add_argument(
"ncases", help="Number of cases w/ contacts to generate", type=int
)
parser.add_argument(
"output_folder",
help="Folder in which to store json files of cases and contacts",
type=str,
)
parser.add_argument(
"--seeds",
help="random seeds for each population",
default=-1,
type=int,
nargs="*",
)
parser.add_argument(
"--sensitivity",
help=(
"Method for sensitivity analysis "
"over parameters designated for sensitivity analysis in config.py. "
"Empty string does no sensitivity analysis. Default '%(default)s'."
),
default="",
type=str,
)
parser.add_argument(
"--n-pops",
help="Number of i.i.d. populations to draw. Ignored if seeds is given.",
type=int,
default=1,
)
parser.add_argument(
"--data-dir",
default=os.path.join(ROOT_DIR, "data", "bbc-pandemic"),
type=str,
help="Folder containing empirical tables of contact numbers. "
"Two files are expected: contact_distributions_o18.csv and contact_distributions_u18.csv",
)
args = parser.parse_args()
seeds = range(args.n_pops) if args.seeds == -1 else args.seeds
os.makedirs(args.output_folder, exist_ok=True)
case_configs, contacts_config = get_generator_configs(args.config_name, args.sensitivity)
over18 = load_csv(os.path.join(args.data_dir, "contact_distributions_o18.csv"))
under18 = load_csv(os.path.join(args.data_dir, "contact_distributions_u18.csv"))
for i, dct in enumerate(case_configs):
case_config = dct[sensitivity.CONFIG_KEY]
target = dct[sensitivity.TARGET_KEY]
print(target)
for seed in seeds:
case_generator = CaseGenerator(seed, over18, under18)
cases_and_contacts = list()
for _ in trange(args.ncases, smoothing=0, desc=f"Generating case set with seed {seed}."):
output = case_generator.generate_case_with_contacts(case_config, contacts_config)
cases_and_contacts.append(output)
full_output = dict(
timestamp=datetime.now().strftime("%c"),
case_config=case_config,
contacts_config=contacts_config,
args=dict(args.__dict__, seed=seed),
cases=cases_and_contacts,
)
fname = get_output_file_name(args.config_name, target, i, seed)
with open(os.path.join(args.output_folder, fname), "w") as f:
json.dump(full_output, f)
| [
"json.dump",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"tqdm.trange",
"datetime.datetime.now",
"numpy.loadtxt",
"tti_explorer.case_generator.CaseGenerator",
"tti_explorer.case_generator.get_generator_configs"
] | [((348, 401), 'numpy.loadtxt', 'np.loadtxt', (['pth'], {'dtype': 'int', 'skiprows': '(1)', 'delimiter': '""","""'}), "(pth, dtype=int, skiprows=1, delimiter=',')\n", (358, 401), True, 'import numpy as np\n'), ((647, 718), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Generate JSON files of cases and contacts"""'}), "(description='Generate JSON files of cases and contacts')\n", (661, 718), False, 'from argparse import ArgumentParser\n'), ((2187, 2233), 'os.makedirs', 'os.makedirs', (['args.output_folder'], {'exist_ok': '(True)'}), '(args.output_folder, exist_ok=True)\n', (2198, 2233), False, 'import os\n'), ((2271, 2328), 'tti_explorer.case_generator.get_generator_configs', 'get_generator_configs', (['args.config_name', 'args.sensitivity'], {}), '(args.config_name, args.sensitivity)\n', (2292, 2328), False, 'from tti_explorer.case_generator import get_generator_configs, CaseGenerator\n'), ((2352, 2412), 'os.path.join', 'os.path.join', (['args.data_dir', '"""contact_distributions_o18.csv"""'], {}), "(args.data_dir, 'contact_distributions_o18.csv')\n", (2364, 2412), False, 'import os\n'), ((2437, 2497), 'os.path.join', 'os.path.join', (['args.data_dir', '"""contact_distributions_u18.csv"""'], {}), "(args.data_dir, 'contact_distributions_u18.csv')\n", (2449, 2497), False, 'import os\n'), ((1842, 1888), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""data"""', '"""bbc-pandemic"""'], {}), "(ROOT_DIR, 'data', 'bbc-pandemic')\n", (1854, 1888), False, 'import os\n'), ((2717, 2753), 'tti_explorer.case_generator.CaseGenerator', 'CaseGenerator', (['seed', 'over18', 'under18'], {}), '(seed, over18, under18)\n', (2730, 2753), False, 'from tti_explorer.case_generator import get_generator_configs, CaseGenerator\n'), ((2816, 2895), 'tqdm.trange', 'trange', (['args.ncases'], {'smoothing': '(0)', 'desc': 'f"""Generating case set with seed {seed}."""'}), "(args.ncases, smoothing=0, desc=f'Generating case set with seed {seed}.')\n", (2822, 2895), False, 
'from tqdm import trange\n'), ((3501, 3526), 'json.dump', 'json.dump', (['full_output', 'f'], {}), '(full_output, f)\n', (3510, 3526), False, 'import json\n'), ((3433, 3472), 'os.path.join', 'os.path.join', (['args.output_folder', 'fname'], {}), '(args.output_folder, fname)\n', (3445, 3472), False, 'import os\n'), ((3104, 3118), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3116, 3118), False, 'from datetime import datetime\n')] |
"""
Testing for (Normalized) DCG metric.
"""
from . import helpers
import itertools
import numpy as np
import pyltr
class TestDCG(helpers.TestMetric):
def get_metric(self):
return pyltr.metrics.DCG(k=3)
def get_queries_with_values(self):
yield [], 0.0
yield [0], 0.0
yield [1], 1.0
yield [2], 3.0
yield [2, 1, 0], 3.6309297535714578
yield [0, 0, 0], 0.0
yield [2, 5, 1], 23.058822360715183
yield [2, 5, 1, 9], 23.058822360715183
def get_queries(self):
for i in range(0, 5):
for tup in itertools.product(*([(0, 1, 2.5)] * i)):
yield np.array(tup)
class TestNDCG(helpers.TestMetric):
def get_metric(self):
return pyltr.metrics.NDCG(k=3)
def get_queries_with_values(self):
yield [], 0.0
yield [0], 0.0
yield [1], 1.0
yield [2], 1.0
yield [2, 1, 0], 1.0
yield [1, 2, 0], 0.7967075809905066
yield [0, 0, 0], 0.0
yield [2, 5, 1], 0.6905329824556825
yield [2, 5, 1, 9], 0.04333885914794999
yield [3, 2, 1, 1], 1.0
def get_queries(self):
for i in range(0, 5):
for tup in itertools.product(*([(0, 1, 2.5)] * i)):
yield np.array(tup)
| [
"itertools.product",
"numpy.array",
"pyltr.metrics.NDCG",
"pyltr.metrics.DCG"
] | [((197, 219), 'pyltr.metrics.DCG', 'pyltr.metrics.DCG', ([], {'k': '(3)'}), '(k=3)\n', (214, 219), False, 'import pyltr\n'), ((752, 775), 'pyltr.metrics.NDCG', 'pyltr.metrics.NDCG', ([], {'k': '(3)'}), '(k=3)\n', (770, 775), False, 'import pyltr\n'), ((596, 635), 'itertools.product', 'itertools.product', (['*([(0, 1, 2.5)] * i)'], {}), '(*([(0, 1, 2.5)] * i))\n', (613, 635), False, 'import itertools\n'), ((1214, 1253), 'itertools.product', 'itertools.product', (['*([(0, 1, 2.5)] * i)'], {}), '(*([(0, 1, 2.5)] * i))\n', (1231, 1253), False, 'import itertools\n'), ((659, 672), 'numpy.array', 'np.array', (['tup'], {}), '(tup)\n', (667, 672), True, 'import numpy as np\n'), ((1277, 1290), 'numpy.array', 'np.array', (['tup'], {}), '(tup)\n', (1285, 1290), True, 'import numpy as np\n')] |
'''
Created on: see version log.
@author: rigonz
coding: utf-8
IMPORTANT: requires py3.6 (rasterio)
Script that:
1) reads a series of raster files,
2) computes aggregated statistics,
3) outputs the results.
The input data files correspond to countries and represent population.
For each country there is one file with the count of people per cell/pixel.
The input raster data is prepared separately, in specific GIS software.
The script calculates the projected area of each cell and from it the density.
The calculations are run over each country/input file.
The output is a raster file with the population density in the area of the
input rasters.
Version log.
R0 (20210506):
First trials.
R1 (20210507):
Creates a new geotiff with the densities and saves it.
Does not create plots.
'''
# %% Imports.
from pyproj import Geod
import rasterio # IMPORTANT: requires py3.6
import numpy as np
# %% Directories.
RootDirIn = 'D:/0 DOWN/zz EXTSave/GIS/POP/EUR/SHP/WP/'
# Country codes.
l_ctry = ['FRA', 'ITA', 'DEU', 'ESP']
l_ctry = ['ITA', 'DEU', 'ESP']
#l_ctry = ['FRA']
# %% Read and compute.
# Auxiliaries:
geod = Geod('+a=6378137 +f=0.0033528106647475126')
# Main loop:
for ctry in l_ctry:
# Update the user with the progress:
print('\nStarting {}.'.format(ctry))
# Open file and read data:
print('Opening and reading the data files...')
FileNameI = RootDirIn + ctry + '_ppp_2020_UNadj_constrained.tif'
dataset = rasterio.open(FileNameI)
band_c = dataset.read(1)
# Check CRS:
try:
if dataset.crs.data['init'] != 'epsg:4326':
print('WARNING: CRS is not EPSG4326 for {}.'.format(ctry))
except:
print('WARNING: CRS is not available for {}.'.format(ctry))
# Calculate areas:
print('Calculating the densities...')
dlon = dataset.transform[0] # increase in lon between adjacent cells; dataset.res[0]
dlat = dataset.transform[4] # increase in lat between adjacent cells; dataset.res[1]
lon0 = dataset.transform[2] # lon of upper left cell
lat0 = dataset.transform[5] # lat of upper left cell
band_d = np.full_like(band_c, -99999)
count = int(dataset.shape[0]/20)
for i in range(0, dataset.shape[0] - 1, 1):
for j in range(0, dataset.shape[1] - 1, 1):
if band_c[i, j] > 0:
lons = [lon0 + dlon*j, lon0 + dlon*j, lon0 + dlon*(j+1), lon0 + dlon*(j+1)]
lats = [lat0 + dlat*i, lat0 + dlat*(i+1), lat0 + dlat*(i+1), lat0 + dlat*i]
area, perim = geod.polygon_area_perimeter(lons, lats)
band_d[i, j] = band_c[i, j] / area * 1E6 # area in m2, d in hab/km2
# Update the user with the progress:
if i % count == 0:
print('PopDens_{}: {:4.1f}%'.format(ctry, i/dataset.shape[0]*100))
# Save the results:
print('Saving the results...')
FileNameO = FileNameI.replace('.tif', '_d.tif')
with rasterio.open(FileNameO,'w', driver='GTiff',
height=band_d.shape[0], width=band_d.shape[1], count=1,
dtype=band_d.dtype, crs=dataset.crs,
transform=dataset.transform, compress='lzw') as dataset_d:
dataset_d.write(band_d, 1)
# %% Script done.
print('\nScript completed. Thanks!')
| [
"numpy.full_like",
"rasterio.open",
"pyproj.Geod"
] | [((1126, 1169), 'pyproj.Geod', 'Geod', (['"""+a=6378137 +f=0.0033528106647475126"""'], {}), "('+a=6378137 +f=0.0033528106647475126')\n", (1130, 1169), False, 'from pyproj import Geod\n'), ((1453, 1477), 'rasterio.open', 'rasterio.open', (['FileNameI'], {}), '(FileNameI)\n', (1466, 1477), False, 'import rasterio\n'), ((2121, 2149), 'numpy.full_like', 'np.full_like', (['band_c', '(-99999)'], {}), '(band_c, -99999)\n', (2133, 2149), True, 'import numpy as np\n'), ((2933, 3125), 'rasterio.open', 'rasterio.open', (['FileNameO', '"""w"""'], {'driver': '"""GTiff"""', 'height': 'band_d.shape[0]', 'width': 'band_d.shape[1]', 'count': '(1)', 'dtype': 'band_d.dtype', 'crs': 'dataset.crs', 'transform': 'dataset.transform', 'compress': '"""lzw"""'}), "(FileNameO, 'w', driver='GTiff', height=band_d.shape[0], width\n =band_d.shape[1], count=1, dtype=band_d.dtype, crs=dataset.crs,\n transform=dataset.transform, compress='lzw')\n", (2946, 3125), False, 'import rasterio\n')] |
# -*- coding: utf-8 -*-
# /usr/bin/python2
'''
June 2017 by <NAME>.
<EMAIL>.
https://www.github.com/kyubyong/neurobind
'''
from __future__ import print_function
import re
from hyperparams import Hyperparams as hp
import numpy as np
import tensorflow as tf
def load_vocab():
vocab = "ACGT"
nucl2idx = {nucl: idx for idx, nucl in enumerate(vocab)}
idx2nucl = {idx: nucl for idx, nucl in enumerate(vocab)}
return nucl2idx, idx2nucl
def load_data(mode="train"):
nucl2idx, idx2nucl = load_vocab()
def to_idx(probe):
return [nucl2idx[nucl] for nucl in probe.strip()]
xs, ys = [], []
f = hp.train if mode in ("train", "val") else hp.test
for line in open(f, "r").read().splitlines()[1:]:
probe, intensity = line.split("\t")
try:
x = to_idx(line.split("\t")[0])
y = float(intensity.split(".")[0])
except:
continue
xs.append(x)
ys.append(y)
X = np.array(xs, np.int32)
Y = np.array(ys, np.float32)
if mode == "test":
return X, Y
elif mode == "train":
return X[:int(len(X)*.7)], Y[:int(len(X)*.7)]
elif mode == "val":
return X[int(len(X)*.7):], Y[int(len(X)*.7):]
else:
raise ValueError("Mode must either `train`, `val`, or `test`.")
def get_batch_data():
# Load data
X, Y = load_data()
# calc total batch count
num_batch = len(X) // hp.batch_size
# Convert to tensor
X = tf.convert_to_tensor(X, tf.int32)
Y = tf.convert_to_tensor(Y, tf.float32)
# Create Queues
input_queues = tf.train.slice_input_producer([X, Y])
# create batch queues
x, y = tf.train.batch(input_queues,
num_threads=8,
batch_size=hp.batch_size,
capacity=hp.batch_size * 64,
allow_smaller_final_batch=False)
return x, y, num_batch # (N, T), (N, T), ()
| [
"tensorflow.convert_to_tensor",
"tensorflow.train.batch",
"numpy.array",
"tensorflow.train.slice_input_producer"
] | [((969, 991), 'numpy.array', 'np.array', (['xs', 'np.int32'], {}), '(xs, np.int32)\n', (977, 991), True, 'import numpy as np\n'), ((1000, 1024), 'numpy.array', 'np.array', (['ys', 'np.float32'], {}), '(ys, np.float32)\n', (1008, 1024), True, 'import numpy as np\n'), ((1474, 1507), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['X', 'tf.int32'], {}), '(X, tf.int32)\n', (1494, 1507), True, 'import tensorflow as tf\n'), ((1516, 1551), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Y', 'tf.float32'], {}), '(Y, tf.float32)\n', (1536, 1551), True, 'import tensorflow as tf\n'), ((1592, 1629), 'tensorflow.train.slice_input_producer', 'tf.train.slice_input_producer', (['[X, Y]'], {}), '([X, Y])\n', (1621, 1629), True, 'import tensorflow as tf\n'), ((1668, 1803), 'tensorflow.train.batch', 'tf.train.batch', (['input_queues'], {'num_threads': '(8)', 'batch_size': 'hp.batch_size', 'capacity': '(hp.batch_size * 64)', 'allow_smaller_final_batch': '(False)'}), '(input_queues, num_threads=8, batch_size=hp.batch_size,\n capacity=hp.batch_size * 64, allow_smaller_final_batch=False)\n', (1682, 1803), True, 'import tensorflow as tf\n')] |
"""
pysteps.postprocessing.ensemblestats
====================================
Methods for the computation of ensemble statistics.
.. autosummary::
:toctree: ../generated/
mean
excprob
"""
import numpy as np
def mean(X, ignore_nan=False, X_thr=None):
"""Compute the mean value from a forecast ensemble field.
Parameters
----------
X : array_like
Array of shape (n_members,m,n) containing an ensemble of forecast
fields of shape (m,n).
ignore_nan : bool
If True, ignore nan values.
X_thr : float
Optional threshold for computing the ensemble mean. Values below X_thr
are ignored.
Returns
-------
out : ndarray
Array of shape (m,n) containing the ensemble mean.
"""
X = np.asanyarray(X)
X_ndim = X.ndim
if X_ndim > 3 or X_ndim <= 1:
raise Exception('Number of dimensions of X should be 2 or 3.' +
'It was: {}'.format(X_ndim))
elif X.ndim == 2:
X = X[None, ...]
if ignore_nan or X_thr is not None:
if X_thr is not None:
X = X.copy()
X[X < X_thr] = np.nan
return np.nanmean(X, axis=0)
else:
return np.mean(X, axis=0)
def excprob(X, X_thr, ignore_nan=False):
"""For a given forecast ensemble field, compute exceedance probabilities
for the given intensity thresholds.
Parameters
----------
X : array_like
Array of shape (k,m,n,...) containing an k-member ensemble of forecasts
with shape (m,n,...).
X_thr : float or a sequence of floats
Intensity threshold(s) for which the exceedance probabilities are
computed.
ignore_nan : bool
If True, ignore nan values.
Returns
-------
out : ndarray
Array of shape (len(X_thr),m,n) containing the exceedance probabilities
for the given intensity thresholds.
If len(X_thr)=1, the first dimension is dropped.
"""
# Checks
X = np.asanyarray(X)
X_ndim = X.ndim
if X_ndim < 3:
raise Exception('Number of dimensions of X should be 3 or more.' +
' It was: {}'.format(X_ndim))
P = []
if np.isscalar(X_thr):
X_thr = [X_thr]
scalar_thr = True
else:
scalar_thr = False
for x in X_thr:
X_ = X.copy()
X_[X >= x] = 1.0
X_[X < x] = 0.0
if ignore_nan:
P.append(np.nanmean(X_, axis=0))
else:
P.append(np.mean(X_, axis=0))
if not scalar_thr:
return np.stack(P)
else:
return P[0]
| [
"numpy.stack",
"numpy.isscalar",
"numpy.asanyarray",
"numpy.mean",
"numpy.nanmean"
] | [((780, 796), 'numpy.asanyarray', 'np.asanyarray', (['X'], {}), '(X)\n', (793, 796), True, 'import numpy as np\n'), ((2002, 2018), 'numpy.asanyarray', 'np.asanyarray', (['X'], {}), '(X)\n', (2015, 2018), True, 'import numpy as np\n'), ((2208, 2226), 'numpy.isscalar', 'np.isscalar', (['X_thr'], {}), '(X_thr)\n', (2219, 2226), True, 'import numpy as np\n'), ((1170, 1191), 'numpy.nanmean', 'np.nanmean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1180, 1191), True, 'import numpy as np\n'), ((1217, 1235), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1224, 1235), True, 'import numpy as np\n'), ((2572, 2583), 'numpy.stack', 'np.stack', (['P'], {}), '(P)\n', (2580, 2583), True, 'import numpy as np\n'), ((2453, 2475), 'numpy.nanmean', 'np.nanmean', (['X_'], {'axis': '(0)'}), '(X_, axis=0)\n', (2463, 2475), True, 'import numpy as np\n'), ((2512, 2531), 'numpy.mean', 'np.mean', (['X_'], {'axis': '(0)'}), '(X_, axis=0)\n', (2519, 2531), True, 'import numpy as np\n')] |
# TODO: avoid useless comparisons
from collections import defaultdict
from numbers import Number
from typing import Dict, List, Union
import numba as nb
import numpy as np
from numba import set_num_threads
from .frozenset_dict import FrozensetDict
from .metrics import (
average_precision,
hits,
ndcg,
ndcg_burges,
precision,
r_precision,
recall,
reciprocal_rank,
)
from .qrels import Qrels
from .report import Report
from .run import Run
from .statistical_testing import fisher_randomization_test
from .utils import python_dict_to_typed_list
def metric_functions_switch(metric):
if metric == "hits":
return hits
elif metric == "precision":
return precision
elif metric == "recall":
return recall
elif metric == "r-precision":
return r_precision
elif metric == "mrr":
return reciprocal_rank
elif metric == "map":
return average_precision
elif metric == "ndcg":
return ndcg
elif metric == "ndcg_burges":
return ndcg_burges
else:
raise ValueError(
f"Metric {metric} not supported. Supported metrics are `hits`, `precision`, `recall`, `r-precision`, `mrr`, `map`, `ndcg`, and `ndcg_burges`."
)
def format_metrics(metrics: Union[List[str], str]) -> List[str]:
if type(metrics) == str:
metrics = [metrics]
return metrics
def format_k(k: Union[List[int], int], metrics: List[str]) -> Dict[str, int]:
if type(k) == int:
return {m: k for m in metrics}
elif type(k) == list:
return {m: k[i] for i, m in enumerate(list(metrics))}
return k
def extract_metric_and_k(metric):
metric_splitted = metric.split("@")
m = metric_splitted[0]
k = int(metric_splitted[1]) if len(metric_splitted) > 1 else 0
return m, k
def convert_qrels(qrels):
if type(qrels) == Qrels:
return qrels.to_typed_list()
elif type(qrels) == dict:
return python_dict_to_typed_list(qrels, sort=True)
return qrels
def convert_run(run):
if type(run) == Run:
return run.to_typed_list()
elif type(run) == dict:
return python_dict_to_typed_list(run, sort=True)
return run
def check_keys(qrels, run):
assert qrels.keys() == run.keys(), "Qrels and Run query ids do not match"
def evaluate(
qrels: Union[
Qrels,
Dict[str, Dict[str, Number]],
nb.typed.typedlist.List,
np.ndarray,
],
run: Union[
Run,
Dict[str, Dict[str, Number]],
nb.typed.typedlist.List,
np.ndarray,
],
metrics: Union[List[str], str],
return_mean: bool = True,
threads: int = 0,
save_results_in_run=True,
) -> Union[Dict[str, float], float]:
"""Compute performance scores for all the provided metrics."""
if len(qrels) < 10:
set_num_threads(1)
elif threads != 0:
set_num_threads(threads)
if type(qrels) in [Qrels, dict] and type(run) in [Run, dict]:
check_keys(qrels, run)
_qrels = convert_qrels(qrels)
_run = convert_run(run)
metrics = format_metrics(metrics)
assert all(type(m) == str for m in metrics), "Metrics error"
# Compute metrics ----------------------------------------------------------
metric_scores_dict = {}
for metric in metrics:
m, k = extract_metric_and_k(metric)
metric_scores_dict[metric] = metric_functions_switch(m)(
_qrels,
_run,
k=k,
)
# Save results in Run ------------------------------------------------------
if type(run) == Run and save_results_in_run:
for m, scores in metric_scores_dict.items():
run.mean_scores[m] = np.mean(scores)
for i, q_id in enumerate(run.get_query_ids()):
run.scores[m][q_id] = scores[i]
# Prepare output -----------------------------------------------------------
if return_mean:
for m, scores in metric_scores_dict.items():
metric_scores_dict[m] = np.mean(scores)
if len(metrics) == 1:
return metric_scores_dict[m]
return metric_scores_dict
def compute_statistical_significance(
control_metric_scores,
treatment_metric_scores,
n_permutations: int = 1000,
max_p: float = 0.01,
random_seed: int = 42,
):
metric_p_values = {}
for m in list(control_metric_scores):
(
control_mean,
treatment_mean,
p_value,
significant,
) = fisher_randomization_test(
control_metric_scores[m],
treatment_metric_scores[m],
n_permutations,
max_p,
random_seed,
)
metric_p_values[m] = {
"p_value": p_value,
"significant": significant,
}
return metric_p_values
def compare(
qrels: Qrels,
runs: List[Run],
metrics: Union[List[str], str],
n_permutations: int = 1000,
max_p: float = 0.01,
random_seed: int = 42,
threads: int = 0,
):
metrics = format_metrics(metrics)
assert all(type(m) == str for m in metrics), "Metrics error"
model_names = []
results = defaultdict(dict)
comparisons = FrozensetDict()
metric_scores = {}
# Compute scores for each run for each query -------------------------------
for run in runs:
model_names.append(run.name)
metric_scores[run.name] = evaluate(
qrels=qrels,
run=run,
metrics=metrics,
return_mean=False,
threads=threads,
)
for m in metrics:
results[run.name][m] = np.mean(metric_scores[run.name][m])
# Run statistical testing --------------------------------------------------
for i, control in enumerate(runs):
control_metric_scores = metric_scores[control.name]
for j, treatment in enumerate(runs):
if i < j:
treatment_metric_scores = metric_scores[treatment.name]
# Compute statistical significance
comparisons[
frozenset([control.name, treatment.name])
] = compute_statistical_significance(
control_metric_scores,
treatment_metric_scores,
n_permutations,
max_p,
random_seed,
)
# Compute win / tie / lose -------------------------------------------------
win_tie_loss = defaultdict(dict)
for control in runs:
for treatment in runs:
for m in metrics:
control_scores = metric_scores[control.name][m]
treatment_scores = metric_scores[treatment.name][m]
win_tie_loss[(control.name, treatment.name)][m] = {
"W": sum(control_scores > treatment_scores),
"T": sum(control_scores == treatment_scores),
"L": sum(control_scores < treatment_scores),
}
return Report(
model_names=model_names,
results=dict(results),
comparisons=comparisons,
metrics=metrics,
max_p=max_p,
win_tie_loss=dict(win_tie_loss),
)
| [
"collections.defaultdict",
"numba.set_num_threads",
"numpy.mean"
] | [((5182, 5199), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (5193, 5199), False, 'from collections import defaultdict\n'), ((6504, 6521), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (6515, 6521), False, 'from collections import defaultdict\n'), ((2851, 2869), 'numba.set_num_threads', 'set_num_threads', (['(1)'], {}), '(1)\n', (2866, 2869), False, 'from numba import set_num_threads\n'), ((2901, 2925), 'numba.set_num_threads', 'set_num_threads', (['threads'], {}), '(threads)\n', (2916, 2925), False, 'from numba import set_num_threads\n'), ((3718, 3733), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (3725, 3733), True, 'import numpy as np\n'), ((4032, 4047), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (4039, 4047), True, 'import numpy as np\n'), ((5648, 5683), 'numpy.mean', 'np.mean', (['metric_scores[run.name][m]'], {}), '(metric_scores[run.name][m])\n', (5655, 5683), True, 'import numpy as np\n')] |
"""Helper module for calculating the live activation counts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from morph_net.framework import op_regularizer_manager as orm
import numpy as np
import tensorflow as tf
from typing import Text, Sequence, Dict, Optional, IO, Iterable, Callable
_SUPPORTED_OPS = ['Conv2D']
_ALIVE_FILENAME = 'alive'
def compute_alive_counts(
alive_vectors: Dict[Text, Sequence[bool]]) -> Dict[Text, int]:
"""Computes alive counts.
Args:
alive_vectors: A mapping from op_name to a vector where each element says
whether the corresponding output activation is alive.
Returns:
Mapping from op_name to the number of its alive output activations.
"""
return {
op_name: int(np.sum(alive_vector))
for op_name, alive_vector in alive_vectors.items()
}
class StructureExporter(object):
  """Reports statistics about the current state of regularization.

  Obtains live activation counts for supported ops: a map from each op name
  to its count of alive activations (filters). Optionally, thresholds the counts
  so that very low counts are reported as 0. Currently, only supports Conv2D.
  """

  def __init__(self,
               op_regularizer_manager: orm.OpRegularizerManager,
               remove_common_prefix: bool = False) -> None:
    """Build a StructureExporter object.

    Args:
      op_regularizer_manager: An OpRegularizerManager, an object that contains
        info about every op we care about and its corresponding regularizer.
      remove_common_prefix: A bool. If True, determine if all op names start
        with the same prefix (up to and including the first '/'), and if so,
        skip that prefix in exported data.
    """
    self._op_regularizer_manager = op_regularizer_manager
    self._alive_tensors = {}  # type: Dict[Text, tf.Tensor]
    self._alive_vectors = None  # type: Optional[Dict[Text, Sequence[bool]]]
    # Collect an int32 alive-mask tensor for every supported op that has a
    # regularizer; ops without one are only warned about.
    for op in self._op_regularizer_manager.ops:
      if op.type not in _SUPPORTED_OPS:
        continue
      opreg = self._op_regularizer_manager.get_regularizer(op)
      if opreg:
        # TODO(p1): use bool here (no cast), and then convert later?
        self._alive_tensors[op.name] = tf.cast(opreg.alive_vector, tf.int32)
      else:
        tf.logging.warning('No regularizer found for: %s', op.name)
    if remove_common_prefix:
      # Strip the shared leading scope (up to the first '/') from op names,
      # if one exists; see get_remove_common_prefix_op.
      rename_op = get_remove_common_prefix_op(self._alive_tensors)
      self._alive_tensors = {
          rename_op(k): v for k, v in self._alive_tensors.items()
      }

  @property
  def tensors(self):
    """The list of tensors required to compute statistics.

    Returns:
      Dict: op name -> alive vector tensor
    """
    return self._alive_tensors

  def populate_tensor_values(self, values: Dict[Text, Sequence[bool]]) -> None:
    """Stores evaluated alive vectors for later count computation.

    Args:
      values: mapping from op name to its evaluated alive vector; must cover
        exactly the op names in `self.tensors`.
    """
    # TODO(p1): make this a hierarchy with 'alive_vectors' key at the top
    assert sorted(values) == sorted(self.tensors)
    self._alive_vectors = values

  def get_alive_counts(self) -> Dict[Text, int]:
    """Computes alive counts.

    populate_tensor_values() must have been called earlier.

    Returns:
      A dict {op_name: alive_count}, alive_count is a scalar integer tf.Tensor.

    Raises:
      RuntimeError: tensor values not populated.
    """
    if self._alive_vectors is None:
      raise RuntimeError('Tensor values not populated.')
    # TODO(p1): consider warning if same values are used twice?
    return compute_alive_counts(self._alive_vectors)

  def save_alive_counts(self, f: IO[bytes]) -> None:
    """Saves live counts to a file.

    Args:
      f: a file object where alive counts are saved.
    """
    # default=str makes non-JSON-serializable values degrade to their string
    # representation instead of raising.
    f.write(
        json.dumps(
            self.get_alive_counts(), indent=2, sort_keys=True, default=str))

  def create_file_and_save_alive_counts(self, train_dir: Text,
                                        global_step: tf.Tensor) -> None:
    """Creates a file and saves live counts to it.

    Creates the directory {train_dir}/learned_structure/ and saves the current
    alive counts to {path}/{_ALIVE_FILENAME}_{global_step} and overwrites
    {path}/{_ALIVE_FILENAME}.

    Args:
      train_dir: where to export the alive counts.
      global_step: current value of global step, used as a suffix in filename.
    """
    current_filename = '%s_%s' % (_ALIVE_FILENAME, global_step)
    directory = os.path.join(train_dir, 'learned_structure')
    try:
      tf.gfile.MkDir(directory)
    except tf.errors.OpError:
      # Probably already exists. If not, we'll see the error in the next line.
      pass
    # Write both a step-suffixed snapshot and the always-latest file.
    with tf.gfile.Open(os.path.join(directory, current_filename), 'w') as f:
      self.save_alive_counts(f)
    with tf.gfile.Open(os.path.join(directory, _ALIVE_FILENAME), 'w') as f:
      self.save_alive_counts(f)
# TODO(p1): maybe check that we still end up with unique names after prefix
# removal, and do nothing if that's not the case?
def get_remove_common_prefix_op(
    iterable: Iterable[Text]) -> Callable[[Text], Text]:
  """Obtains a function that removes a common prefix.

  If every string in `iterable` starts with the same substring (up to and
  including the first '/'), returns a function str->str that drops that
  prefix. Otherwise returns the identity function.

  Args:
    iterable: strings to process.

  Returns:
    A function that removes the common prefix from a string.
  """
  identity = lambda x: x
  try:
    first = next(iter(iterable))
  except StopIteration:
    return identity
  slash_pos = first.find('/')
  if slash_pos < 0:
    return identity
  prefix = first[:slash_pos + 1]
  if any(not name.startswith(prefix) for name in iterable):
    return identity
  prefix_len = len(prefix)
  return lambda item: item[prefix_len:]
| [
"numpy.sum",
"tensorflow.logging.warning",
"tensorflow.cast",
"tensorflow.gfile.MkDir",
"os.path.join"
] | [((4432, 4476), 'os.path.join', 'os.path.join', (['train_dir', '"""learned_structure"""'], {}), "(train_dir, 'learned_structure')\n", (4444, 4476), False, 'import os\n'), ((822, 842), 'numpy.sum', 'np.sum', (['alive_vector'], {}), '(alive_vector)\n', (828, 842), True, 'import numpy as np\n'), ((4492, 4517), 'tensorflow.gfile.MkDir', 'tf.gfile.MkDir', (['directory'], {}), '(directory)\n', (4506, 4517), True, 'import tensorflow as tf\n'), ((2297, 2334), 'tensorflow.cast', 'tf.cast', (['opreg.alive_vector', 'tf.int32'], {}), '(opreg.alive_vector, tf.int32)\n', (2304, 2334), True, 'import tensorflow as tf\n'), ((2355, 2414), 'tensorflow.logging.warning', 'tf.logging.warning', (['"""No regularizer found for: %s"""', 'op.name'], {}), "('No regularizer found for: %s', op.name)\n", (2373, 2414), True, 'import tensorflow as tf\n'), ((4661, 4702), 'os.path.join', 'os.path.join', (['directory', 'current_filename'], {}), '(directory, current_filename)\n', (4673, 4702), False, 'import os\n'), ((4770, 4810), 'os.path.join', 'os.path.join', (['directory', '_ALIVE_FILENAME'], {}), '(directory, _ALIVE_FILENAME)\n', (4782, 4810), False, 'import os\n')] |
from abc import ABC
import numpy as np
from shared_utils.constants import EMPTY_ARRAY, EMPTY_ARRAY_2D
class RKStep(ABC):
    """Runge-Kutta scheme described by its Butcher tableau.

    Butcher - 2016 - NUMERICAL METHODS FOR ORDINARY DIFFERENTIAL EQUATIONS - pg 98

        c | A
        ----------
          | b^T

    a_mat - dependence of the stages on the derivatives found at other stages
    b - vector of quadrature weights
    c - positions within step (row sums of ``a_mat``)
    """

    a_mat = EMPTY_ARRAY_2D
    b = EMPTY_ARRAY

    def __init__(self):
        # c_i = sum_j A[i, j]: accumulate the columns of A elementwise.
        self.c = np.array(sum(column for column in self.a_mat.T))

    def stability_region_func(self, z: complex):
        """Evaluate the stability function R(z) = 1 + z * b^T (I - z A)^{-1} 1.

        Butcher - 2016 - NUMERICAL METHODS FOR ORDINARY DIFFERENTIAL EQUATIONS - pg 98
        """
        identity = np.eye(*self.a_mat.shape)
        resolvent = np.linalg.inv(identity - z * self.a_mat)
        return 1. + z * np.sum(self.b @ resolvent)

    def plot_stability_region(self, n=150, max_len=10, max_root_steps=10):
        pass
| [
"numpy.eye"
] | [((738, 763), 'numpy.eye', 'np.eye', (['*self.a_mat.shape'], {}), '(*self.a_mat.shape)\n', (744, 763), True, 'import numpy as np\n')] |
''' Analysis of trained RNN '''
import sys
import random
import numpy as np
from scipy import stats
import tensorflow as tf
from sklearn.metrics import r2_score
from sklearn.decomposition import FastICA, PCA
import matplotlib.pyplot as plt
def lstm_states(sess, cell, x, dtype=tf.float32):
    ''' Get LSTM states at all time steps

    Runs `cell` one step at a time over the whole sequence, feeding the
    previous (c, h) state back in, and records the state after every step.
    The initial (c, h) state is drawn uniformly from [-1, 1).

    Parameters
    ----------
    sess : TensorFlow session used to evaluate the step ops.
    cell : cell whose `state_size` is a (c_size, h_size) pair; calling it
        builds the ops for one step.
    x : array of shape (batch_size, num_steps, input_size).
    dtype : dtype of the created placeholders.

    Returns
    -------
    (c_states, h_states), each stacked along the time axis to shape
    (batch_size, num_steps, state_size).
    '''
    batch_size, num_steps, input_size = x.shape
    c_size, h_size = cell.state_size
    # Placeholders for a single time step; the same ops are evaluated
    # repeatedly with the previous state fed back in.
    curr_x = tf.placeholder(dtype, [None, input_size], name='curr_x')
    curr_c = tf.placeholder(dtype, [None, c_size], name='curr_c')
    curr_h = tf.placeholder(dtype, [None, h_size], name='curr_h')
    _, new_state = cell(curr_x, [curr_c, curr_h])
    # Random initial state in [-1, 1).
    c_state = np.random.uniform(-1, 1, (batch_size, c_size))
    h_state = np.random.uniform(-1, 1, (batch_size, h_size))
    c_states = []
    h_states = []
    for j in range(num_steps):
        feed_dict = {curr_x: x[:, j, :], curr_c: c_state, curr_h: h_state}
        (new_c_state, new_h_state) = sess.run(new_state, feed_dict=feed_dict)
        c_states.append(new_c_state)
        h_states.append(new_h_state)
        c_state = new_c_state
        h_state = new_h_state
    # Stack along the time axis -> (batch_size, num_steps, state_size).
    c_states = np.stack(c_states, axis=1)
    h_states = np.stack(h_states, axis=1)
    return c_states, h_states
def component_analysis(states, n_components):
    ''' PCA and ICA of states

    Fits both decompositions to the flattened states and returns, for each,
    the fraction of the total variance carried by every component.
    '''
    state_size = states.shape[-1]
    flat_states = states.reshape((-1, state_size))
    pca = PCA(n_components=n_components)
    pca.fit(flat_states)
    ica = FastICA(n_components=n_components)
    ica.fit(flat_states)
    # Orthogonal the unmixing vectors
    unmixing = ica.components_
    unmixing /= np.linalg.norm(unmixing, axis=1)[:, None]
    projected_var = np.var(flat_states.dot(unmixing.T), axis=0)
    projected_var = np.sort(projected_var)[::-1]
    return pca.explained_variance_ratio_, projected_var / sum(projected_var)
def plot_component_analysis(pca_var, ica_var):
    ''' Plot the ratios of variances of components '''
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    panels = (
        (axes[0], pca_var, ica_var, 'Ratio',
         'Ratio of Variance of Component'),
        (axes[1], np.cumsum(pca_var), np.cumsum(ica_var), 'Cumulative Ratio',
         'Cumulative Ratio of Variance of Component'),
    )
    # Left panel: per-component ratios; right panel: their cumulative sums.
    for axis, pca_curve, ica_curve, ylabel, title in panels:
        axis.plot(pca_curve, label='PCA', alpha=.4, linestyle='--')
        axis.plot(ica_curve, label='ICA')
        axis.legend()
        axis.grid(True)
        axis.set_xlabel('Component')
        axis.set_ylabel(ylabel)
        axis.set_title(title)
def build_mlp_graph(n_features, n_hidden, y_type,
                    variable_scope, dtype=tf.float32):
    ''' Build graph for MLP regressor

    Builds a one-hidden-layer (tanh) MLP under `variable_scope`, with both
    weight matrices initialised from a standard Laplace distribution.

    Parameters
    ----------
    n_features : input dimensionality.
    n_hidden : number of hidden units.
    y_type : if it starts with 'n' (numerical), the head is a single output
        trained with a 0.5 * |y - y_pred| loss; otherwise a two-way softmax
        cross-entropy against targets stacked as [y, 1 - y].
    variable_scope : name of the variable scope the graph is created in.
    dtype : dtype for placeholders and variables.

    Returns
    -------
    dict with placeholders ('mlp_x', 'mlp_y'), the prediction ('y_pred'),
    the scalar 'loss', and the first-layer parameters ('w1', 'b1').
    '''
    with tf.variable_scope(variable_scope):
        x = tf.placeholder(dtype, [None, n_features], name='mlp_x')
        y = tf.placeholder(dtype, [None], name='mlp_y')
        y_pred_dim = 1 if y_type.lower().startswith('n') else 2
        # Laplace(0, 1) initialisation for both weight matrices.
        laplacian = tf.distributions.Laplace(0.0, 1.0)
        w1_init = laplacian.sample([n_features, n_hidden])
        w2_init = laplacian.sample([n_hidden, y_pred_dim])
        w1 = tf.get_variable('w1', dtype=dtype, initializer=w1_init)
        b1 = tf.get_variable('b1', [n_hidden], dtype)
        h1 = tf.tanh(tf.nn.xw_plus_b(x, w1, b1))
        w2 = tf.get_variable('w2', dtype=dtype, initializer=w2_init)
        b2 = tf.get_variable('b2', [y_pred_dim], dtype)
        y_pred = tf.squeeze(tf.nn.xw_plus_b(h1, w2, b2))
        if y_type.lower().startswith('n'):  # numerical
            # loss = tf.losses.mean_squared_error(y, y_pred)
            # With e = y - y_pred: 0.5*e - min(e, 0) equals 0.5*|e|,
            # i.e. a scaled absolute-error (L1) loss.
            loss = .5 * (y - y_pred) - tf.minimum(y - y_pred, 0)
            loss = tf.reduce_mean(loss)
        else:  # categorical
            loss = tf.losses.softmax_cross_entropy(
                tf.stack([y, 1 - y], axis=1), y_pred)
    return {'mlp_x': x, 'mlp_y': y, 'y_pred': y_pred, 'loss': loss,
            'w1': w1, 'b1': b1}
def mlp_reg(sess, x, y, y_type, n_hidden, variable_scope, dtype=tf.float32,
            batch_size=50, lr0=3e-3, lr_decay=(50, .99), n_iter=500,
            print_every=5):
    ''' Perform MLP regression

    Trains the MLP built by `build_mlp_graph` with momentum SGD on randomly
    sampled mini-batches.  Training can be interrupted with Ctrl-C
    (KeyboardInterrupt is caught); the graph symbols are returned either way.

    Parameters
    ----------
    sess : TensorFlow session to run training in.
    x : input array; the first axis indexes samples, the last axis holds the
        `p = x.shape[-1]` features (batches are flattened to (-1, p)).
    y : target values, indexed along the first axis like `x`.
    y_type : passed to `build_mlp_graph`; starts with 'n' for numerical.
    n_hidden : number of hidden units.
    variable_scope : scope name for the freshly created variables.
    dtype : dtype for the graph.
    batch_size : samples drawn (without replacement) per iteration.
    lr0 : initial learning rate.
    lr_decay : (decay_steps, decay_rate) of the exponential lr schedule.
    n_iter : number of SGD iterations.
    print_every : period (in iterations) of progress logging to stderr.

    Returns
    -------
    The symbol dict from `build_mlp_graph` for the trained network.
    '''
    p = x.shape[-1]
    symbols = build_mlp_graph(p, n_hidden, y_type, variable_scope, dtype)
    with tf.variable_scope(variable_scope):
        global_step = tf.Variable(0, name='step', trainable=False)
        learning_rate = tf.train.exponential_decay(lr0, global_step,
                                                   lr_decay[0], lr_decay[1])
        optimizer = (tf.train.MomentumOptimizer(learning_rate, momentum=.5)
                     .minimize(symbols['loss'], global_step=global_step))
    # Initialise only the variables of this scope, leaving the rest of the
    # graph untouched.
    var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                 scope=variable_scope)
    sess.run(tf.variables_initializer(var_list))
    try:
        for i in range(n_iter):
            msg = f'Iter {i}'
            batch = random.sample(range(x.shape[0]), batch_size)
            # Run SGD
            feed_dict = {symbols['mlp_x']: x[batch].reshape((-1, p)),
                         symbols['mlp_y']: y[batch].ravel()}
            _, y_pred, loss = sess.run([optimizer, symbols['y_pred'],
                                        symbols['loss']],
                                       feed_dict=feed_dict)
            msg += f' Train loss {loss:.4f}'
            # R2 is only meaningful for the numerical head; Spearman rank
            # correlation is reported for both target types.
            if y_type.lower().startswith('n'):
                r2 = r2_score(y[batch].ravel(), y_pred)
                msg += f' R2 {r2}'
            correl = stats.spearmanr(y[batch].ravel(), y_pred)
            msg += f' Spearman R {correl}'
            if i % print_every == 0:
                print(msg, file=sys.stderr)
    except KeyboardInterrupt:
        pass
    return symbols
| [
"tensorflow.get_collection",
"tensorflow.variables_initializer",
"tensorflow.Variable",
"numpy.linalg.norm",
"tensorflow.get_variable",
"tensorflow.variable_scope",
"tensorflow.minimum",
"tensorflow.stack",
"tensorflow.placeholder",
"numpy.cumsum",
"matplotlib.pyplot.subplots",
"numpy.stack",
... | [((437, 493), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[None, input_size]'], {'name': '"""curr_x"""'}), "(dtype, [None, input_size], name='curr_x')\n", (451, 493), True, 'import tensorflow as tf\n'), ((507, 559), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[None, c_size]'], {'name': '"""curr_c"""'}), "(dtype, [None, c_size], name='curr_c')\n", (521, 559), True, 'import tensorflow as tf\n'), ((573, 625), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[None, h_size]'], {'name': '"""curr_h"""'}), "(dtype, [None, h_size], name='curr_h')\n", (587, 625), True, 'import tensorflow as tf\n'), ((692, 738), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(batch_size, c_size)'], {}), '(-1, 1, (batch_size, c_size))\n', (709, 738), True, 'import numpy as np\n'), ((753, 799), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(batch_size, h_size)'], {}), '(-1, 1, (batch_size, h_size))\n', (770, 799), True, 'import numpy as np\n'), ((1169, 1195), 'numpy.stack', 'np.stack', (['c_states'], {'axis': '(1)'}), '(c_states, axis=1)\n', (1177, 1195), True, 'import numpy as np\n'), ((1211, 1237), 'numpy.stack', 'np.stack', (['h_states'], {'axis': '(1)'}), '(h_states, axis=1)\n', (1219, 1237), True, 'import numpy as np\n'), ((1524, 1554), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (1527, 1554), False, 'from sklearn.decomposition import FastICA, PCA\n'), ((1565, 1599), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (1572, 1599), False, 'from sklearn.decomposition import FastICA, PCA\n'), ((2042, 2077), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 4)'}), '(1, 2, figsize=(10, 4))\n', (2054, 2077), True, 'import matplotlib.pyplot as plt\n'), ((4788, 4858), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 
'variable_scope'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=variable_scope)\n', (4805, 4858), True, 'import tensorflow as tf\n'), ((1715, 1740), 'numpy.linalg.norm', 'np.linalg.norm', (['w'], {'axis': '(1)'}), '(w, axis=1)\n', (1729, 1740), True, 'import numpy as np\n'), ((1821, 1842), 'numpy.sort', 'np.sort', (['ica_comp_var'], {}), '(ica_comp_var)\n', (1828, 1842), True, 'import numpy as np\n'), ((2353, 2371), 'numpy.cumsum', 'np.cumsum', (['pca_var'], {}), '(pca_var)\n', (2362, 2371), True, 'import numpy as np\n'), ((2428, 2446), 'numpy.cumsum', 'np.cumsum', (['ica_var'], {}), '(ica_var)\n', (2437, 2446), True, 'import numpy as np\n'), ((2802, 2835), 'tensorflow.variable_scope', 'tf.variable_scope', (['variable_scope'], {}), '(variable_scope)\n', (2819, 2835), True, 'import tensorflow as tf\n'), ((2849, 2904), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[None, n_features]'], {'name': '"""mlp_x"""'}), "(dtype, [None, n_features], name='mlp_x')\n", (2863, 2904), True, 'import tensorflow as tf\n'), ((2917, 2960), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[None]'], {'name': '"""mlp_y"""'}), "(dtype, [None], name='mlp_y')\n", (2931, 2960), True, 'import tensorflow as tf\n'), ((3055, 3089), 'tensorflow.distributions.Laplace', 'tf.distributions.Laplace', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3079, 3089), True, 'import tensorflow as tf\n'), ((3222, 3277), 'tensorflow.get_variable', 'tf.get_variable', (['"""w1"""'], {'dtype': 'dtype', 'initializer': 'w1_init'}), "('w1', dtype=dtype, initializer=w1_init)\n", (3237, 3277), True, 'import tensorflow as tf\n'), ((3291, 3331), 'tensorflow.get_variable', 'tf.get_variable', (['"""b1"""', '[n_hidden]', 'dtype'], {}), "('b1', [n_hidden], dtype)\n", (3306, 3331), True, 'import tensorflow as tf\n'), ((3395, 3450), 'tensorflow.get_variable', 'tf.get_variable', (['"""w2"""'], {'dtype': 'dtype', 'initializer': 'w2_init'}), "('w2', dtype=dtype, initializer=w2_init)\n", (3410, 3450), True, 'import tensorflow 
as tf\n'), ((3464, 3506), 'tensorflow.get_variable', 'tf.get_variable', (['"""b2"""', '[y_pred_dim]', 'dtype'], {}), "('b2', [y_pred_dim], dtype)\n", (3479, 3506), True, 'import tensorflow as tf\n'), ((4374, 4407), 'tensorflow.variable_scope', 'tf.variable_scope', (['variable_scope'], {}), '(variable_scope)\n', (4391, 4407), True, 'import tensorflow as tf\n'), ((4431, 4475), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""step"""', 'trainable': '(False)'}), "(0, name='step', trainable=False)\n", (4442, 4475), True, 'import tensorflow as tf\n'), ((4500, 4570), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['lr0', 'global_step', 'lr_decay[0]', 'lr_decay[1]'], {}), '(lr0, global_step, lr_decay[0], lr_decay[1])\n', (4526, 4570), True, 'import tensorflow as tf\n'), ((4905, 4939), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['var_list'], {}), '(var_list)\n', (4929, 4939), True, 'import tensorflow as tf\n'), ((3353, 3379), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['x', 'w1', 'b1'], {}), '(x, w1, b1)\n', (3368, 3379), True, 'import tensorflow as tf\n'), ((3536, 3563), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['h1', 'w2', 'b2'], {}), '(h1, w2, b2)\n', (3551, 3563), True, 'import tensorflow as tf\n'), ((3769, 3789), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (3783, 3789), True, 'import tensorflow as tf\n'), ((3724, 3749), 'tensorflow.minimum', 'tf.minimum', (['(y - y_pred)', '(0)'], {}), '(y - y_pred, 0)\n', (3734, 3749), True, 'import tensorflow as tf\n'), ((3922, 3950), 'tensorflow.stack', 'tf.stack', (['[y, 1 - y]'], {'axis': '(1)'}), '([y, 1 - y], axis=1)\n', (3930, 3950), True, 'import tensorflow as tf\n'), ((4643, 4698), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['learning_rate'], {'momentum': '(0.5)'}), '(learning_rate, momentum=0.5)\n', (4669, 4698), True, 'import tensorflow as tf\n')] |
"""
BeamColumnElement
================
Module contains a beam column element under bending action and axial forces.
"""
import numpy as np
import numpy.linalg as la
from FE_code.element import Element
from FE_code.node import Node
import scipy
class BeamColumnElement(Element):
    """Two dimensional beam column element.

    Attributes
    ----------
    id : int
        Unique ID of the element
    nodes : array_like
        A list of the two Nodes at the ends of the element
    E : float
        Young's Modulus
    b : float
        width of element [m]
    h : float
        height of element [m]

    Raises
    ------
    RuntimeError
        if the number of nodes given does not equal 2
    RuntimeError
        if the nodes given are not of class `Node`
    """

    def __init__(self, id, nodes, E, b, h):
        """Creates a new element

        Parameters
        ----------
        id : int
            Unique ID of the element
        nodes : array_like
            A list of exactly two Nodes at the ends of the element
        E : float
            Young's modulus
        b : float
            width of the rectangular cross section [m]
        h : float
            height of the rectangular cross section [m]
        """
        if len(nodes) != 2:
            raise RuntimeError(f'Error in creating a beam column element. Given {len(nodes)} nodes instead of 2 nodes')
        for node in nodes:
            if not isinstance(node, Node):
                raise RuntimeError('Error in creating a beam column element. Nodes must be a list of objects of class Node')
        self.id = id
        self.nodes = nodes
        self._E = E
        self._b = b
        self._h = h
        # Containers filled later by the analysis and design steps.
        self.local_internal_forces = list()
        self.global_internal_forces = list()
        self.load_elements = list()
        self.bending_reinforcement = list()
        self.shear_reinforcement = list()

    @property
    def E(self):
        # Young's modulus.
        return self._E

    @E.setter
    def E(self, value):
        self._E = value

    @property
    def b(self):
        # Width of the cross section [m].
        return self._b

    @b.setter
    def b(self, value):
        self._b = value

    @property
    def h(self):
        # Height of the cross section [m].
        return self._h

    @h.setter
    def h(self, value):
        self._h = value

    def _get_dof_tuple_from_node_id(self, node_id):
        # Taken from Element and overridden: a beam column node carries two
        # translations (u, v) and one rotation (phi).
        return [(node_id, 'u'), (node_id, 'v'), (node_id, 'phi')]

    @property
    def node_coords(self):
        """array_like: nodal coordinates matrix"""
        return np.array([node.coords for node in self.nodes])

    def get_vector(self):
        """Get the vector pointing from the first to the second node.

        Returns
        -------
        Vector between the end nodes.
        """
        a = self.node_coords[0]
        b = self.node_coords[1]
        return b - a

    def get_length(self):
        """Get the distance between the end nodes.

        Returns
        -------
        Length of the element.
        """
        a = self.node_coords[0]
        b = self.node_coords[1]
        return la.norm(b - a)

    def get_transform_matrix(self):
        """Transformation matrix from global to local coordinates.

        The direction cosine and sine are taken directly from the components
        of the element vector.  (A previous implementation derived the angle
        with `arccos` of the dot product with (1, 0), which always yields a
        non-negative sine and therefore produced a wrong rotation for
        elements pointing in the negative y direction.)

        Returns
        -------
        transform_matrix : ndarray
            6x6 transformation matrix.
        """
        # Coordinates are assumed 2D (x, y), as in the original formulation.
        dx, dy = self.get_vector()
        length = self.get_length()
        c = dx / length
        s = dy / length
        transform_matrix = np.array([[c, s, 0, 0, 0, 0],
                                     [-s, c, 0, 0, 0, 0],
                                     [0, 0, 1, 0, 0, 0],
                                     [0, 0, 0, c, s, 0],
                                     [0, 0, 0, -s, c, 0],
                                     [0, 0, 0, 0, 0, 1]])
        return transform_matrix

    def calculate_elastic_stiffness_matrix_local(self):
        """Calculate local Stiffness Matrix for one beam column element
        using the Euler Bernoulli theory

        Returns
        -------
        K_e_l : array_like
            local element stiffness matrix (6x6)
        """
        # Axial and bending stiffness of the rectangular cross section.
        EA = self._E * self._b * self._h
        EI = self._E * (self._b * self._h**3 / 12)
        l = self.get_length()
        K_e_l = np.array([[EA/l, 0, 0, -EA/l, 0, 0],
                          [0, 12*EI/l**3, -6*EI/l**2, 0, -12*EI/l**3, -6*EI/l**2],
                          [0, -6*EI/l**2, 4*EI/l, 0, 6*EI/l**2, 2*EI/l],
                          [-EA/l, 0, 0, EA/l, 0, 0],
                          [0, -12*EI/l**3, 6*EI/l**2, 0, 12*EI/l**3, 6*EI/l**2],
                          [0, -6*EI/l**2, 2*EI/l, 0, 6*EI/l**2, 4*EI/l]])
        return K_e_l

    def calculate_elastic_stiffness_matrix(self):
        """Calculate Stiffness Matrix for one beam column element
        using the Euler Bernoulli theory

        Returns
        -------
        K_e : array_like
            element stiffness matrix in global coordinates (T^T K_l T)
        """
        transform_matrix = self.get_transform_matrix()
        K_e_l = self.calculate_elastic_stiffness_matrix_local()
        K_e = transform_matrix.T @ K_e_l @ transform_matrix
        return K_e

    def calculate_element_end_forces(self):
        """Calculate global element end forces of one beam column element.

        Multiplies the global stiffness matrix with the nodal displacements
        stored in the node results.

        Returns
        -------
        f_g : array_like
            element end forces in global coordinates
        """
        u_g = list()
        for i_node in self.nodes:
            u_g.extend([i_node.results['u'], i_node.results['v'], i_node.results['phi']])
        K_g = self.calculate_elastic_stiffness_matrix()
        f_g = np.dot(K_g, u_g)
        return f_g

    def calculate_local_element_end_forces(self):
        """Calculate local element end forces of one beam column element.

        Transforms the global nodal displacements to local coordinates and
        multiplies them with the local stiffness matrix.

        Returns
        -------
        f_l : array_like
            element end forces in local coordinates
        """
        u_e = list()
        for i_node in self.nodes:
            u_e.extend([i_node.results['u'], i_node.results['v'], i_node.results['phi']])
        transform_matrix = self.get_transform_matrix()
        u_l = np.dot(transform_matrix, u_e)
        K_e_l = self.calculate_elastic_stiffness_matrix_local()
        f_l = np.dot(K_e_l, u_l)
        return f_l

    def reset_design(self):
        """Clear previously calculated reinforcement results."""
        self.bending_reinforcement = list()
        self.shear_reinforcement = list()
"numpy.sin",
"numpy.linalg.norm",
"numpy.array",
"numpy.cos",
"numpy.dot",
"numpy.arccos"
] | [((2384, 2430), 'numpy.array', 'np.array', (['[node.coords for node in self.nodes]'], {}), '([node.coords for node in self.nodes])\n', (2392, 2430), True, 'import numpy as np\n'), ((3081, 3095), 'numpy.linalg.norm', 'la.norm', (['(b - a)'], {}), '(b - a)\n', (3088, 3095), True, 'import numpy.linalg as la\n'), ((3351, 3367), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (3359, 3367), True, 'import numpy as np\n'), ((3390, 3430), 'numpy.dot', 'np.dot', (['element_vector', 'reference_vector'], {}), '(element_vector, reference_vector)\n', (3396, 3430), True, 'import numpy as np\n'), ((3483, 3514), 'numpy.arccos', 'np.arccos', (['(dot_product / length)'], {}), '(dot_product / length)\n', (3492, 3514), True, 'import numpy as np\n'), ((3526, 3539), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3532, 3539), True, 'import numpy as np\n'), ((3552, 3565), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3558, 3565), True, 'import numpy as np\n'), ((3594, 3731), 'numpy.array', 'np.array', (['[[c, s, 0, 0, 0, 0], [-s, c, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, c,\n s, 0], [0, 0, 0, -s, c, 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[c, s, 0, 0, 0, 0], [-s, c, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, \n 0, 0, c, s, 0], [0, 0, 0, -s, c, 0], [0, 0, 0, 0, 0, 1]])\n', (3602, 3731), True, 'import numpy as np\n'), ((4400, 4782), 'numpy.array', 'np.array', (['[[EA / l, 0, 0, -EA / l, 0, 0], [0, 12 * EI / l ** 3, -6 * EI / l ** 2, 0, \n -12 * EI / l ** 3, -6 * EI / l ** 2], [0, -6 * EI / l ** 2, 4 * EI / l,\n 0, 6 * EI / l ** 2, 2 * EI / l], [-EA / l, 0, 0, EA / l, 0, 0], [0, -12 *\n EI / l ** 3, 6 * EI / l ** 2, 0, 12 * EI / l ** 3, 6 * EI / l ** 2], [0,\n -6 * EI / l ** 2, 2 * EI / l, 0, 6 * EI / l ** 2, 4 * EI / l]]'], {}), '([[EA / l, 0, 0, -EA / l, 0, 0], [0, 12 * EI / l ** 3, -6 * EI / l **\n 2, 0, -12 * EI / l ** 3, -6 * EI / l ** 2], [0, -6 * EI / l ** 2, 4 *\n EI / l, 0, 6 * EI / l ** 2, 2 * EI / l], [-EA / l, 0, 0, EA / l, 0, 0],\n [0, -12 * EI / l ** 3, 6 
* EI / l ** 2, 0, 12 * EI / l ** 3, 6 * EI / l **\n 2], [0, -6 * EI / l ** 2, 2 * EI / l, 0, 6 * EI / l ** 2, 4 * EI / l]])\n', (4408, 4782), True, 'import numpy as np\n'), ((5209, 5242), 'numpy.dot', 'np.dot', (['transform_matrix.T', 'K_e_l'], {}), '(transform_matrix.T, K_e_l)\n', (5215, 5242), True, 'import numpy as np\n'), ((5257, 5284), 'numpy.dot', 'np.dot', (['a', 'transform_matrix'], {}), '(a, transform_matrix)\n', (5263, 5284), True, 'import numpy as np\n'), ((5828, 5844), 'numpy.dot', 'np.dot', (['K_g', 'u_g'], {}), '(K_g, u_g)\n', (5834, 5844), True, 'import numpy as np\n'), ((6322, 6351), 'numpy.dot', 'np.dot', (['transform_matrix', 'u_e'], {}), '(transform_matrix, u_e)\n', (6328, 6351), True, 'import numpy as np\n'), ((6430, 6448), 'numpy.dot', 'np.dot', (['K_e_l', 'u_l'], {}), '(K_e_l, u_l)\n', (6436, 6448), True, 'import numpy as np\n')] |
from numpy import prod
def persistence(n):
    """Return the multiplicative persistence of ``n``.

    The multiplicative persistence is the number of times the digits of a
    non-negative integer must be multiplied together until a single digit
    remains (0 for single-digit inputs).

    Uses exact Python-integer arithmetic throughout; the previous
    numpy-based implementation could silently overflow int64 for inputs
    with many large digits (e.g. a long run of 9s).
    """
    if n < 10:
        return 0
    steps = 0
    while n >= 10:
        # Multiply the digits of n together to get the next value.
        product = 1
        for digit in str(n):
            product *= int(digit)
        n = product
        steps += 1
    return steps
| [
"numpy.prod"
] | [((132, 142), 'numpy.prod', 'prod', (['nums'], {}), '(nums)\n', (136, 142), False, 'from numpy import prod\n'), ((188, 198), 'numpy.prod', 'prod', (['nums'], {}), '(nums)\n', (192, 198), False, 'from numpy import prod\n')] |
import torch
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import binary_dilation
from scipy.stats import gaussian_kde
from utils import prediction_output_to_trajectories
import visualization
def compute_ade(predicted_trajs, gt_traj):
    """Average displacement error of every predicted trajectory, flattened."""
    displacement = np.linalg.norm(predicted_trajs - gt_traj, axis=-1)
    return np.mean(displacement, axis=-1).flatten()
def compute_fde(predicted_trajs, gt_traj):
    """Final displacement error of every predicted trajectory, flattened."""
    endpoint_diff = predicted_trajs[:, -1] - gt_traj[-1]
    return np.linalg.norm(endpoint_diff, axis=-1).flatten()
def compute_nll(predicted_trajs, gt_traj):
    """Mean negative log-likelihood of the ground truth under the prediction."""
    target = torch.tensor(gt_traj, dtype=torch.float32).unsqueeze(1)
    log_probs = predicted_trajs.position_log_prob(target).numpy()
    return np.mean(-log_probs)
def compute_obs_violations(predicted_trajs, map):
    """Number of predicted trajectories that touch an obstacle on the map."""
    obs_map = map.data
    # Dilate the obstacles so near-misses count too, then build a bilinear
    # interpolant over the map grid.
    dilated = binary_dilation(obs_map.T, iterations=4)
    interp_obs_map = RectBivariateSpline(range(obs_map.shape[1]),
                                        range(obs_map.shape[0]),
                                        dilated,
                                        kx=1, ky=1)
    num_trajs, num_steps = predicted_trajs.shape[0], predicted_trajs.shape[1]
    traj_points = map.to_map_points(predicted_trajs.reshape((-1, 2)))
    obs_values = interp_obs_map(traj_points[:, 0], traj_points[:, 1], grid=False)
    obs_values = obs_values.reshape((num_trajs, num_steps))
    # A trajectory violates if any of its timesteps has a positive obstacle value.
    return np.sum(obs_values.max(axis=1) > 0, dtype=float)
def compute_mink_ade(predicted_trajs, gt_traj):
    """Best-of-k ADE for every k = 1..K.

    Element k-1 of the returned list is the minimum average displacement
    error among the first k predicted trajectories.  The errors are computed
    once and a running minimum is taken, instead of recomputing the ADE of
    every prefix (which was quadratic in the number of trajectories).
    """
    if predicted_trajs.shape[0] == 0:
        return []
    # One row of ADE values per leading-axis entry, minimized per entry.
    ades = compute_ade(predicted_trajs, gt_traj).reshape(predicted_trajs.shape[0], -1)
    return list(np.minimum.accumulate(ades.min(axis=1)))
def compute_mink_fde(predicted_trajs, gt_traj):
    """Best-of-k FDE for every k = 1..K.

    Element k-1 of the returned list is the minimum final displacement
    error among the first k predicted trajectories.  The errors are computed
    once and a running minimum is taken, instead of recomputing the FDE of
    every prefix (which was quadratic in the number of trajectories).
    """
    if predicted_trajs.shape[0] == 0:
        return []
    # One row of FDE values per leading-axis entry, minimized per entry.
    fdes = compute_fde(predicted_trajs, gt_traj).reshape(predicted_trajs.shape[0], -1)
    return list(np.minimum.accumulate(fdes.min(axis=1)))
def compute_mintopk_statistics(prediction_output_dict,
                               max_hl,
                               ph,
                               node_type_enum,
                               prune_ph_to_future=False):
    """Best-of-top-k ADE/FDE statistics per node type.

    Converts the raw model output into trajectories, ranks each node's
    mixture components by their mixture weight, and records the best-of-k
    ADE/FDE curves over the ranked component means.

    Returns a dict keyed by node-type *name* with 'min_ade_k' / 'min_fde_k'
    lists, one array per evaluated node.
    """
    (prediction_dict,
     _,
     futures_dict) = prediction_output_to_trajectories(prediction_output_dict,
                                                       max_hl,
                                                       ph,
                                                       prune_ph_to_future=prune_ph_to_future)

    batch_error_dict = dict()
    for node_type in node_type_enum:
        batch_error_dict[node_type.name] = {'min_ade_k': list(),
                                            'min_fde_k': list()}

    for t in prediction_dict.keys():
        for node in prediction_dict[t].keys():
            node_type_name = node.type.name
            # Positional means of the mixture components (x, y only).
            gaussian_means = prediction_dict[t][node].component_distribution.mean[:, 0, :, :2]
            component_pis = prediction_dict[t][node].pis
            # Rank the components by mixture weight, most probable first.
            rank_order = torch.argsort(component_pis, descending=True)
            ranked_predictions = torch.transpose(gaussian_means, 0, 1)[rank_order]
            min_ade_errors = compute_mink_ade(ranked_predictions, futures_dict[t][node])
            min_fde_errors = compute_mink_fde(ranked_predictions, futures_dict[t][node])
            batch_error_dict[node_type_name]['min_ade_k'].append(np.array(min_ade_errors))
            batch_error_dict[node_type_name]['min_fde_k'].append(np.array(min_fde_errors))

    return batch_error_dict
def plot_mintopk_curves(mintopk_errors,
                        log_writer,
                        namespace,
                        curr_iter):
    """Log a mean best-of-top-k error curve figure per node type and metric."""
    for node_type in mintopk_errors[0].keys():
        for metric in mintopk_errors[0][node_type].keys():
            # Gather the per-node curves from every batch.
            collected = [curve
                         for batch_errors in mintopk_errors
                         for curve in batch_errors[node_type][metric]]
            if not collected:
                continue
            mean_curve = np.mean(np.stack(collected, axis=0), axis=0)
            fig = visualization.visualize_mink(mean_curve)
            log_writer.add_figure(f"{namespace}/{node_type}/{metric}",
                                  fig, curr_iter)
def compute_batch_statistics(prediction_output_dict,
                             max_hl,
                             ph,
                             node_type_enum,
                             kde=False,
                             obs=False,
                             map=None,
                             prune_ph_to_future=False):
    """Most-likely-mode ADE/FDE (optionally NLL and obstacle violations).

    Converts the raw model output into trajectories and evaluates the single
    most likely mode of each node's prediction.  When `kde` is True the mean
    negative log-likelihood of the ground truth is also computed; when `obs`
    is True the number of obstacle-violating trajectories on `map` is added.

    Returns a dict keyed by node-type *name* mapping to per-metric lists.
    """
    (prediction_dict,
     _,
     futures_dict) = prediction_output_to_trajectories(prediction_output_dict,
                                                       max_hl,
                                                       ph,
                                                       prune_ph_to_future=prune_ph_to_future)

    batch_error_dict = dict()
    for node_type in node_type_enum:
        batch_error_dict[node_type.name] = {'mm_ade': list(),
                                            'mm_fde': list()}
        if kde:
            batch_error_dict[node_type.name]['nll'] = list()
        if obs:
            batch_error_dict[node_type.name]['obs_viols'] = list()

    for t in prediction_dict.keys():
        for node in prediction_dict[t].keys():
            node_type_name = node.type.name
            # Evaluate only the single most likely mode of the prediction.
            mm_prediction = prediction_dict[t][node].mode_mode().unsqueeze(0)
            mm_ade_errors = compute_ade(mm_prediction, futures_dict[t][node])
            mm_fde_errors = compute_fde(mm_prediction, futures_dict[t][node])
            if kde:
                nll = compute_nll(prediction_dict[t][node], futures_dict[t][node])
            if obs:
                obs_viols = compute_obs_violations(prediction_dict[t][node], map)
            # ADE/FDE are per-trajectory lists (extend); NLL and violation
            # counts are one scalar per node (append).
            batch_error_dict[node_type_name]['mm_ade'].extend(list(mm_ade_errors))
            batch_error_dict[node_type_name]['mm_fde'].extend(list(mm_fde_errors))
            if kde:
                batch_error_dict[node_type_name]['nll'].append(nll)
            if obs:
                batch_error_dict[node_type_name]['obs_viols'].append(obs_viols)

    return batch_error_dict
def log_batch_errors(batch_errors_list, log_writer, namespace, curr_iter, bar_plot=[], box_plot=[]):
    """Write the histogram, mean and median of every metric to the writer.

    ``bar_plot`` and ``box_plot`` are accepted for interface compatibility
    but are currently unused.
    """
    for node_type in batch_errors_list[0]:
        for metric in batch_errors_list[0][node_type]:
            # Concatenate the metric values from all batches.
            values = []
            for batch_errors in batch_errors_list:
                values.extend(batch_errors[node_type][metric])
            if not values:
                continue
            tag = f"{namespace}/{node_type}/{metric}"
            log_writer.add_histogram(tag, values, curr_iter)
            log_writer.add_scalar(f"{tag}_mean",
                                  np.mean(values), curr_iter)
            log_writer.add_scalar(f"{tag}_median",
                                  np.median(values), curr_iter)
def print_batch_errors(batch_errors_list, namespace, curr_iter):
    """Print the mean and median of every metric to stdout.

    The keys of the per-batch dictionaries are node-type *name strings*
    (the batch_error_dicts in this module are built keyed by
    ``node_type.name``), so the key is used directly in the printed tag.
    The previous implementation accessed ``node_type.name`` on these
    strings, which raised AttributeError; this also makes the function
    consistent with ``log_batch_errors``.
    """
    for node_type in batch_errors_list[0].keys():
        for metric in batch_errors_list[0][node_type].keys():
            metric_batch_error = []
            for batch_errors in batch_errors_list:
                metric_batch_error.extend(batch_errors[node_type][metric])

            if len(metric_batch_error) > 0:
                print(f"{curr_iter}: {node_type}/{namespace}/{metric}_mean", np.mean(metric_batch_error))
                print(f"{curr_iter}: {node_type}/{namespace}/{metric}_median", np.median(metric_batch_error))
| [
"numpy.stack",
"scipy.ndimage.binary_dilation",
"numpy.median",
"torch.argsort",
"torch.transpose",
"numpy.min",
"numpy.mean",
"numpy.linalg.norm",
"numpy.array",
"visualization.visualize_mink",
"torch.tensor",
"utils.prediction_output_to_trajectories"
] | [((290, 340), 'numpy.linalg.norm', 'np.linalg.norm', (['(predicted_trajs - gt_traj)'], {'axis': '(-1)'}), '(predicted_trajs - gt_traj, axis=-1)\n', (304, 340), True, 'import numpy as np\n'), ((351, 374), 'numpy.mean', 'np.mean', (['error'], {'axis': '(-1)'}), '(error, axis=-1)\n', (358, 374), True, 'import numpy as np\n'), ((463, 524), 'numpy.linalg.norm', 'np.linalg.norm', (['(predicted_trajs[:, -1] - gt_traj[-1])'], {'axis': '(-1)'}), '(predicted_trajs[:, -1] - gt_traj[-1], axis=-1)\n', (477, 524), True, 'import numpy as np\n'), ((619, 661), 'torch.tensor', 'torch.tensor', (['gt_traj'], {'dtype': 'torch.float32'}), '(gt_traj, dtype=torch.float32)\n', (631, 661), False, 'import torch\n'), ((756, 774), 'numpy.mean', 'np.mean', (['nll_per_t'], {}), '(nll_per_t)\n', (763, 774), True, 'import numpy as np\n'), ((2281, 2393), 'utils.prediction_output_to_trajectories', 'prediction_output_to_trajectories', (['prediction_output_dict', 'max_hl', 'ph'], {'prune_ph_to_future': 'prune_ph_to_future'}), '(prediction_output_dict, max_hl, ph,\n prune_ph_to_future=prune_ph_to_future)\n', (2314, 2393), False, 'from utils import prediction_output_to_trajectories\n'), ((4764, 4876), 'utils.prediction_output_to_trajectories', 'prediction_output_to_trajectories', (['prediction_output_dict', 'max_hl', 'ph'], {'prune_ph_to_future': 'prune_ph_to_future'}), '(prediction_output_dict, max_hl, ph,\n prune_ph_to_future=prune_ph_to_future)\n', (4797, 4876), False, 'from utils import prediction_output_to_trajectories\n'), ((1024, 1064), 'scipy.ndimage.binary_dilation', 'binary_dilation', (['obs_map.T'], {'iterations': '(4)'}), '(obs_map.T, iterations=4)\n', (1039, 1064), False, 'from scipy.ndimage import binary_dilation\n'), ((1712, 1724), 'numpy.min', 'np.min', (['ades'], {}), '(ades)\n', (1718, 1724), True, 'import numpy as np\n'), ((1958, 1970), 'numpy.min', 'np.min', (['fdes'], {}), '(fdes)\n', (1964, 1970), True, 'import numpy as np\n'), ((3061, 3106), 'torch.argsort', 'torch.argsort', 
(['component_pis'], {'descending': '(True)'}), '(component_pis, descending=True)\n', (3074, 3106), False, 'import torch\n'), ((3140, 3177), 'torch.transpose', 'torch.transpose', (['gaussian_means', '(0)', '(1)'], {}), '(gaussian_means, 0, 1)\n', (3155, 3177), False, 'import torch\n'), ((3435, 3459), 'numpy.array', 'np.array', (['min_ade_errors'], {}), '(min_ade_errors)\n', (3443, 3459), True, 'import numpy as np\n'), ((3526, 3550), 'numpy.array', 'np.array', (['min_fde_errors'], {}), '(min_fde_errors)\n', (3534, 3550), True, 'import numpy as np\n'), ((4070, 4106), 'numpy.stack', 'np.stack', (['metric_batch_error'], {'axis': '(0)'}), '(metric_batch_error, axis=0)\n', (4078, 4106), True, 'import numpy as np\n'), ((4141, 4169), 'numpy.mean', 'np.mean', (['mink_errors'], {'axis': '(0)'}), '(mink_errors, axis=0)\n', (4148, 4169), True, 'import numpy as np\n'), ((4193, 4238), 'visualization.visualize_mink', 'visualization.visualize_mink', (['avg_mink_errors'], {}), '(avg_mink_errors)\n', (4221, 4238), False, 'import visualization\n'), ((7032, 7059), 'numpy.mean', 'np.mean', (['metric_batch_error'], {}), '(metric_batch_error)\n', (7039, 7059), True, 'import numpy as np\n'), ((7192, 7221), 'numpy.median', 'np.median', (['metric_batch_error'], {}), '(metric_batch_error)\n', (7201, 7221), True, 'import numpy as np\n'), ((7702, 7729), 'numpy.mean', 'np.mean', (['metric_batch_error'], {}), '(metric_batch_error)\n', (7709, 7729), True, 'import numpy as np\n'), ((7815, 7844), 'numpy.median', 'np.median', (['metric_batch_error'], {}), '(metric_batch_error)\n', (7824, 7844), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, unicode_literals
from itertools import product
import numpy as np
import param
from matplotlib.patches import Wedge, Circle
from matplotlib.collections import LineCollection, PatchCollection
from ...core.data import GridInterface
from ...core.util import dimension_sanitizer, is_nan
from ...core.spaces import HoloMap
from ..mixins import HeatMapMixin
from .element import ColorbarPlot
from .raster import QuadMeshPlot
from .util import filter_styles
class HeatMapPlot(HeatMapMixin, QuadMeshPlot):
    """Matplotlib plot type rendering a HeatMap element as a quadmesh.

    Extends QuadMeshPlot with categorical tick handling, optional per-cell
    value annotations and (via HeatMapMixin) dispatch to the radial
    implementation when radial-only options are supplied.
    """

    clipping_colors = param.Dict(default={'NaN': 'white'}, doc="""
        Dictionary to specify colors for clipped values, allows
        setting color for NaN values and for values above and below
        the min and max value. The min, max or NaN color may specify
        an RGB(A) color as a color hex string of the form #FFFFFF or
        #FFFFFFFF or a length 3 or length 4 tuple specifying values in
        the range 0-1 or a named HTML color.""")

    padding = param.ClassSelector(default=0, class_=(int, float, tuple))

    radial = param.Boolean(default=False, doc="""
        Whether the HeatMap should be radial""")

    show_values = param.Boolean(default=False, doc="""
        Whether to annotate each pixel with its value.""")

    xmarks = param.Parameter(default=None, doc="""
        Add separation lines to the heatmap for better readability. By
        default, does not show any separation lines. If parameter is of type
        integer, draws the given amount of separations lines spread across
        heatmap. If parameter is of type list containing integers, show
        separation lines at given indices. If parameter is of type tuple, draw
        separation lines at given categorical values. If parameter is of type
        function, draw separation lines where function returns True for passed
        heatmap category.""")

    ymarks = param.Parameter(default=None, doc="""
        Add separation lines to the heatmap for better readability. By
        default, does not show any separation lines. If parameter is of type
        integer, draws the given amount of separations lines spread across
        heatmap. If parameter is of type list containing integers, show
        separation lines at given indices. If parameter is of type tuple, draw
        separation lines at given categorical values. If parameter is of type
        function, draw separation lines where function returns True for passed
        heatmap category.""")

    xticks = param.Parameter(default=20, doc="""
        Ticks along x-axis/segments specified as an integer, explicit list of
        ticks or function. If `None`, no ticks are shown.""")

    yticks = param.Parameter(default=20, doc="""
        Ticks along y-axis/annulars specified as an integer, explicit list of
        ticks or function. If `None`, no ticks are shown.""")

    @classmethod
    def is_radial(cls, heatmap):
        # Decide whether the HeatMap should be handled by the radial plot
        # class: any radial-only plot option implies radial unless
        # radial=False was set explicitly; radial=True always forces it.
        heatmap = heatmap.last if isinstance(heatmap, HoloMap) else heatmap
        opts = cls.lookup_options(heatmap, 'plot').options
        return ((any(o in opts for o in ('start_angle', 'radius_inner', 'radius_outer'))
                 and not (opts.get('radial') == False)) or opts.get('radial', False))

    def _annotate_plot(self, ax, annotations):
        # Remove stale annotation artists from a previous frame before
        # drawing new ones; returns a dict keyed by plot coordinate.
        for a in self.handles.get('annotations', {}).values():
            a.remove()
        handles = {}
        for plot_coord, text in annotations.items():
            handles[plot_coord] = ax.annotate(text, xy=plot_coord,
                                              xycoords='data',
                                              horizontalalignment='center',
                                              verticalalignment='center')
        return handles

    def _annotate_values(self, element, xvals, yvals):
        # Map each cell midpoint to the pretty-printed value it holds;
        # NaN cells are rendered as '-'.
        val_dim = element.vdims[0]
        vals = element.dimension_values(val_dim).flatten()
        xpos = xvals[:-1] + np.diff(xvals)/2.
        ypos = yvals[:-1] + np.diff(yvals)/2.
        plot_coords = product(xpos, ypos)
        annotations = {}
        for plot_coord, v in zip(plot_coords, vals):
            text = '-' if is_nan(v) else val_dim.pprint_value(v)
            annotations[plot_coord] = text
        return annotations

    def _compute_ticks(self, element, xvals, yvals, xfactors, yfactors):
        # Compute categorical tick positions at the cell midpoints unless
        # the user supplied explicit xticks/yticks plot options.
        xdim, ydim = element.kdims
        if self.invert_axes:
            xdim, ydim = ydim, xdim
        opts = self.lookup_options(element, 'plot').options
        xticks = opts.get('xticks')
        if xticks is None:
            xpos = xvals[:-1] + np.diff(xvals)/2.
            if not xfactors:
                xfactors = element.gridded.dimension_values(xdim, False)
            xlabels = [xdim.pprint_value(k) for k in xfactors]
            xticks = list(zip(xpos, xlabels))
        yticks = opts.get('yticks')
        if yticks is None:
            ypos = yvals[:-1] + np.diff(yvals)/2.
            if not yfactors:
                yfactors = element.gridded.dimension_values(ydim, False)
            ylabels = [ydim.pprint_value(k) for k in yfactors]
            yticks = list(zip(ypos, ylabels))
        return xticks, yticks

    def _draw_markers(self, ax, element, marks, values, factors, axis='x'):
        # Marks are only supported by the radial implementation; warn
        # instead of silently ignoring the option.
        if marks is None or self.radial:
            return
        self.param.warning('Only radial HeatMaps supports marks, to make the'
                           'HeatMap quads more distinguishable set linewidths'
                           'to a non-zero value.')

    def init_artists(self, ax, plot_args, plot_kwargs):
        xfactors = plot_kwargs.pop('xfactors')
        yfactors = plot_kwargs.pop('yfactors')
        annotations = plot_kwargs.pop('annotations', None)
        # Drop style options that belong to the radial sub-groups before
        # forwarding the remaining keywords to pcolormesh.
        prefixes = ['annular', 'xmarks', 'ymarks']
        plot_kwargs = {k: v for k, v in plot_kwargs.items()
                       if not any(p in k for p in prefixes)}
        artist = ax.pcolormesh(*plot_args, **plot_kwargs)

        if self.show_values and annotations:
            self.handles['annotations'] = self._annotate_plot(ax, annotations)
        self._draw_markers(ax, self.current_frame, self.xmarks,
                           plot_args[0], xfactors, axis='x')
        self._draw_markers(ax, self.current_frame, self.ymarks,
                           plot_args[1], yfactors, axis='y')
        return {'artist': artist}

    def get_data(self, element, ranges, style):
        # Returns the (x-edges, y-edges, masked data) tuple for pcolormesh
        # together with style options and categorical ticks.
        xdim, ydim = element.kdims
        aggregate = element.gridded

        if not element._unique:
            self.param.warning('HeatMap element index is not unique, ensure you '
                               'aggregate the data before displaying it, e.g. '
                               'using heatmap.aggregate(function=np.mean). '
                               'Duplicate index values have been dropped.')

        # Mask non-finite values so NaNs render with the clipping color.
        data = aggregate.dimension_values(2, flat=False)
        data = np.ma.array(data, mask=np.logical_not(np.isfinite(data)))
        if self.invert_axes:
            xdim, ydim = ydim, xdim
            data = data.T[::-1, ::-1]

        # Categorical axes get integer bin edges centered on each cell;
        # numeric axes infer interval breaks from the coordinate values.
        xtype = aggregate.interface.dtype(aggregate, xdim)
        if xtype.kind in 'SUO':
            xvals = np.arange(data.shape[1]+1)-0.5
        else:
            xvals = aggregate.dimension_values(xdim, expanded=False)
            xvals = GridInterface._infer_interval_breaks(xvals)

        ytype = aggregate.interface.dtype(aggregate, ydim)
        if ytype.kind in 'SUO':
            yvals = np.arange(data.shape[0]+1)-0.5
        else:
            yvals = aggregate.dimension_values(ydim, expanded=False)
            yvals = GridInterface._infer_interval_breaks(yvals)

        xfactors = list(ranges.get(xdim.name, {}).get('factors', []))
        yfactors = list(ranges.get(ydim.name, {}).get('factors', []))
        xticks, yticks = self._compute_ticks(element, xvals, yvals, xfactors, yfactors)

        style['xfactors'] = xfactors
        style['yfactors'] = yfactors
        if self.show_values:
            style['annotations'] = self._annotate_values(element.gridded, xvals, yvals)
        vdim = element.vdims[0]
        self._norm_kwargs(element, ranges, style, vdim)
        if 'vmin' in style:
            style['clim'] = style.pop('vmin'), style.pop('vmax')
        return (xvals, yvals, data), style, {'xticks': xticks, 'yticks': yticks}
class RadialHeatMapPlot(ColorbarPlot):
    """Renders a HeatMap on a polar projection as annular wedges.

    Segments (x-dimension) map to angular bins and annulars (y-dimension)
    map to radial bins; optional separation lines and ticks can be drawn
    between them.
    """

    start_angle = param.Number(default=np.pi/2, doc="""
        Define starting angle of the first annulars. By default, beings
        at 12 o clock.""")

    max_radius = param.Number(default=0.5, doc="""
        Define the maximum radius which is used for the x and y range extents.
        """)

    radius_inner = param.Number(default=0.1, bounds=(0, 0.5), doc="""
        Define the radius fraction of inner, empty space.""")

    radius_outer = param.Number(default=0.05, bounds=(0, 1), doc="""
        Define the radius fraction of outer space including the labels.""")

    radial = param.Boolean(default=True, doc="""
        Whether the HeatMap should be radial""")

    show_values = param.Boolean(default=False, doc="""
        Whether to annotate each pixel with its value.""")

    xmarks = param.Parameter(default=None, doc="""
        Add separation lines between segments for better readability. By
        default, does not show any separation lines. If parameter is of type
        integer, draws the given amount of separations lines spread across
        radial heatmap. If parameter is of type list containing integers, show
        separation lines at given indices. If parameter is of type tuple, draw
        separation lines at given segment values. If parameter is of type
        function, draw separation lines where function returns True for passed
        segment value.""")

    ymarks = param.Parameter(default=None, doc="""
        Add separation lines between annulars for better readability. By
        default, does not show any separation lines. If parameter is of type
        integer, draws the given amount of separations lines spread across
        radial heatmap. If parameter is of type list containing integers, show
        separation lines at given indices. If parameter is of type tuple, draw
        separation lines at given annular values. If parameter is of type
        function, draw separation lines where function returns True for passed
        annular value.""")

    xticks = param.Parameter(default=4, doc="""
        Ticks along x-axis/segments specified as an integer, explicit list of
        ticks or function. If `None`, no ticks are shown.""")

    yticks = param.Parameter(default=4, doc="""
        Ticks along y-axis/annulars specified as an integer, explicit list of
        ticks or function. If `None`, no ticks are shown.""")

    projection = param.ObjectSelector(default='polar', objects=['polar'])

    # Style option prefixes used to route keywords to the right artists.
    _style_groups = ['annular', 'xmarks', 'ymarks']

    style_opts = ['annular_edgecolors', 'annular_linewidth',
                  'xmarks_linewidth', 'xmarks_edgecolor', 'cmap',
                  'ymarks_linewidth', 'ymarks_edgecolor']

    @staticmethod
    def _map_order_to_ticks(start, end, order, reverse=False):
        """Map elements from given `order` array to bins ranging from `start`
        to `end`.
        """
        size = len(order)
        bounds = np.linspace(start, end, size + 1)
        if reverse:
            bounds = bounds[::-1]
        # Pair each bin start (wrapped into [0, 2*pi)) with its label.
        mapping = list(zip(bounds[:-1]%(np.pi*2), order))
        return mapping

    @staticmethod
    def _compute_separations(inner, outer, angles):
        """Compute x and y positions for separation lines for given angles.
        """
        return [np.array([[a, inner], [a, outer]]) for a in angles]

    @staticmethod
    def _get_markers(ticks, marker):
        # Resolve the xmarks/ymarks option into concrete positions: a
        # callable filters by label, a non-zero integer picks every n-th
        # tick, a tuple selects specific labels, anything else is empty.
        if callable(marker):
            marks = [v for v, l in ticks if marker(l)]
        elif isinstance(marker, int) and marker:
            nth_mark = max([np.ceil(len(ticks) / marker).astype(int), 1])
            marks = [v for v, l in ticks[::nth_mark]]
        elif isinstance(marker, tuple):
            marks = [v for v, l in ticks if l in marker]
        else:
            marks = []
        return marks

    @staticmethod
    def _get_ticks(ticks, ticker):
        # Same dispatch as _get_markers but keeps (position, label) pairs
        # and additionally supports an explicit list of replacement labels.
        if callable(ticker):
            ticks = [(v, l) for v, l in ticks if ticker(l)]
        elif isinstance(ticker, int):
            nth_mark = max([np.ceil(len(ticks) / ticker).astype(int), 1])
            ticks = ticks[::nth_mark]
        elif isinstance(ticker, (tuple, list)):
            nth_mark = max([np.ceil(len(ticks) / len(ticker)).astype(int), 1])
            ticks = [(v, tl) for (v, l), tl in zip(ticks[::nth_mark], ticker)]
        elif ticker:
            ticks = list(ticker)
        else:
            ticks = []
        return ticks

    def get_extents(self, view, ranges, range_type='combined'):
        # Fixed polar extents; the data never defines hard ranges.
        if range_type == 'hard':
            return (np.nan,)*4
        return (0, 0, np.pi*2, self.max_radius+self.radius_outer)

    def get_data(self, element, ranges, style):
        # dimension labels
        dim_labels = element.dimensions(label=True)[:3]
        x, y, z = [dimension_sanitizer(d) for d in dim_labels]
        if self.invert_axes: x, y = y, x

        # get raw values
        aggregate = element.gridded
        xvals = aggregate.dimension_values(x, expanded=False)
        yvals = aggregate.dimension_values(y, expanded=False)
        zvals = aggregate.dimension_values(2, flat=False)

        # pretty print x and y dimension values if necessary
        def _pprint(dim_label, vals):
            if vals.dtype.kind not in 'SU':
                dim = aggregate.get_dimension(dim_label)
                return [dim.pprint_value(v) for v in vals]
            return vals

        xvals = _pprint(x, xvals)
        yvals = _pprint(y, yvals)

        # annular wedges: map segments to angular bins and annulars to
        # radial bins, then build one Wedge patch per cell
        start_angle = self.start_angle
        end_angle = self.start_angle + 2 * np.pi
        bins_segment = np.linspace(start_angle, end_angle, len(xvals)+1)
        segment_ticks = self._map_order_to_ticks(start_angle, end_angle,
                                                 xvals, True)

        radius_max = 0.5
        radius_min = radius_max * self.radius_inner
        bins_annular = np.linspace(radius_min, radius_max, len(yvals)+1)
        radius_ticks = self._map_order_to_ticks(radius_min, radius_max,
                                                yvals)

        patches = []
        for j in range(len(yvals)):
            ybin = bins_annular[j:j+2]
            for i in range(len(xvals))[::-1]:
                xbin = np.rad2deg(bins_segment[i:i+2])
                width = ybin[1]-ybin[0]
                wedge = Wedge((0.5, 0.5), ybin[1], xbin[0], xbin[1], width)
                patches.append(wedge)

        # separation lines between segments (radial lines) and between
        # annulars (concentric circles)
        angles = self._get_markers(segment_ticks, self.xmarks)
        xmarks = self._compute_separations(radius_min, radius_max, angles)
        radii = self._get_markers(radius_ticks, self.ymarks)
        ymarks = [Circle((0.5, 0.5), r) for r in radii]

        style['array'] = zvals.flatten()
        self._norm_kwargs(element, ranges, style, element.vdims[0])
        if 'vmin' in style:
            style['clim'] = style.pop('vmin'), style.pop('vmax')

        data = {'annular': patches, 'xseparator': xmarks, 'yseparator': ymarks}

        xticks = self._get_ticks(segment_ticks, self.xticks)
        if not isinstance(self.xticks, int):
            # Shift non-integer ticks to the segment centers.
            xticks = [(v-((np.pi)/len(xticks)), l) for v, l in xticks]
        yticks = self._get_ticks(radius_ticks, self.yticks)

        ticks = {'xticks': xticks, 'yticks': yticks}
        return data, style, ticks

    def init_artists(self, ax, plot_args, plot_kwargs):
        # Draw edges
        color_opts = ['c', 'cmap', 'vmin', 'vmax', 'norm', 'array']
        groups = [g for g in self._style_groups if g != 'annular']
        edge_opts = filter_styles(plot_kwargs, 'annular', groups)
        annuli = plot_args['annular']
        edge_opts.pop('interpolation', None)
        annuli = PatchCollection(annuli, transform=ax.transAxes, **edge_opts)
        ax.add_collection(annuli)

        artists = {'artist': annuli}

        paths = plot_args['xseparator']
        if paths:
            groups = [g for g in self._style_groups if g != 'xmarks']
            xmark_opts = filter_styles(plot_kwargs, 'xmarks', groups, color_opts)
            xmark_opts.pop('edgecolors', None)
            xseparators = LineCollection(paths, **xmark_opts)
            ax.add_collection(xseparators)
            artists['xseparator'] = xseparators

        paths = plot_args['yseparator']
        if paths:
            groups = [g for g in self._style_groups if g != 'ymarks']
            ymark_opts = filter_styles(plot_kwargs, 'ymarks', groups, color_opts)
            ymark_opts.pop('edgecolors', None)
            yseparators = PatchCollection(paths, facecolor='none',
                                            transform=ax.transAxes, **ymark_opts)
            ax.add_collection(yseparators)
            artists['yseparator'] = yseparators

        return artists
| [
"param.Number",
"matplotlib.collections.LineCollection",
"param.Dict",
"param.Boolean",
"param.Parameter",
"matplotlib.patches.Wedge",
"numpy.isfinite",
"matplotlib.patches.Circle",
"numpy.rad2deg",
"numpy.diff",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"itertools.product",
"matp... | [((577, 1021), 'param.Dict', 'param.Dict', ([], {'default': "{'NaN': 'white'}", 'doc': '"""\n Dictionary to specify colors for clipped values, allows\n setting color for NaN values and for values above and below\n the min and max value. The min, max or NaN color may specify\n an RGB(A) color as a color hex string of the form #FFFFFF or\n #FFFFFFFF or a length 3 or length 4 tuple specifying values in\n the range 0-1 or a named HTML color."""'}), '(default={\'NaN\': \'white\'}, doc=\n """\n Dictionary to specify colors for clipped values, allows\n setting color for NaN values and for values above and below\n the min and max value. The min, max or NaN color may specify\n an RGB(A) color as a color hex string of the form #FFFFFF or\n #FFFFFFFF or a length 3 or length 4 tuple specifying values in\n the range 0-1 or a named HTML color."""\n )\n', (587, 1021), False, 'import param\n'), ((1027, 1085), 'param.ClassSelector', 'param.ClassSelector', ([], {'default': '(0)', 'class_': '(int, float, tuple)'}), '(default=0, class_=(int, float, tuple))\n', (1046, 1085), False, 'import param\n'), ((1100, 1190), 'param.Boolean', 'param.Boolean', ([], {'default': '(False)', 'doc': '"""\n Whether the HeatMap should be radial"""'}), '(default=False, doc=\n """\n Whether the HeatMap should be radial""")\n', (1113, 1190), False, 'import param\n'), ((1205, 1305), 'param.Boolean', 'param.Boolean', ([], {'default': '(False)', 'doc': '"""\n Whether to annotate each pixel with its value."""'}), '(default=False, doc=\n """\n Whether to annotate each pixel with its value.""")\n', (1218, 1305), False, 'import param\n'), ((1315, 1923), 'param.Parameter', 'param.Parameter', ([], {'default': 'None', 'doc': '"""\n Add separation lines to the heatmap for better readability. By\n default, does not show any separation lines. If parameter is of type\n integer, draws the given amount of separations lines spread across\n heatmap. 
If parameter is of type list containing integers, show\n separation lines at given indices. If parameter is of type tuple, draw\n separation lines at given categorical values. If parameter is of type\n function, draw separation lines where function returns True for passed\n heatmap category."""'}), '(default=None, doc=\n """\n Add separation lines to the heatmap for better readability. By\n default, does not show any separation lines. If parameter is of type\n integer, draws the given amount of separations lines spread across\n heatmap. If parameter is of type list containing integers, show\n separation lines at given indices. If parameter is of type tuple, draw\n separation lines at given categorical values. If parameter is of type\n function, draw separation lines where function returns True for passed\n heatmap category."""\n )\n', (1330, 1923), False, 'import param\n'), ((1928, 2536), 'param.Parameter', 'param.Parameter', ([], {'default': 'None', 'doc': '"""\n Add separation lines to the heatmap for better readability. By\n default, does not show any separation lines. If parameter is of type\n integer, draws the given amount of separations lines spread across\n heatmap. If parameter is of type list containing integers, show\n separation lines at given indices. If parameter is of type tuple, draw\n separation lines at given categorical values. If parameter is of type\n function, draw separation lines where function returns True for passed\n heatmap category."""'}), '(default=None, doc=\n """\n Add separation lines to the heatmap for better readability. By\n default, does not show any separation lines. If parameter is of type\n integer, draws the given amount of separations lines spread across\n heatmap. If parameter is of type list containing integers, show\n separation lines at given indices. If parameter is of type tuple, draw\n separation lines at given categorical values. 
If parameter is of type\n function, draw separation lines where function returns True for passed\n heatmap category."""\n )\n', (1943, 2536), False, 'import param\n'), ((2541, 2726), 'param.Parameter', 'param.Parameter', ([], {'default': '(20)', 'doc': '"""\n Ticks along x-axis/segments specified as an integer, explicit list of\n ticks or function. If `None`, no ticks are shown."""'}), '(default=20, doc=\n """\n Ticks along x-axis/segments specified as an integer, explicit list of\n ticks or function. If `None`, no ticks are shown."""\n )\n', (2556, 2726), False, 'import param\n'), ((2731, 2916), 'param.Parameter', 'param.Parameter', ([], {'default': '(20)', 'doc': '"""\n Ticks along y-axis/annulars specified as an integer, explicit list of\n ticks or function. If `None`, no ticks are shown."""'}), '(default=20, doc=\n """\n Ticks along y-axis/annulars specified as an integer, explicit list of\n ticks or function. If `None`, no ticks are shown."""\n )\n', (2746, 2916), False, 'import param\n'), ((8418, 8566), 'param.Number', 'param.Number', ([], {'default': '(np.pi / 2)', 'doc': '"""\n Define starting angle of the first annulars. By default, beings\n at 12 o clock."""'}), '(default=np.pi / 2, doc=\n """\n Define starting angle of the first annulars. 
By default, beings\n at 12 o clock."""\n )\n', (8430, 8566), False, 'import param\n'), ((8573, 8708), 'param.Number', 'param.Number', ([], {'default': '(0.5)', 'doc': '"""\n Define the maximum radius which is used for the x and y range extents.\n """'}), '(default=0.5, doc=\n """\n Define the maximum radius which is used for the x and y range extents.\n """\n )\n', (8585, 8708), False, 'import param\n'), ((8719, 8836), 'param.Number', 'param.Number', ([], {'default': '(0.1)', 'bounds': '(0, 0.5)', 'doc': '"""\n Define the radius fraction of inner, empty space."""'}), '(default=0.1, bounds=(0, 0.5), doc=\n """\n Define the radius fraction of inner, empty space.""")\n', (8731, 8836), False, 'import param\n'), ((8852, 8987), 'param.Number', 'param.Number', ([], {'default': '(0.05)', 'bounds': '(0, 1)', 'doc': '"""\n Define the radius fraction of outer space including the labels."""'}), '(default=0.05, bounds=(0, 1), doc=\n """\n Define the radius fraction of outer space including the labels."""\n )\n', (8864, 8987), False, 'import param\n'), ((8992, 9081), 'param.Boolean', 'param.Boolean', ([], {'default': '(True)', 'doc': '"""\n Whether the HeatMap should be radial"""'}), '(default=True, doc=\n """\n Whether the HeatMap should be radial""")\n', (9005, 9081), False, 'import param\n'), ((9096, 9196), 'param.Boolean', 'param.Boolean', ([], {'default': '(False)', 'doc': '"""\n Whether to annotate each pixel with its value."""'}), '(default=False, doc=\n """\n Whether to annotate each pixel with its value.""")\n', (9109, 9196), False, 'import param\n'), ((9206, 9816), 'param.Parameter', 'param.Parameter', ([], {'default': 'None', 'doc': '"""\n Add separation lines between segments for better readability. By\n default, does not show any separation lines. If parameter is of type\n integer, draws the given amount of separations lines spread across\n radial heatmap. If parameter is of type list containing integers, show\n separation lines at given indices. 
If parameter is of type tuple, draw\n separation lines at given segment values. If parameter is of type\n function, draw separation lines where function returns True for passed\n segment value."""'}), '(default=None, doc=\n """\n Add separation lines between segments for better readability. By\n default, does not show any separation lines. If parameter is of type\n integer, draws the given amount of separations lines spread across\n radial heatmap. If parameter is of type list containing integers, show\n separation lines at given indices. If parameter is of type tuple, draw\n separation lines at given segment values. If parameter is of type\n function, draw separation lines where function returns True for passed\n segment value."""\n )\n', (9221, 9816), False, 'import param\n'), ((9821, 10431), 'param.Parameter', 'param.Parameter', ([], {'default': 'None', 'doc': '"""\n Add separation lines between annulars for better readability. By\n default, does not show any separation lines. If parameter is of type\n integer, draws the given amount of separations lines spread across\n radial heatmap. If parameter is of type list containing integers, show\n separation lines at given indices. If parameter is of type tuple, draw\n separation lines at given annular values. If parameter is of type\n function, draw separation lines where function returns True for passed\n annular value."""'}), '(default=None, doc=\n """\n Add separation lines between annulars for better readability. By\n default, does not show any separation lines. If parameter is of type\n integer, draws the given amount of separations lines spread across\n radial heatmap. If parameter is of type list containing integers, show\n separation lines at given indices. If parameter is of type tuple, draw\n separation lines at given annular values. 
If parameter is of type\n function, draw separation lines where function returns True for passed\n annular value."""\n )\n', (9836, 10431), False, 'import param\n'), ((10436, 10620), 'param.Parameter', 'param.Parameter', ([], {'default': '(4)', 'doc': '"""\n Ticks along x-axis/segments specified as an integer, explicit list of\n ticks or function. If `None`, no ticks are shown."""'}), '(default=4, doc=\n """\n Ticks along x-axis/segments specified as an integer, explicit list of\n ticks or function. If `None`, no ticks are shown."""\n )\n', (10451, 10620), False, 'import param\n'), ((10625, 10809), 'param.Parameter', 'param.Parameter', ([], {'default': '(4)', 'doc': '"""\n Ticks along y-axis/annulars specified as an integer, explicit list of\n ticks or function. If `None`, no ticks are shown."""'}), '(default=4, doc=\n """\n Ticks along y-axis/annulars specified as an integer, explicit list of\n ticks or function. If `None`, no ticks are shown."""\n )\n', (10640, 10809), False, 'import param\n'), ((10818, 10874), 'param.ObjectSelector', 'param.ObjectSelector', ([], {'default': '"""polar"""', 'objects': "['polar']"}), "(default='polar', objects=['polar'])\n", (10838, 10874), False, 'import param\n'), ((4044, 4063), 'itertools.product', 'product', (['xpos', 'ypos'], {}), '(xpos, ypos)\n', (4051, 4063), False, 'from itertools import product\n'), ((11347, 11380), 'numpy.linspace', 'np.linspace', (['start', 'end', '(size + 1)'], {}), '(start, end, size + 1)\n', (11358, 11380), True, 'import numpy as np\n'), ((16056, 16116), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['annuli'], {'transform': 'ax.transAxes'}), '(annuli, transform=ax.transAxes, **edge_opts)\n', (16071, 16116), False, 'from matplotlib.collections import LineCollection, PatchCollection\n'), ((11692, 11726), 'numpy.array', 'np.array', (['[[a, inner], [a, outer]]'], {}), '([[a, inner], [a, outer]])\n', (11700, 11726), True, 'import numpy as np\n'), ((15029, 15050), 
'matplotlib.patches.Circle', 'Circle', (['(0.5, 0.5)', 'r'], {}), '((0.5, 0.5), r)\n', (15035, 15050), False, 'from matplotlib.patches import Wedge, Circle\n'), ((16473, 16508), 'matplotlib.collections.LineCollection', 'LineCollection', (['paths'], {}), '(paths, **xmark_opts)\n', (16487, 16508), False, 'from matplotlib.collections import LineCollection, PatchCollection\n'), ((16884, 16962), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['paths'], {'facecolor': '"""none"""', 'transform': 'ax.transAxes'}), "(paths, facecolor='none', transform=ax.transAxes, **ymark_opts)\n", (16899, 16962), False, 'from matplotlib.collections import LineCollection, PatchCollection\n'), ((3958, 3972), 'numpy.diff', 'np.diff', (['xvals'], {}), '(xvals)\n', (3965, 3972), True, 'import numpy as np\n'), ((4004, 4018), 'numpy.diff', 'np.diff', (['yvals'], {}), '(yvals)\n', (4011, 4018), True, 'import numpy as np\n'), ((7205, 7233), 'numpy.arange', 'np.arange', (['(data.shape[1] + 1)'], {}), '(data.shape[1] + 1)\n', (7214, 7233), True, 'import numpy as np\n'), ((7495, 7523), 'numpy.arange', 'np.arange', (['(data.shape[0] + 1)'], {}), '(data.shape[0] + 1)\n', (7504, 7523), True, 'import numpy as np\n'), ((14625, 14658), 'numpy.rad2deg', 'np.rad2deg', (['bins_segment[i:i + 2]'], {}), '(bins_segment[i:i + 2])\n', (14635, 14658), True, 'import numpy as np\n'), ((14721, 14772), 'matplotlib.patches.Wedge', 'Wedge', (['(0.5, 0.5)', 'ybin[1]', 'xbin[0]', 'xbin[1]', 'width'], {}), '((0.5, 0.5), ybin[1], xbin[0], xbin[1], width)\n', (14726, 14772), False, 'from matplotlib.patches import Wedge, Circle\n'), ((4609, 4623), 'numpy.diff', 'np.diff', (['xvals'], {}), '(xvals)\n', (4616, 4623), True, 'import numpy as np\n'), ((4934, 4948), 'numpy.diff', 'np.diff', (['yvals'], {}), '(yvals)\n', (4941, 4948), True, 'import numpy as np\n'), ((6970, 6987), 'numpy.isfinite', 'np.isfinite', (['data'], {}), '(data)\n', (6981, 6987), True, 'import numpy as np\n')] |
from abc import abstractmethod, ABC
import os

import logging
# NOTE(review): configuring logging at import time affects the whole
# process hosting this module -- confirm this is intended.
logging.basicConfig(level=logging.INFO)

import numpy as np
from torch.utils.data import Subset
from torchvision import transforms

from .base import get_split_indices, print_loaded_dataset_shapes, get_loaders_from_datasets, log_call_parameters
class StandardVisionDataset(ABC):
    """Describes a vision dataset and knows how to build splits and loaders.

    Subclasses declare the dataset name, per-channel normalization statistics
    and how to construct the raw dataset for a given split; this base class
    turns that into train/validation/test datasets and data loaders.
    """

    def __init__(self, **kwargs):
        pass

    @property
    @abstractmethod
    def dataset_name(self) -> str:
        """Short identifier of the dataset (also used as its directory name)."""
        raise NotImplementedError('dataset_name not implemented')

    @property
    @abstractmethod
    def means(self):
        """Per-channel means used for input normalization."""
        raise NotImplementedError('means not implemented')

    @property
    @abstractmethod
    def stds(self):
        """Per-channel standard deviations used for input normalization."""
        raise NotImplementedError('stds not implemented')

    @abstractmethod
    def raw_dataset(self, data_dir: str, download: bool, split: str, transform):
        """Construct the underlying dataset for one of 'train'/'val'/'test'."""
        raise NotImplementedError('raw_dataset_class not implemented, need to return datasets')

    @property
    def normalize_transform(self):
        """Normalization transform built from the dataset statistics."""
        return transforms.Normalize(mean=self.means, std=self.stds)

    @property
    def train_transforms(self):
        """Training-time transforms: tensor conversion followed by normalization."""
        return transforms.Compose([transforms.ToTensor(), self.normalize_transform])

    @property
    def test_transforms(self):
        """Test-time transforms; identical to training transforms by default."""
        return self.train_transforms

    def post_process_datasets(self, train_data, val_data, test_data, info=None):
        """Hook for subclasses to modify labels or images; identity by default."""
        return train_data, val_data, test_data, info

    @print_loaded_dataset_shapes
    @log_call_parameters
    def build_datasets(self, data_dir: str = None, val_ratio: float = 0.2, num_train_examples: int = None,
                       seed: int = 42, download: bool = True, **kwargs):
        """Build the train, validation, and test datasets.

        When the dataset ships no validation split, one is carved out of the
        training set using `val_ratio` and `seed`; `num_train_examples`
        optionally subsamples the training set.
        """
        root = os.path.join(os.environ['DATA_DIR'], self.dataset_name) if data_dir is None else data_dir

        train_data = self.raw_dataset(root, download=download, split='train', transform=self.train_transforms)
        val_data = self.raw_dataset(root, download=download, split='val', transform=self.train_transforms)
        test_data = self.raw_dataset(root, download=download, split='test', transform=self.test_transforms)

        if val_data is None:
            # No dedicated validation split: carve one out of the training set.
            logging.info(f"Dataset {self.dataset_name} has no validation set. Splitting the training set...")
            train_indices, val_indices = get_split_indices(len(train_data), val_ratio, seed)
            if num_train_examples is not None:
                train_indices = np.random.choice(train_indices, num_train_examples, replace=False)
            val_data = Subset(train_data, val_indices)
            train_data = Subset(train_data, train_indices)
        elif num_train_examples is not None:
            # Dedicated validation split exists; only subsample the training set.
            chosen = np.random.choice(len(train_data), num_train_examples, replace=False)
            train_data = Subset(train_data, chosen)

        # General way of returning extra information to the caller.
        info = None
        train_data, val_data, test_data, info = self.post_process_datasets(train_data, val_data, test_data, info=info)

        # Name each split and attach the normalization statistics.
        for split in (train_data, val_data, test_data):
            split.dataset_name = self.dataset_name
            split.statistics = (self.means, self.stds)

        return train_data, val_data, test_data, info

    @log_call_parameters
    def build_loaders(self, data_dir: str = None, val_ratio: float = 0.2, num_train_examples: int = None,
                      seed: int = 42, download: bool = True, batch_size: int = 128, num_workers: int = 4,
                      drop_last: bool = False, **kwargs):
        """Build the train, validation, and test data loaders."""
        train_data, val_data, test_data, info = self.build_datasets(
            data_dir=data_dir, val_ratio=val_ratio,
            num_train_examples=num_train_examples,
            seed=seed, download=download, **kwargs)
        train_loader, val_loader, test_loader = get_loaders_from_datasets(
            train_data, val_data, test_data,
            batch_size=batch_size, num_workers=num_workers,
            drop_last=drop_last)
        return train_loader, val_loader, test_loader, info
| [
"torch.utils.data.Subset",
"logging.basicConfig",
"logging.info",
"numpy.random.choice",
"torchvision.transforms.Normalize",
"os.path.join",
"torchvision.transforms.ToTensor"
] | [((61, 100), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (80, 100), False, 'import logging\n'), ((1117, 1169), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'self.means', 'std': 'self.stds'}), '(mean=self.means, std=self.stds)\n', (1137, 1169), False, 'from torchvision import transforms\n'), ((1971, 2026), 'os.path.join', 'os.path.join', (["os.environ['DATA_DIR']", 'self.dataset_name'], {}), "(os.environ['DATA_DIR'], self.dataset_name)\n", (1983, 2026), False, 'import os\n'), ((2445, 2552), 'logging.info', 'logging.info', (['f"""Dataset {self.dataset_name} has no validation set. Splitting the training set..."""'], {}), "(\n f'Dataset {self.dataset_name} has no validation set. Splitting the training set...'\n )\n", (2457, 2552), False, 'import logging\n'), ((2805, 2836), 'torch.utils.data.Subset', 'Subset', (['train_data', 'val_indices'], {}), '(train_data, val_indices)\n', (2811, 2836), False, 'from torch.utils.data import Subset\n'), ((2862, 2895), 'torch.utils.data.Subset', 'Subset', (['train_data', 'train_indices'], {}), '(train_data, train_indices)\n', (2868, 2895), False, 'from torch.utils.data import Subset\n'), ((1265, 1286), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1284, 1286), False, 'from torchvision import transforms\n'), ((2715, 2781), 'numpy.random.choice', 'np.random.choice', (['train_indices', 'num_train_examples'], {'replace': '(False)'}), '(train_indices, num_train_examples, replace=False)\n', (2731, 2781), True, 'import numpy as np\n'), ((3145, 3178), 'torch.utils.data.Subset', 'Subset', (['train_data', 'train_indices'], {}), '(train_data, train_indices)\n', (3151, 3178), False, 'from torch.utils.data import Subset\n')] |
# -*- coding: utf-8 -*-
"""image_to_amime_with_mxnet.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1LTzdjBxSsx9vAmfF8Xt81FHVgSwGSUYU
"""
from PIL import Image
import torch
import IPython
from IPython.display import display
import numpy as np
# --- Colab form parameters (the #@param tags drive the notebook UI) --------
input_image = "DSC00438.JPG"#@param {type:"string"}
anime = "face_paint_512_v2" #@param ["face_paint_512_v2", "celeba_distill", "face_paint_512_v1", "paprika"] {allow-input: false}
# Load the AnimeGAN2 generator checkpoint selected above and move it to the GPU.
anime = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator", pretrained=anime)
anime = anime.cuda()
# Helper from the same hub repo that runs a PIL image through the generator at 512x512.
face2paint = torch.hub.load("bryandlee/animegan2-pytorch:main", "face2paint", size=512)
def image_to_paint(img):
  """Run a numpy RGB image through the AnimeGAN generator; return a 112x112 numpy image."""
  pil_image = Image.fromarray(np.uint8(img))
  painted = face2paint(anime, pil_image, device='cuda')
  return np.array(painted.resize((112,112)))
# Import modules
import sys, dlib
sys.path.append('eameo-faceswap-generator')
import numpy as np
import faceBlendCommon as fbc
# import matplotlib.pyplot as plt
from PIL import Image
# get landmark
# Dlib frontal-face detector plus the standard 68-point landmark model.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("eameo-faceswap-generator/shape_predictor_68_face_landmarks.dat")

def get_points(x):
  """Return the 68 facial landmarks of image *x* as an ndarray of (x, y) points.

  Converted from an assigned lambda to a def (PEP 8 E731); the call interface
  is unchanged.
  """
  return np.array(fbc.getLandmarks(detector, predictor, x))
# Start/end index ranges (start inclusive, end exclusive) into the 68-point
# dlib landmark array for each facial region used by the masking helpers
# below.  Commented-out entries are regions currently excluded on purpose.
FACIAL_LANDMARKS_IDXS = {
	"mouth": (48, 68),
	# "right_eyebrow": (17, 22),
	# "left_eyebrow": (22, 27),
	# "right_eye": (36, 42),
	# "left_eye": (42, 48),
	"nose": (27, 35),
	# "jaw": (0, 17),
	"eye" : (36, 48),
	"eyebrow" : (17, 27)
}
def get_crop_boundary(points_subset):
  """Return (min_point, max_point) axis-aligned bounds of a point array.

  *points_subset* is an (N, D) array of coordinates; the result is a pair of
  length-D arrays.  Generalized from the original hard-coded two-column
  version by reducing along axis 0, which gives identical results for (N, 2)
  landmark arrays.
  """
  min_point = np.min(points_subset, axis=0)
  max_point = np.max(points_subset, axis=0)
  return min_point, max_point
def organ_image(image, points, organ = 'mouth'):
  """Return a black image with only the bounding box of *organ* copied from *image*."""
  first, last = FACIAL_LANDMARKS_IDXS[organ]
  lo, hi = get_crop_boundary(points[first:last])
  rows, cols = slice(lo[1], hi[1]), slice(lo[0], hi[0])
  masked = np.zeros_like(image)
  masked[rows, cols, :] = image[rows, cols, :]
  return masked
def organ_only_image(image, points):
  """Return a black image with the bounding box of every known facial region copied in."""
  masked = np.zeros_like(image)
  for region in FACIAL_LANDMARKS_IDXS:
    first, last = FACIAL_LANDMARKS_IDXS[region]
    lo, hi = get_crop_boundary(points[first:last])
    rows, cols = slice(lo[1], hi[1]), slice(lo[0], hi[0])
    masked[rows, cols, :] = image[rows, cols, :]
  return masked
def no_organ_image(image, points):
  """Return a copy of *image* with every facial-region bounding box filled
  with the global image mean (erasing mouth/nose/eyes/eyebrows).
  """
  new_image = np.array(image, copy=True)
  # Hoisted out of the loop: np.mean(image) is loop-invariant but was
  # recomputed once per organ in the original.
  fill_value = np.mean(image)
  for organ in FACIAL_LANDMARKS_IDXS:
    start, end = FACIAL_LANDMARKS_IDXS[organ]
    min_point, max_point = get_crop_boundary(points[start:end])
    new_image[min_point[1]:max_point[1], min_point[0]:max_point[0], :] = fill_value
  return new_image
"""#Prepare images"""
# !pip install mxnet >/dev/null
import os
import mxnet as mx
source = 'faces_webface_112x112'
# output = 'faces_webface_112x112_organs'
# output = 'faces_webface_112x112_no_organs'
output = 'faces_webface_112x112_paint'
valid_dataset = 'lfw'
# imgrec = mx.recordio.MXIndexedRecordIO(os.path.join(source, 'train.idx'), os.path.join(source, 'train.rec'), 'r')
# import numpy as np
# s = imgrec.read_idx(0)
# header, _ = mx.recordio.unpack(s)
# if header.flag > 0:
# header0 = (int(header.label[0]), int(header.label[1]))
# imgidx = np.array(range(1, int(header.label[0])))
# else:
# imgidx = np.array(list(imgrec.keys))
from tqdm import tqdm
# record = mx.recordio.MXIndexedRecordIO(os.path.join(output, 'train.idx'), os.path.join(output, 'train.rec'), 'w')
# s = imgrec.read_idx(0)
# record.write_idx(0, s)
# for i in tqdm(imgidx):
# s = imgrec.read_idx(i)
# header, img = mx.recordio.unpack(s)
# sample = mx.image.imdecode(img).asnumpy()
# try:
# # sample = organ_image(sample, get_points(sample))
# sample = image_to_paint(sample)
# packed_s = mx.recordio.pack_img(header, sample)
# record.write_idx(i, packed_s)
# except:
# pass
# record.close()
import pickle as pkl
with open(os.path.join(source, valid_dataset + '.bin'), 'rb') as f:
bins, issame_list = pkl.load(f, encoding='bytes')
A = []
B = []
S = []
for s, a, b in tqdm(zip(issame_list, bins[0::2], bins[1::2])):
try:
a = mx.image.imdecode(a).asnumpy()
# pa = get_points(a)
a = image_to_paint(a)
b = mx.image.imdecode(b).asnumpy()
# pb = get_points(b)
b = image_to_paint(b)
A.append(a)
B.append(b)
S.append(s)
except:
pass
bins = []
header = mx.recordio.IRHeader(0, 5, 7, 0)
for i in range(len(A)):
bins.append(np.flip(A[i], 2))
bins.append(np.flip(B[i], 2))
for i, x in enumerate(bins):
packed_i = mx.recordio.pack_img(header, x)
_, img_only = mx.recordio.unpack(packed_i)
bins[i] = img_only
with open(os.path.join(output, valid_dataset + '.bin'), 'wb') as f:
pkl.dump((bins, S), f) | [
"pickle.dump",
"mxnet.image.imdecode",
"pickle.load",
"numpy.mean",
"dlib.shape_predictor",
"os.path.join",
"sys.path.append",
"numpy.zeros_like",
"numpy.max",
"mxnet.recordio.IRHeader",
"torch.hub.load",
"numpy.uint8",
"faceBlendCommon.getLandmarks",
"mxnet.recordio.pack_img",
"numpy.mi... | [((510, 596), 'torch.hub.load', 'torch.hub.load', (['"""bryandlee/animegan2-pytorch:main"""', '"""generator"""'], {'pretrained': 'anime'}), "('bryandlee/animegan2-pytorch:main', 'generator', pretrained=\n anime)\n", (524, 596), False, 'import torch\n'), ((626, 700), 'torch.hub.load', 'torch.hub.load', (['"""bryandlee/animegan2-pytorch:main"""', '"""face2paint"""'], {'size': '(512)'}), "('bryandlee/animegan2-pytorch:main', 'face2paint', size=512)\n", (640, 700), False, 'import torch\n'), ((921, 964), 'sys.path.append', 'sys.path.append', (['"""eameo-faceswap-generator"""'], {}), "('eameo-faceswap-generator')\n", (936, 964), False, 'import sys, dlib\n'), ((1098, 1130), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1128, 1130), False, 'import sys, dlib\n'), ((1143, 1234), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""eameo-faceswap-generator/shape_predictor_68_face_landmarks.dat"""'], {}), "(\n 'eameo-faceswap-generator/shape_predictor_68_face_landmarks.dat')\n", (1163, 1234), False, 'import sys, dlib\n'), ((4719, 4751), 'mxnet.recordio.IRHeader', 'mx.recordio.IRHeader', (['(0)', '(5)', '(7)', '(0)'], {}), '(0, 5, 7, 0)\n', (4739, 4751), True, 'import mxnet as mx\n'), ((1961, 1981), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (1974, 1981), True, 'import numpy as np\n'), ((2190, 2210), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (2203, 2210), True, 'import numpy as np\n'), ((2598, 2624), 'numpy.array', 'np.array', (['image'], {'copy': '(True)'}), '(image, copy=True)\n', (2606, 2624), True, 'import numpy as np\n'), ((4284, 4313), 'pickle.load', 'pkl.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (4292, 4313), True, 'import pickle as pkl\n'), ((4888, 4919), 'mxnet.recordio.pack_img', 'mx.recordio.pack_img', (['header', 'x'], {}), '(header, x)\n', (4908, 4919), True, 'import mxnet as mx\n'), ((4938, 4966), 'mxnet.recordio.unpack', 
'mx.recordio.unpack', (['packed_i'], {}), '(packed_i)\n', (4956, 4966), True, 'import mxnet as mx\n'), ((5063, 5085), 'pickle.dump', 'pkl.dump', (['(bins, S)', 'f'], {}), '((bins, S), f)\n', (5071, 5085), True, 'import pickle as pkl\n'), ((766, 779), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (774, 779), True, 'import numpy as np\n'), ((1262, 1302), 'faceBlendCommon.getLandmarks', 'fbc.getLandmarks', (['detector', 'predictor', 'x'], {}), '(detector, predictor, x)\n', (1278, 1302), True, 'import faceBlendCommon as fbc\n'), ((2877, 2891), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (2884, 2891), True, 'import numpy as np\n'), ((4202, 4246), 'os.path.join', 'os.path.join', (['source', "(valid_dataset + '.bin')"], {}), "(source, valid_dataset + '.bin')\n", (4214, 4246), False, 'import os\n'), ((4792, 4808), 'numpy.flip', 'np.flip', (['A[i]', '(2)'], {}), '(A[i], 2)\n', (4799, 4808), True, 'import numpy as np\n'), ((4826, 4842), 'numpy.flip', 'np.flip', (['B[i]', '(2)'], {}), '(B[i], 2)\n', (4833, 4842), True, 'import numpy as np\n'), ((5001, 5045), 'os.path.join', 'os.path.join', (['output', "(valid_dataset + '.bin')"], {}), "(output, valid_dataset + '.bin')\n", (5013, 5045), False, 'import os\n'), ((1608, 1635), 'numpy.min', 'np.min', (['points_subset[:, 0]'], {}), '(points_subset[:, 0])\n', (1614, 1635), True, 'import numpy as np\n'), ((1636, 1663), 'numpy.min', 'np.min', (['points_subset[:, 1]'], {}), '(points_subset[:, 1])\n', (1642, 1663), True, 'import numpy as np\n'), ((1691, 1718), 'numpy.max', 'np.max', (['points_subset[:, 0]'], {}), '(points_subset[:, 0])\n', (1697, 1718), True, 'import numpy as np\n'), ((1719, 1746), 'numpy.max', 'np.max', (['points_subset[:, 1]'], {}), '(points_subset[:, 1])\n', (1725, 1746), True, 'import numpy as np\n'), ((4420, 4440), 'mxnet.image.imdecode', 'mx.image.imdecode', (['a'], {}), '(a)\n', (4437, 4440), True, 'import mxnet as mx\n'), ((4523, 4543), 'mxnet.image.imdecode', 'mx.image.imdecode', (['b'], {}), 
'(b)\n', (4540, 4543), True, 'import mxnet as mx\n')] |
import imagematrix
from array import *
import math
import numpy as np
class ResizeableImage(imagematrix.ImageMatrix):
    """Image with support for content-aware resizing via vertical seam removal.

    Fixes over the original skeleton:
    - ``best_seam`` called ``dynamic(self, ...)`` / ``naive(self, ...)`` as
      bare names, which raised NameError; they are methods and must be called
      through ``self``.
    - The DP table was allocated as ``(width, height)`` but indexed
      ``[row][col]`` (only worked for square images); it is now
      ``(height, width)``.
    - ``np.int`` (removed in modern NumPy) replaced with ``np.int64``.
    - ``dynamic``/``naive``/``_gradient``/``_get_path`` referenced undefined
      names (``gradient``, ``seam``, ``path``); they are now implemented.
    """

    def best_seam(self, dp=True):
        """Return the minimum-energy vertical seam of the image.

        The seam is a list of (row, col) pairs, one per image row, from top
        to bottom.
        NOTE(review): the (row, col) ordering follows this file's own loop
        conventions -- confirm it matches what ImageMatrix.remove_seam and
        ImageMatrix.energy expect.
        """
        # DP/energy table, indexed table[row][col].
        table = np.zeros((self.height, self.width), dtype=np.int64)
        if dp:
            # Dynamic programming: O(width * height).
            seam = self.dynamic(table)
        else:
            # Brute-force recursion: exponential, for comparison only.
            seam = self.naive(table)
        return seam

    def remove_best_seam(self):
        """Remove the lowest-energy vertical seam from the image."""
        self.remove_seam(self.best_seam())

    def dynamic(self, map):
        """Compute the lowest-energy seam with dynamic programming.

        *map* is an all-zero (height, width) array; it is overwritten first
        with per-pixel energies and then with cumulative seam energies.
        """
        inf = math.inf
        self._gradient(map)
        # Each cell accumulates its own energy plus the cheapest of the three
        # cells above it (up-left, straight up, up-right).
        for i in range(1, self.height):
            for j in range(self.width):
                up_left = map[i - 1][j - 1] if j > 0 else inf
                up_right = map[i - 1][j + 1] if j < self.width - 1 else inf
                map[i][j] += min(up_left, map[i - 1][j], up_right)
        return self._get_path(map)

    def naive(self, map):
        """Compute the lowest-energy seam by exhaustive recursion (exponential)."""
        self._gradient(map)
        best_cost = math.inf
        best_path = []
        for j in range(self.width):
            cost, path = self._best_from(map, 0, j)
            if cost < best_cost:
                best_cost, best_path = cost, path
        return best_path

    def _best_from(self, map, i, j):
        """Return (cost, path) of the cheapest seam starting at pixel (i, j)."""
        if i == self.height - 1:
            return map[i][j], [(i, j)]
        best_cost = math.inf
        best_path = []
        for c in (j - 1, j, j + 1):
            if 0 <= c < self.width:
                cost, path = self._best_from(map, i + 1, c)
                if cost < best_cost:
                    best_cost, best_path = cost, path
        return map[i][j] + best_cost, [(i, j)] + best_path

    def _gradient(self, map):
        """Fill *map* in place with the energy of every pixel and return it."""
        for i in range(self.height):
            for j in range(self.width):
                # NOTE(review): argument order (i, j) copied from the original
                # code -- verify against ImageMatrix.energy's signature.
                map[i][j] = self.energy(i, j)
        return map

    def _get_path(self, map):
        """Backtrack through a filled DP table and return the seam path."""
        # Start from the cheapest cell in the bottom row ...
        j = int(np.argmin(map[self.height - 1]))
        path = [(self.height - 1, j)]
        # ... then walk upward, always stepping to the cheapest reachable cell.
        for i in range(self.height - 2, -1, -1):
            candidates = [c for c in (j - 1, j, j + 1) if 0 <= c < self.width]
            j = min(candidates, key=lambda c: map[i][c])
            path.append((i, j))
        path.reverse()
        return path

    def _dfs(self, a, b, c):
        """Unused placeholder kept for interface compatibility."""
        return
| [
"numpy.zeros"
] | [((242, 291), 'numpy.zeros', 'np.zeros', (['(self.width, self.height)'], {'dtype': 'np.int'}), '((self.width, self.height), dtype=np.int)\n', (250, 291), True, 'import numpy as np\n')] |
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by <NAME>
# Python translation by <NAME>, with <NAME> and <NAME>
import proper
import numpy as np
def prop_propagate(wf, dz, surface_name = "", **kwargs):
"""Determine which propagator to use to propagate the current wavefront by a
specified distance and do it.
Parameters
----------
wf : obj
WaveFront class object
dz : float
Distance in meters to propagate wavefront
surface_name : str
String containing name of surface to which to propagate
Returns
-------
None
Replaces the wavefront with a new one.
Other Parameters
----------------
TO_PLANE : bool
"""
if proper.print_it:
if surface_name == "":
print("Propagating")
else:
print("Propagating to %s" %(surface_name))
dzw = proper.prop_select_propagator(wf, dz)
z1 = wf.z
z2 = z1 + dz
if ("TO_PLANE" in kwargs and kwargs["TO_PLANE"]):
wf.propagator_type = wf.propagator_type[:11] + "INSIDE_"
if proper.verbose:
print(" PROPAGATOR: propagator_type = %s" %(wf.propagator_type))
if wf.propagator_type == "INSIDE__to_INSIDE_":
proper.prop_ptp(wf, dz)
elif wf.propagator_type == "INSIDE__to_OUTSIDE":
proper.prop_ptp(wf, wf.z_w0 - z1)
proper.prop_wts(wf, z2 - wf.z_w0)
elif wf.propagator_type == "OUTSIDE_to_INSIDE_":
proper.prop_stw(wf, wf.z_w0 - z1)
proper.prop_ptp(wf, z2 - wf.z_w0)
elif wf.propagator_type == "OUTSIDE_to_OUTSIDE":
proper.prop_stw(wf, wf.z_w0 - z1)
proper.prop_wts(wf, z2 - wf.z_w0)
if proper.print_total_intensity:
intensity = np.sum(np.abs(wf.wfarr)**2, dtype = np.float64)
if surface_name == "":
print("Total intensity = ", intensity)
else:
print("Total intensity at surface ", surface_name, " = ", intensity)
if proper.do_table:
proper.sampling_list[proper.action_num] = wf.dx
proper.distance_list[proper.action_num] = dz
proper.beam_diam_list[proper.action_num] = 1 * proper.prop_get_beamradius(wf)
if surface_name != "":
proper.surface_name_list[proper.action_num] = surface_name
else:
proper.surface_name_list[proper.action_num] = "(SURFACE)"
proper.action_num += 1
return
| [
"proper.prop_wts",
"proper.prop_select_propagator",
"numpy.abs",
"proper.prop_ptp",
"proper.prop_stw",
"proper.prop_get_beamradius"
] | [((1162, 1199), 'proper.prop_select_propagator', 'proper.prop_select_propagator', (['wf', 'dz'], {}), '(wf, dz)\n', (1191, 1199), False, 'import proper\n'), ((1527, 1550), 'proper.prop_ptp', 'proper.prop_ptp', (['wf', 'dz'], {}), '(wf, dz)\n', (1542, 1550), False, 'import proper\n'), ((1613, 1646), 'proper.prop_ptp', 'proper.prop_ptp', (['wf', '(wf.z_w0 - z1)'], {}), '(wf, wf.z_w0 - z1)\n', (1628, 1646), False, 'import proper\n'), ((1655, 1688), 'proper.prop_wts', 'proper.prop_wts', (['wf', '(z2 - wf.z_w0)'], {}), '(wf, z2 - wf.z_w0)\n', (1670, 1688), False, 'import proper\n'), ((2439, 2469), 'proper.prop_get_beamradius', 'proper.prop_get_beamradius', (['wf'], {}), '(wf)\n', (2465, 2469), False, 'import proper\n'), ((1750, 1783), 'proper.prop_stw', 'proper.prop_stw', (['wf', '(wf.z_w0 - z1)'], {}), '(wf, wf.z_w0 - z1)\n', (1765, 1783), False, 'import proper\n'), ((1792, 1825), 'proper.prop_ptp', 'proper.prop_ptp', (['wf', '(z2 - wf.z_w0)'], {}), '(wf, z2 - wf.z_w0)\n', (1807, 1825), False, 'import proper\n'), ((2032, 2048), 'numpy.abs', 'np.abs', (['wf.wfarr'], {}), '(wf.wfarr)\n', (2038, 2048), True, 'import numpy as np\n'), ((1887, 1920), 'proper.prop_stw', 'proper.prop_stw', (['wf', '(wf.z_w0 - z1)'], {}), '(wf, wf.z_w0 - z1)\n', (1902, 1920), False, 'import proper\n'), ((1929, 1962), 'proper.prop_wts', 'proper.prop_wts', (['wf', '(z2 - wf.z_w0)'], {}), '(wf, z2 - wf.z_w0)\n', (1944, 1962), False, 'import proper\n')] |
import json
import sys
from sklearn.metrics import classification_report
from argparse import ArgumentParser
import tensorflow as tf
import numpy as np
from src.utils.model_utility import *
from src.utils.generators import *
def predict_model(model_configuration: str,
                  dataset_configuration: str,
                  computer_configuration: str):
    """Load a trained Keras model, run it on the validation generator and
    print/save the resulting predictions and classification report.
    """
    def _load_config(kind, name):
        # Configuration files live under src/configuration/<kind>/<name>.json
        with open('src/configuration/{}/{}.json'.format(kind, name)) as json_file:
            return json.load(json_file)

    # name, image width/height, l2_reg, weights path, ...
    model_parameters = _load_config('model', model_configuration)
    # data paths, number of gpus
    computer_parameters = _load_config('computer', computer_configuration)
    # n_classes, labels, class names, etc.
    dataset_parameters = _load_config('dataset', dataset_configuration)

    model = tf.keras.models.load_model(model_parameters['weights'])

    # Only the validation generator is needed here; the training one is unused.
    _, validation_data = get_generator(dataset_parameters,
                                       model_parameters,
                                       computer_parameters,
                                       True)

    predictions = model.predict(validation_data,
                                workers=12,
                                verbose=1)

    print("shape prediction", np.shape(predictions))
    print('** classes indices: **', validation_data.class_indices)

    if 'AffectNet' in dataset_parameters['dataset_name']:
        print(classification_report(validation_data.classes,
                                    predictions.argmax(axis=1),
                                    target_names=dataset_parameters['class_names']))
    else:
        print(classification_report(validation_data.classes,
                                    predictions.argmax(axis=1)))

    np.save('../metrics/{}/'.format(dataset_parameters['dataset_name'])
            + 'predictions', predictions)
if __name__ == '__main__':
    parser = ArgumentParser()
    # (short flag, long flag, help text) for each required setting.
    # NOTE: the -c help text mirrors the original file verbatim.
    for short_flag, long_flag, help_text in (
            ("-m", "--model", "select your model"),
            ("-d", "--dataset", "select your dataset"),
            ("-c", "--computer", "select your dataset")):
        parser.add_argument(short_flag, long_flag, help=help_text)
    args = parser.parse_args()
    predict_model(args.model, args.dataset, args.computer)
| [
"numpy.shape",
"json.load",
"tensorflow.keras.models.load_model",
"argparse.ArgumentParser"
] | [((1139, 1194), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (["model_parameters['weights']"], {}), "(model_parameters['weights'])\n", (1165, 1194), True, 'import tensorflow as tf\n'), ((2395, 2411), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2409, 2411), False, 'from argparse import ArgumentParser\n'), ((611, 631), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (620, 631), False, 'import json\n'), ((816, 836), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (825, 836), False, 'import json\n'), ((1105, 1125), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1114, 1125), False, 'import json\n'), ((1703, 1724), 'numpy.shape', 'np.shape', (['predictions'], {}), '(predictions)\n', (1711, 1724), True, 'import numpy as np\n')] |
from functools import partial
from multiprocessing.pool import Pool
import cv2
import numpy as np
import scipy as sp
import torch
from pytorch_toolbelt.utils.torch_utils import to_numpy
from tqdm import tqdm
from xview.dataset import read_mask
from xview.metric import CompetitionMetricCallback
from xview.postprocessing import make_predictions_naive
def _compute_fn(args, coef_exp):
    """Score one (prediction, ground-truth) pair after scaling by *coef_exp*."""
    prediction, ground_truth = args
    scaled = prediction.astype(np.float32) * coef_exp
    loc_pred, dmg_pred = make_predictions_naive(scaled)
    return CompetitionMetricCallback.get_row_pair(loc_pred, dmg_pred, ground_truth, ground_truth)
class AveragingOptimizedRounder(object):
    """Nelder-Mead search for per-class scaling coefficients that maximize the
    competition metric on averaged model predictions.

    Parameters
    ----------
    apply_softmax : str
        "pre"  - softmax each model's logits before averaging,
        "post" - softmax the averaged logits,
        anything else - no softmax.
    workers : int
        Number of worker processes used for metric evaluation.  0 (the
        default) now means "use all available cores"; the original passed 0
        straight to multiprocessing.Pool, which raises ValueError.
    """

    def __init__(self, apply_softmax, workers=0):
        self.coef_ = 0  # replaced by the scipy OptimizeResult after fit()
        self.workers = workers
        self.apply_softmax = apply_softmax

    @torch.no_grad()
    def _prepare_data(self, X, y):
        """Load and average per-model prediction files; load ground-truth masks.

        X is a list (one entry per model) of lists of prediction .npy paths;
        y is a list of ground-truth mask paths.  Returns (X_data, Y_data).
        """
        X_data = []
        n = len(X[0])  # number of samples
        m = len(X)     # number of models being averaged
        for i in tqdm(range(n), desc="Loading predictions"):
            x_preds = []
            for j in range(m):
                x = np.load(X[j][i])
                if self.apply_softmax == "pre":
                    # Softmax each model's logits before averaging; fp16 keeps
                    # the in-memory footprint manageable.
                    x = torch.from_numpy(x).float().softmax(dim=0).numpy().astype(np.float16)
                x_preds.append(x)

            x = np.mean(np.stack(x_preds), axis=0)
            if self.apply_softmax == "post":
                x = torch.from_numpy(x).float().softmax(dim=0).numpy().astype(np.float16)
            X_data.append(x)

        Y_data = [read_mask(yi) for yi in tqdm(y, desc="Loading ground-truths")]
        assert len(X_data) == len(Y_data)
        print("Loaded data into memory")
        return X_data, Y_data

    def _target_metric_loss(self, coef, X, y):
        """Minimizer objective: 1 - competition score at coefficient vector *coef*."""
        coef_exp = np.expand_dims(np.expand_dims(coef, -1), -1)

        all_rows = []
        proc_fn = partial(_compute_fn, coef_exp=coef_exp)
        # Pool(None) uses os.cpu_count(); Pool(0) would raise ValueError,
        # which is what the default workers=0 used to trigger.
        with Pool(self.workers or None) as wp:
            for row in wp.imap_unordered(proc_fn, zip(X, y)):
                all_rows.append(row)

        score, localization_f1, damage_f1, damage_f1s = CompetitionMetricCallback.compute_metrics(all_rows)
        print(score, localization_f1, damage_f1, damage_f1s, "coeffs", coef)
        return 1.0 - score

    def fit(self, X, y):
        """Run Nelder-Mead over 5 class coefficients; return the best vector."""
        X_data, Y_data = self._prepare_data(X, y)
        loss_partial = partial(self._target_metric_loss, X=X_data, y=Y_data)
        initial_coef = [1.0, 1.0, 1.0, 1.0, 1.0]
        self.coef_ = sp.optimize.minimize(
            loss_partial, initial_coef, method="nelder-mead", options={"maxiter": 100, "xatol": 0.001}
        )
        del X_data, Y_data  # free the (large) cached predictions
        return self.coefficients()

    def predict(self, X, y, coef: np.ndarray):
        """Evaluate the competition metrics for a fixed coefficient vector."""
        coef_exp = np.expand_dims(np.expand_dims(coef, -1), -1)
        all_rows = []
        X_data, Y_data = self._prepare_data(X, y)
        proc_fn = partial(_compute_fn, coef_exp=coef_exp)
        with Pool(self.workers or None) as wp:
            for row in wp.imap_unordered(proc_fn, zip(X_data, Y_data)):
                all_rows.append(row)

        score, localization_f1, damage_f1, damage_f1s = CompetitionMetricCallback.compute_metrics(all_rows)
        del X_data, Y_data
        return score, localization_f1, damage_f1, damage_f1s

    def coefficients(self):
        """Return the optimized coefficient vector found by fit()."""
        return self.coef_["x"]
| [
"numpy.stack",
"functools.partial",
"scipy.optimize.minimize",
"numpy.load",
"xview.metric.CompetitionMetricCallback.compute_metrics",
"tqdm.tqdm",
"numpy.expand_dims",
"xview.dataset.read_mask",
"xview.metric.CompetitionMetricCallback.get_row_pair",
"multiprocessing.pool.Pool",
"torch.no_grad",... | [((505, 583), 'xview.metric.CompetitionMetricCallback.get_row_pair', 'CompetitionMetricCallback.get_row_pair', (['loc_pred', 'dmg_pred', 'dmg_true', 'dmg_true'], {}), '(loc_pred, dmg_pred, dmg_true, dmg_true)\n', (543, 583), False, 'from xview.metric import CompetitionMetricCallback\n'), ((795, 810), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (808, 810), False, 'import torch\n'), ((1805, 1844), 'functools.partial', 'partial', (['_compute_fn'], {'coef_exp': 'coef_exp'}), '(_compute_fn, coef_exp=coef_exp)\n', (1812, 1844), False, 'from functools import partial\n'), ((2041, 2092), 'xview.metric.CompetitionMetricCallback.compute_metrics', 'CompetitionMetricCallback.compute_metrics', (['all_rows'], {}), '(all_rows)\n', (2082, 2092), False, 'from xview.metric import CompetitionMetricCallback\n'), ((2296, 2349), 'functools.partial', 'partial', (['self._target_metric_loss'], {'X': 'X_data', 'y': 'Y_data'}), '(self._target_metric_loss, X=X_data, y=Y_data)\n', (2303, 2349), False, 'from functools import partial\n'), ((2420, 2536), 'scipy.optimize.minimize', 'sp.optimize.minimize', (['loss_partial', 'initial_coef'], {'method': '"""nelder-mead"""', 'options': "{'maxiter': 100, 'xatol': 0.001}"}), "(loss_partial, initial_coef, method='nelder-mead',\n options={'maxiter': 100, 'xatol': 0.001})\n", (2440, 2536), True, 'import scipy as sp\n'), ((2822, 2861), 'functools.partial', 'partial', (['_compute_fn'], {'coef_exp': 'coef_exp'}), '(_compute_fn, coef_exp=coef_exp)\n', (2829, 2861), False, 'from functools import partial\n'), ((3068, 3119), 'xview.metric.CompetitionMetricCallback.compute_metrics', 'CompetitionMetricCallback.compute_metrics', (['all_rows'], {}), '(all_rows)\n', (3109, 3119), False, 'from xview.metric import CompetitionMetricCallback\n'), ((1475, 1488), 'xview.dataset.read_mask', 'read_mask', (['yi'], {}), '(yi)\n', (1484, 1488), False, 'from xview.dataset import read_mask\n'), ((1734, 1758), 'numpy.expand_dims', 'np.expand_dims', 
(['coef', '(-1)'], {}), '(coef, -1)\n', (1748, 1758), True, 'import numpy as np\n'), ((1859, 1877), 'multiprocessing.pool.Pool', 'Pool', (['self.workers'], {}), '(self.workers)\n', (1863, 1877), False, 'from multiprocessing.pool import Pool\n'), ((2699, 2723), 'numpy.expand_dims', 'np.expand_dims', (['coef', '(-1)'], {}), '(coef, -1)\n', (2713, 2723), True, 'import numpy as np\n'), ((2876, 2894), 'multiprocessing.pool.Pool', 'Pool', (['self.workers'], {}), '(self.workers)\n', (2880, 2894), False, 'from multiprocessing.pool import Pool\n'), ((1045, 1061), 'numpy.load', 'np.load', (['X[j][i]'], {}), '(X[j][i])\n', (1052, 1061), True, 'import numpy as np\n'), ((1263, 1280), 'numpy.stack', 'np.stack', (['x_preds'], {}), '(x_preds)\n', (1271, 1280), True, 'import numpy as np\n'), ((1499, 1536), 'tqdm.tqdm', 'tqdm', (['y'], {'desc': '"""Loading ground-truths"""'}), "(y, desc='Loading ground-truths')\n", (1503, 1536), False, 'from tqdm import tqdm\n'), ((1356, 1375), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1372, 1375), False, 'import torch\n'), ((1134, 1153), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (1150, 1153), False, 'import torch\n')] |
# The main test script for the project
# GenSparseMatrix takes an input density and generates an nxn sparse matrix using a uniform distribution.
# It ensures that the resulting sparse matrix is diagonally dominant
import numpy as np
from scipy import sparse
from sparse_methods import *
from jacobi import *
#from LUPP import *
def GenSparseMatrix(n, density):
    """Generate an n x n diagonally dominant dense matrix from a sparse draw.

    Entries come from a uniform sparse sample (fixed random_state=1, so the
    result is deterministic); each diagonal entry is then bumped just above
    its off-diagonal row sum when necessary.
    """
    rand_dok = sparse.rand(n, n, density, format='dok', random_state=1)
    M = np.zeros((n, n))
    for (row, col), value in rand_dok.items():
        M[row, col] = value
    # Enforce diagonal dominance row by row.
    for i in range(n):
        off_diag = sum(M[i, :]) - M[i, i]
        if off_diag > M[i, i]:
            M[i, i] = off_diag + 0.01
    return M
def create_three_band_matrix(size, mean, std, factor):
    """Build a random tridiagonal, diagonally dominant matrix.

    Band (off-diagonal) entries are N(mean, std) draws.  Each diagonal entry
    has magnitude |N(mean, std)| + factor + sum(|band neighbors in row|) and
    a uniformly random sign, so for factor > 0 every row is strictly
    diagonally dominant.

    Improvements over the original: the three near-identical edge/middle
    branches are collapsed into one generic neighbor loop, and size == 1 no
    longer raises IndexError (the old ``i == 0`` branch always touched
    ``matrix[0][1]``).
    """
    matrix = np.eye(size, dtype=float, k=0)
    matrix += np.eye(size, dtype=float, k=1)
    matrix += np.eye(size, dtype=float, k=-1)
    for i in range(size):
        # Valid band neighbors of row i (right neighbor first, matching the
        # original draw order for interior rows).
        neighbors = [j for j in (i + 1, i - 1) if 0 <= j < size]
        for j in neighbors:
            matrix[i][j] *= np.random.normal(mean, std)
        off_diag_mag = sum(np.abs(matrix[i][j]) for j in neighbors)
        matrix[i][i] *= np.abs(np.random.normal(mean, std)) + factor + off_diag_mag
        matrix[i][i] *= np.random.choice([-1, 1])
    return matrix
if __name__ == "__main__1":
A = GenSparseMatrix(1000, 0.2)
b = np.ones(1000, dtype=np.float64)
Acsr = MatrixtoCSR(A)
#x, t = LUPP(A, b)
#print t
"""
x, er, it, t = Jacobi(A, b)
print er, it, t
"""
x = SparseJacobi(Acsr, b, 10e-6)
print ("done")
if __name__ == "__main__":
# M = np.array([[1.32465, 0, 0, 0],
# [5.234, 8.6, 0, 0],
# [0, 0, 3.456, 10],
# [0, 6, 0, -1.0997]])
M = create_three_band_matrix(1000,0,10,50)
b = np.random.rand(1000,1)
# M = np.array([[10, 20, 0, 0, 0, 0],
# [0, 30, 0, 40, 0, 0],
# [0, 0, 50, 60, 70, 0],
# [0, 0, 0, 0, 0, 80]])
csr = MatrixtoCSR(M)
#pprint(csr)
print (SparseGet(csr, 3, 3))
m = csr['m']
# A = np.array([[10., -1., 2., 0.],
# [-1., 11., -1., 3.],
# [2., -1., 10., -1.],
# [0.0, 3., -1., 8.]])
# csr1 = MatrixtoCSR(A)
#b = np.array([6., 25., -11., 15.])
# print (SparseGet(csr1, 3, 3))
#x, error, itr = Jacobi(A, b, 1e-10)
xsp = SparseJacobi(csr, b, 10e-6)
#print ("Error:", error, errorsp)
#print ("Iters:", itr, itrsp)
print ("x =", xsp)
# D = np.zeros(m, dtype=csr['A'].dtype)
# #D = []
# for i in range(m):
# D[i] = SparseGet(csr, i, i)
# print(D) | [
"numpy.abs",
"numpy.zeros",
"numpy.ones",
"scipy.sparse.rand",
"numpy.random.normal",
"numpy.random.choice",
"numpy.random.rand",
"numpy.eye"
] | [((384, 440), 'scipy.sparse.rand', 'sparse.rand', (['n', 'n', 'density'], {'format': '"""dok"""', 'random_state': '(1)'}), "(n, n, density, format='dok', random_state=1)\n", (395, 440), False, 'from scipy import sparse\n'), ((450, 466), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (458, 466), True, 'import numpy as np\n'), ((803, 833), 'numpy.eye', 'np.eye', (['size'], {'dtype': 'float', 'k': '(0)'}), '(size, dtype=float, k=0)\n', (809, 833), True, 'import numpy as np\n'), ((850, 880), 'numpy.eye', 'np.eye', (['size'], {'dtype': 'float', 'k': '(1)'}), '(size, dtype=float, k=1)\n', (856, 880), True, 'import numpy as np\n'), ((897, 928), 'numpy.eye', 'np.eye', (['size'], {'dtype': 'float', 'k': '(-1)'}), '(size, dtype=float, k=-1)\n', (903, 928), True, 'import numpy as np\n'), ((1739, 1770), 'numpy.ones', 'np.ones', (['(1000)'], {'dtype': 'np.float64'}), '(1000, dtype=np.float64)\n', (1746, 1770), True, 'import numpy as np\n'), ((2222, 2245), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(1)'], {}), '(1000, 1)\n', (2236, 2245), True, 'import numpy as np\n'), ((998, 1025), 'numpy.random.normal', 'np.random.normal', (['mean', 'std'], {}), '(mean, std)\n', (1014, 1025), True, 'import numpy as np\n'), ((1137, 1162), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (1153, 1162), True, 'import numpy as np\n'), ((1093, 1117), 'numpy.abs', 'np.abs', (['matrix[i][i + 1]'], {}), '(matrix[i][i + 1])\n', (1099, 1117), True, 'import numpy as np\n'), ((1207, 1234), 'numpy.random.normal', 'np.random.normal', (['mean', 'std'], {}), '(mean, std)\n', (1223, 1234), True, 'import numpy as np\n'), ((1346, 1371), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (1362, 1371), True, 'import numpy as np\n'), ((1402, 1429), 'numpy.random.normal', 'np.random.normal', (['mean', 'std'], {}), '(mean, std)\n', (1418, 1429), True, 'import numpy as np\n'), ((1452, 1479), 'numpy.random.normal', 'np.random.normal', 
(['mean', 'std'], {}), '(mean, std)\n', (1468, 1479), True, 'import numpy as np\n'), ((1616, 1641), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (1632, 1641), True, 'import numpy as np\n'), ((1302, 1326), 'numpy.abs', 'np.abs', (['matrix[i][i - 1]'], {}), '(matrix[i][i - 1])\n', (1308, 1326), True, 'import numpy as np\n'), ((1573, 1597), 'numpy.abs', 'np.abs', (['matrix[i][i + 1]'], {}), '(matrix[i][i + 1])\n', (1579, 1597), True, 'import numpy as np\n'), ((1053, 1080), 'numpy.random.normal', 'np.random.normal', (['mean', 'std'], {}), '(mean, std)\n', (1069, 1080), True, 'import numpy as np\n'), ((1547, 1571), 'numpy.abs', 'np.abs', (['matrix[i][i - 1]'], {}), '(matrix[i][i - 1])\n', (1553, 1571), True, 'import numpy as np\n'), ((1262, 1289), 'numpy.random.normal', 'np.random.normal', (['mean', 'std'], {}), '(mean, std)\n', (1278, 1289), True, 'import numpy as np\n'), ((1507, 1534), 'numpy.random.normal', 'np.random.normal', (['mean', 'std'], {}), '(mean, std)\n', (1523, 1534), True, 'import numpy as np\n')] |
from __future__ import print_function, absolute_import
import os, sys, subprocess, shlex, tempfile, time, sklearn.base, math
import numpy as np
import pandas as pd
from pandas_extensions import *
from ExeEstimator import *
class LibFFMClassifier(ExeEstimator, sklearn.base.ClassifierMixin):
  '''
  Wraps the libffm command line tools (ffm-train.exe / ffm-predict.exe).

  options:
  -l <lambda>: set regularization parameter (default 0)
  -k <factor>: set number of latent factors (default 4)
  -t <iteration>: set number of iterations (default 15)
  -r <eta>: set learning rate (default 0.1)
  -s <nr_threads>: set number of threads (default 1)
  -p <path>: set path to the validation set
  --quiet: quiet model (no output)
  --norm: do instance-wise normalization
  --no-rand: disable random update

  `--norm' helps you to do instance-wise normalization. When it is enabled,
  you can simply assign `1' to `value' in the data.
  '''
  def __init__(self, columns, lambda_v=0, factor=4, iteration=15, eta=0.1,
      nr_threads=1, quiet=False, normalize=None, no_rand=None):
    ExeEstimator.__init__(self)

    self.columns = columns.tolist() if hasattr(columns, 'tolist') else columns
    self.lambda_v = lambda_v
    self.factor = factor
    self.iteration = iteration
    self.eta = eta
    self.nr_threads = nr_threads
    self.quiet = quiet
    self.normalize = normalize
    self.no_rand = no_rand

  def fit(self, X, y=None):
    """Write X/y in libffm format, run ffm-train.exe and remember the model file."""
    if type(X) is str: train_file = X
    else:
      if not hasattr(X, 'values'): X = pd.DataFrame(X, columns=self.columns)
      train_file = self.save_reusable('_libffm_train', 'to_libffm', X, y)

    self._model_file = self.tmpfile('_libffm_model')

    # Build the ffm-train command line from the configured hyperparameters.
    # (The original referenced undefined names v/r/n/a/s here, raising
    # NameError; the flags map to the constructor arguments per the class
    # docstring: -l lambda, -k factor, -t iterations, -r eta, -s threads.)
    command = 'utils/lib/ffm-train.exe' + ' -l ' + repr(self.lambda_v) + \
      ' -k ' + repr(self.factor) + ' -t ' + repr(self.iteration) + \
      ' -r ' + repr(self.eta) + ' -s ' + repr(self.nr_threads)
    if self.quiet: command += ' --quiet'
    if self.normalize: command += ' --norm'
    if self.no_rand: command += ' --no-rand'
    command += ' ' + train_file
    command += ' ' + self._model_file

    running_process = self.make_subprocess(command)
    self.close_process(running_process)
    return self

  def predict(self, X):
    """Write X in libffm format, run ffm-predict.exe and return the raw scores."""
    if type(X) is str: test_file = X
    else:
      if not hasattr(X, 'values'): X = pd.DataFrame(X, columns=self.columns)
      test_file = self.save_reusable('_libffm_test', 'to_libffm', X)

    output_file = self.tmpfile('_libffm_predictions')
    command = 'utils/lib/ffm-predict.exe ' + test_file + ' ' + self._model_file + ' ' + output_file

    running_process = self.make_subprocess(command)
    self.close_process(running_process)
    preds = list(self.read_predictions(output_file))
    return preds

  def predict_proba(self, X):
    """Return [P(class 0), P(class 1)] rows by applying a sigmoid to the raw scores."""
    # A list comprehension (not a lazy map object): under Python 3,
    # np.asarray(map(...)) produced a 0-d object array and broke the
    # arithmetic below.
    predictions = np.asarray([1 / (1 + math.exp(-p)) for p in self.predict(X)])
    return np.vstack([1 - predictions, predictions]).T
| [
"pandas.DataFrame",
"math.exp",
"numpy.vstack"
] | [((2904, 2945), 'numpy.vstack', 'np.vstack', (['[1 - predictions, predictions]'], {}), '([1 - predictions, predictions])\n', (2913, 2945), True, 'import numpy as np\n'), ((1496, 1533), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'self.columns'}), '(X, columns=self.columns)\n', (1508, 1533), True, 'import pandas as pd\n'), ((2335, 2372), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'self.columns'}), '(X, columns=self.columns)\n', (2347, 2372), True, 'import pandas as pd\n'), ((2859, 2871), 'math.exp', 'math.exp', (['(-p)'], {}), '(-p)\n', (2867, 2871), False, 'import os, sys, subprocess, shlex, tempfile, time, sklearn.base, math\n')] |
"""
Author: <NAME> (<EMAIL>, http://personales.upv.es/jon)
Version: 1.0
Date: June 2014
Universitat Politecnica de Valencia
Technical University of Valencia TU.VLC
"""
import sys
import numpy
from . import MyKernel
class MyKernelClassifier:
"""
This class implements a classifier based on Kernel Density Estimator.
The purpose is to classify each sample according to the class with higher probability density.
"""
def __init__(self, h = None):
self.num_classes = 0
self.dim = 0
self.targets = None
self.estimators = None # Kernel Density Estimators, one per class
self.h = h
# ------------------------------------------------------------------------------
def fit(self, X, Y):
self.dim = X.shape[1]
# Establish the value of 'h' if not set previously
if self.h is None: self.h = max(7, 2.5 * self.dim)
self.targets = numpy.unique(Y)
self.num_classes = len(self.targets)
# Separate the training samples of each class in order to do the estimation
samples_per_class = []
for k in range(self.num_classes):
samples_per_class.append(X[Y == self.targets[k]])
kernel = 'gaussian' # This could be a parameter for the constructor, but the
# current implementation of MyKernel.py doesn't allow a
# different kernel type.
self.estimators = []
for k in range(self.num_classes):
self.estimators.append(MyKernel(kernel = kernel, bandwidth = self.h))
self.estimators[k].fit(samples_per_class[k])
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def predict(self, X):
Y = numpy.zeros(len(X), dtype = type(self.targets[0]))
best_log_dens = numpy.zeros(len(X))
for k in range(self.num_classes):
log_dens = self.estimators[k].score_samples(X)
if 0 == k :
best_log_dens[:] = log_dens[:]
Y[:] = self.targets[0]
else:
for n in range(len(X)):
if log_dens[n] > best_log_dens[n]:
best_log_dens[n] = log_dens[n]
Y[n] = self.targets[k]
return Y
# ------------------------------------------------------------------------------
| [
"numpy.unique"
] | [((949, 964), 'numpy.unique', 'numpy.unique', (['Y'], {}), '(Y)\n', (961, 964), False, 'import numpy\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, lstsq, hankel
from scipy.signal import convolve2d
from mas.forward_model import add_noise
N = 100
m, n = np.arange(N), np.arange(N)
omega_m = np.array((4.00001,))
omega_n = np.array((8.00002,))
y = np.sum(
np.e**(
1j * 2 * np.pi *
(
omega_m[np.newaxis, np.newaxis, :, np.newaxis] *
m[:, np.newaxis, np.newaxis, np.newaxis] +
omega_n[np.newaxis, np.newaxis, np.newaxis, :] *
n[np.newaxis, :, np.newaxis, np.newaxis]
) / N
), axis=(2, 3)
)
n_filt = np.repeat(
np.array(((1, -np.e**(1j * 2 * np.pi * omega_n[0] / N)),)),
1,
axis=0
)
m_filt = np.repeat(
np.array(((1, -np.e**(1j * 2 * np.pi * omega_m[0] / N)),)),
1,
axis=0
).T
correct_h = convolve2d(m_filt, n_filt)
y_hat_m = y[:-1, :]
b_hat_m = -y[len(omega_m):, :]
y_hat_n = y[:, :-1]
b_hat_n = -y[:, len(omega_m):]
# FIXME
# assert 2 * len(y_hat) > len(omega), "Not enough samples for Prony's method recovery"
def zeropad(x, padded_size):
"""zeropad 1D array x to size padded_size"""
return np.pad(x, [(0, padded_size - x.shape[0]), (0, padded_size - x.shape[1])], mode='constant')
padded_size = len(y_hat_m) + len(b_hat_m) - 1
h_hat_m = np.fft.ifft2(
np.fft.fft2(zeropad(b_hat_m, padded_size)) / np.fft.fft2(zeropad(y_hat_m, padded_size))
)
h_m = [1, h_hat_m[0, 0]]
padded_size = len(y_hat_n) + len(b_hat_n) - 1
h_hat_n = np.fft.ifft2(
np.fft.fft2(zeropad(b_hat_n, padded_size)) / np.fft.fft2(zeropad(y_hat_n, padded_size))
)
h_n = [1, h_hat_n[0, 0]]
omega_m_reconstructed = np.log(np.roots(h_m)) / (1j * 2 * np.pi / 100)
omega_n_reconstructed = np.log(np.roots(h_n)) / (1j * 2 * np.pi / 100)
# plt.subplot(3, 1, 1)
# plt.title('y')
# plt.plot(y)
# plt.subplot(3, 1, 2)
# plt.title('y DFT')
# plt.plot(np.abs(np.fft.fft(y)))
# plt.subplot(3, 1, 3)
# plt.title('prony estimate')
# y_reconstructed = np.zeros(N * 100) # 100X resolution
# y_reconstructed[omega_reconstructed.real.astype(int) * 100] = 1
# plt.plot(np.linspace(0, N, N * 100), y_reconstructed)
# plt.tight_layout()
# plt.show()
| [
"numpy.pad",
"numpy.roots",
"numpy.sum",
"scipy.signal.convolve2d",
"numpy.arange",
"numpy.array"
] | [((229, 249), 'numpy.array', 'np.array', (['(4.00001,)'], {}), '((4.00001,))\n', (237, 249), True, 'import numpy as np\n'), ((260, 280), 'numpy.array', 'np.array', (['(8.00002,)'], {}), '((8.00002,))\n', (268, 280), True, 'import numpy as np\n'), ((285, 537), 'numpy.sum', 'np.sum', (['(np.e ** (1.0j * 2 * np.pi * (omega_m[np.newaxis, np.newaxis, :, np.newaxis\n ] * m[:, np.newaxis, np.newaxis, np.newaxis] + omega_n[np.newaxis, np.\n newaxis, np.newaxis, :] * n[np.newaxis, :, np.newaxis, np.newaxis]) / N))'], {'axis': '(2, 3)'}), '(np.e ** (1.0j * 2 * np.pi * (omega_m[np.newaxis, np.newaxis, :, np.\n newaxis] * m[:, np.newaxis, np.newaxis, np.newaxis] + omega_n[np.\n newaxis, np.newaxis, np.newaxis, :] * n[np.newaxis, :, np.newaxis, np.\n newaxis]) / N), axis=(2, 3))\n', (291, 537), True, 'import numpy as np\n'), ((828, 854), 'scipy.signal.convolve2d', 'convolve2d', (['m_filt', 'n_filt'], {}), '(m_filt, n_filt)\n', (838, 854), False, 'from scipy.signal import convolve2d\n'), ((192, 204), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (201, 204), True, 'import numpy as np\n'), ((206, 218), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (215, 218), True, 'import numpy as np\n'), ((630, 692), 'numpy.array', 'np.array', (['((1, -np.e ** (1.0j * 2 * np.pi * omega_n[0] / N)),)'], {}), '(((1, -np.e ** (1.0j * 2 * np.pi * omega_n[0] / N)),))\n', (638, 692), True, 'import numpy as np\n'), ((1144, 1238), 'numpy.pad', 'np.pad', (['x', '[(0, padded_size - x.shape[0]), (0, padded_size - x.shape[1])]'], {'mode': '"""constant"""'}), "(x, [(0, padded_size - x.shape[0]), (0, padded_size - x.shape[1])],\n mode='constant')\n", (1150, 1238), True, 'import numpy as np\n'), ((734, 796), 'numpy.array', 'np.array', (['((1, -np.e ** (1.0j * 2 * np.pi * omega_m[0] / N)),)'], {}), '(((1, -np.e ** (1.0j * 2 * np.pi * omega_m[0] / N)),))\n', (742, 796), True, 'import numpy as np\n'), ((1649, 1662), 'numpy.roots', 'np.roots', (['h_m'], {}), '(h_m)\n', (1657, 1662), True, 'import 
numpy as np\n'), ((1720, 1733), 'numpy.roots', 'np.roots', (['h_n'], {}), '(h_n)\n', (1728, 1733), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Python wrapper for the GROMACS rms module
"""
import os
import sys
import json
import ntpath
import numpy as np
import configuration.settings as settings
from command_wrapper import cmd_wrapper
from tools import file_utils as fu
class Rms(object):
"""Wrapper for the trjconv module
Args:
input_gro_path (str): Path to the original (before launching the trajectory) GROMACS structure file GRO.
input_trr_paht (str): Path to the GROMACS uncompressed raw trajectory file TRR.
output_xvg_path (str): Path to the simple xmgrace plot file XVG.
properties (dic):
gmx_path (str): Path to the GROMACS executable binary.
"""
def __init__(self, input_gro_path, input_trr_path, output_xvg_path,
properties, **kwargs):
if isinstance(properties, basestring):
properties=json.loads(properties)
self.input_gro_path = input_gro_path
self.input_trr_path = input_trr_path
self.output_xvg_path = output_xvg_path
self.gmx_path = properties.get('gmx_path',None)
self.mutation = properties.get('mutation',None)
self.step = properties.get('step',None)
self.path = properties.get('path','')
self.mpirun = properties.get('mpirun', False)
self.mpirun_np = properties.get('mpirun_np', None)
def launch(self):
"""Launches the execution of the GROMACS rms module.
"""
out_log, err_log = fu.get_logs(path=self.path, mutation=self.mutation, step=self.step)
gmx = 'gmx' if self.gmx_path is 'None' else self.gmx_path
cmd = [gmx, 'rms', '-xvg', 'none',
'-s', self.input_gro_path,
'-f', self.input_trr_path,
'-o', self.output_xvg_path]
if self.mpirun_np is not None:
cmd.insert(0, str(self.mpirun_np))
cmd.insert(0, '-np')
if self.mpirun:
cmd.insert(0, 'mpirun')
cmd.append('<<<')
cmd.append('\"'+"$'0\n0\n'"+'\"')
else:
cmd.insert(0, '|')
cmd.insert(0, '\"'+'0 0'+'\"')
cmd.insert(0, 'echo')
command = cmd_wrapper.CmdWrapper(cmd, out_log, err_log)
command.launch()
xvg = self.output_xvg_path if os.path.isfile(self.output_xvg_path) else ntpath.basename(self.output_xvg_path)
self.mutation = '' if self.mutation is None else self.mutation
return {self.mutation: np.loadtxt(xvg)}
#Creating a main function to be compatible with CWL
def main():
system=sys.argv[1]
step=sys.argv[2]
properties_file=sys.argv[3]
prop = settings.YamlReader(properties_file, system).get_prop_dic()[step]
Rms(input_gro_path=sys.argv[4],
input_trr_path=sys.argv[5],
output_xvg_path=sys.argv[6],
properties=prop).launch()
if __name__ == '__main__':
main()
| [
"ntpath.basename",
"json.loads",
"command_wrapper.cmd_wrapper.CmdWrapper",
"os.path.isfile",
"numpy.loadtxt",
"tools.file_utils.get_logs",
"configuration.settings.YamlReader"
] | [((1484, 1551), 'tools.file_utils.get_logs', 'fu.get_logs', ([], {'path': 'self.path', 'mutation': 'self.mutation', 'step': 'self.step'}), '(path=self.path, mutation=self.mutation, step=self.step)\n', (1495, 1551), True, 'from tools import file_utils as fu\n'), ((2185, 2230), 'command_wrapper.cmd_wrapper.CmdWrapper', 'cmd_wrapper.CmdWrapper', (['cmd', 'out_log', 'err_log'], {}), '(cmd, out_log, err_log)\n', (2207, 2230), False, 'from command_wrapper import cmd_wrapper\n'), ((881, 903), 'json.loads', 'json.loads', (['properties'], {}), '(properties)\n', (891, 903), False, 'import json\n'), ((2294, 2330), 'os.path.isfile', 'os.path.isfile', (['self.output_xvg_path'], {}), '(self.output_xvg_path)\n', (2308, 2330), False, 'import os\n'), ((2336, 2373), 'ntpath.basename', 'ntpath.basename', (['self.output_xvg_path'], {}), '(self.output_xvg_path)\n', (2351, 2373), False, 'import ntpath\n'), ((2476, 2491), 'numpy.loadtxt', 'np.loadtxt', (['xvg'], {}), '(xvg)\n', (2486, 2491), True, 'import numpy as np\n'), ((2645, 2689), 'configuration.settings.YamlReader', 'settings.YamlReader', (['properties_file', 'system'], {}), '(properties_file, system)\n', (2664, 2689), True, 'import configuration.settings as settings\n')] |
from typing import TYPE_CHECKING
import numpy as np
from ..types.ndarray import get_array_type
if TYPE_CHECKING:
from ..types.ndarray import ArrayType
def pdist(
x_mat: 'ArrayType',
metric: str,
) -> 'np.ndarray':
"""Computes Pairwise distances between observations in n-dimensional space.
:param x_mat: Union['np.ndarray','scipy.sparse.csr_matrix', 'scipy.sparse.coo_matrix'] of ndim 2
:param metric: string describing the metric type
:return: np.ndarray of ndim 2
"""
return cdist(x_mat, x_mat, metric)
def cdist(
x_mat: 'ArrayType',
y_mat: 'ArrayType',
metric: str,
) -> 'np.ndarray':
"""Computes the pairwise distance between each row of X and each row on Y according to `metric`.
- Let `n_x = x_mat.shape[0]`
- Let `n_y = y_mat.shape[0]`
- Returns a matrix `dist` of shape `(n_x, n_y)` with `dist[i,j] = metric(x_mat[i], y_mat[j])`.
:param x_mat: numpy or scipy array of ndim 2
:param y_mat: numpy or scipy array of ndim 2
:param metric: string describing the metric type
:return: np.ndarray of ndim 2
"""
x_type = get_array_type(x_mat)
y_type = get_array_type(y_mat)
if x_type != y_type:
raise ValueError(
f'The type of your left-hand side is {x_type}, whereas your right-hand side is {y_type}. '
f'`.match()` requires left must be the same type as right.'
)
is_sparse = get_array_type(x_mat)[1]
if metric == 'cosine':
if is_sparse:
dists = sparse_cosine(x_mat, y_mat)
else:
dists = cosine(x_mat, y_mat)
elif metric == 'sqeuclidean':
if is_sparse:
dists = sparse_sqeuclidean(x_mat, y_mat)
else:
dists = sqeuclidean(x_mat, y_mat)
elif metric == 'euclidean':
if is_sparse:
dists = np.sqrt(sparse_sqeuclidean(x_mat, y_mat))
else:
dists = np.sqrt(sqeuclidean(x_mat, y_mat))
else:
raise ValueError(f'Input metric={metric} not valid')
return dists
def cosine(x_mat: 'np.ndarray', y_mat: 'np.ndarray', eps: float = 1e-7) -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:param eps: a small jitter to avoid divde by zero
:return: np.ndarray with ndim=2
"""
return 1 - np.clip(
(np.dot(x_mat, y_mat.T) + eps)
/ (
np.outer(np.linalg.norm(x_mat, axis=1), np.linalg.norm(y_mat, axis=1)) + eps
),
-1,
1,
)
def sqeuclidean(x_mat: 'np.ndarray', y_mat: 'np.ndarray') -> 'np.ndarray':
"""squared Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: np.ndarray with ndim=2
:param y_mat: np.ndarray with ndim=2
:return: np.ndarray with ndim=2
"""
return (
np.sum(y_mat ** 2, axis=1)
+ np.sum(x_mat ** 2, axis=1)[:, np.newaxis]
- 2 * np.dot(x_mat, y_mat.T)
)
def sparse_cosine(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
from scipy.sparse.linalg import norm
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return 1 - np.clip(
np.asarray(
x_mat.dot(y_mat.T) / (np.outer(norm(x_mat, axis=1), norm(y_mat, axis=1)))
),
-1,
1,
)
def sparse_sqeuclidean(x_mat: 'ArrayType', y_mat: 'ArrayType') -> 'np.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
:param x_mat: scipy.sparse like array with ndim=2
:param y_mat: scipy.sparse like array with ndim=2
:return: np.ndarray with ndim=2
"""
# we need the np.asarray otherwise we get a np.matrix object that iterates differently
return np.asarray(
y_mat.power(2).sum(axis=1).flatten()
+ x_mat.power(2).sum(axis=1)
- 2 * x_mat.dot(y_mat.T)
)
| [
"scipy.sparse.linalg.norm",
"numpy.dot",
"numpy.sum",
"numpy.linalg.norm"
] | [((2904, 2930), 'numpy.sum', 'np.sum', (['(y_mat ** 2)'], {'axis': '(1)'}), '(y_mat ** 2, axis=1)\n', (2910, 2930), True, 'import numpy as np\n'), ((2997, 3019), 'numpy.dot', 'np.dot', (['x_mat', 'y_mat.T'], {}), '(x_mat, y_mat.T)\n', (3003, 3019), True, 'import numpy as np\n'), ((2941, 2967), 'numpy.sum', 'np.sum', (['(x_mat ** 2)'], {'axis': '(1)'}), '(x_mat ** 2, axis=1)\n', (2947, 2967), True, 'import numpy as np\n'), ((2426, 2448), 'numpy.dot', 'np.dot', (['x_mat', 'y_mat.T'], {}), '(x_mat, y_mat.T)\n', (2432, 2448), True, 'import numpy as np\n'), ((2489, 2518), 'numpy.linalg.norm', 'np.linalg.norm', (['x_mat'], {'axis': '(1)'}), '(x_mat, axis=1)\n', (2503, 2518), True, 'import numpy as np\n'), ((2520, 2549), 'numpy.linalg.norm', 'np.linalg.norm', (['y_mat'], {'axis': '(1)'}), '(y_mat, axis=1)\n', (2534, 2549), True, 'import numpy as np\n'), ((3550, 3569), 'scipy.sparse.linalg.norm', 'norm', (['x_mat'], {'axis': '(1)'}), '(x_mat, axis=1)\n', (3554, 3569), False, 'from scipy.sparse.linalg import norm\n'), ((3571, 3590), 'scipy.sparse.linalg.norm', 'norm', (['y_mat'], {'axis': '(1)'}), '(y_mat, axis=1)\n', (3575, 3590), False, 'from scipy.sparse.linalg import norm\n')] |
from general_utils.data_storage_classes.stock_cluster import StockCluster
from stock_data_analysis_module.data_processing_module.data_retrieval_module.ranged_data_retriever import RangedDataRetriever
import numpy as np
from datetime import date, datetime
def date_to_timestamp(date_in: date):
return datetime.fromisoformat(date_in.isoformat()).timestamp()
def ensure_data_length_consistency(ticker_data):
last_hist_date = ticker_data[0][-1][0]
max_length = len(ticker_data[0])
for data_list in ticker_data:
if len(data_list) == 0:
continue
if not data_list[-1][0] == last_hist_date:
raise ValueError("Data did not end on the same date, recovery impossible")
if len(data_list) > max_length:
max_length = len(data_list)
# todo:
# go element by element ensuring that all dates are matched
# if not, then the date's data becomes the average of the date preceding and following it.
to_remove = []
for i in range(len(ticker_data)):
data_list = ticker_data[i]
if not len(data_list) == max_length:
to_remove.append(i)
return to_remove
def getStrongestCoefficient(coeffList, currIndex):
maxCoeff = -2
maxIndex = -1
for i in range(len(coeffList)):
if i == currIndex:
continue
if abs(coeffList[i]) > maxCoeff:
maxCoeff = abs(coeffList[i])
maxIndex = i
coeffList[maxIndex] = 0
return maxIndex, maxCoeff
def retrieveTopNStrongestCoefficients(coefficients, n):
retCoeffPairs = []
for i in range(len(coefficients)):
considered = coefficients[i]
coeffs = []
for _ in range(n):
coeffs.append(getStrongestCoefficient(considered, i)[0])
retCoeffPairs.append((i, coeffs))
return retCoeffPairs
def constructCluster(coefficientPair, tickerList, tickerData):
mainTicker = tickerList[coefficientPair[0]]
mainTickerData = tickerData[coefficientPair[0]]
supportingTickers = []
supportingTickerData = []
for i in range(len(coefficientPair[1])):
supportTicker = tickerList[coefficientPair[1][i]]
supportData = tickerData[coefficientPair[1][i]]
supportingTickers.append(supportTicker)
supportingTickerData.append(supportData)
return StockCluster(mainTicker, mainTickerData, supportingTickers, supportingTickerData)
class StockClusterCreator:
def __init__(self, start_date, end_date, similar_tickers=5, columns=None):
if columns is None:
column_list = ['hist_date', 'adj_close']
else:
# this is intentional to allow duplicates. When a data requestor wants the historical date in
# their dataset, this duplicate is required as this class removes the first entry from the date
# it returns
column_list = ['hist_date']
for column in columns:
column_list.append(column)
self.dataRetriever = RangedDataRetriever(column_list, start_date, end_date)
self.similarTickers = similar_tickers
def createClusters(self, ticker_list):
# retrieve data for tickers
ticker_data = [self.dataRetriever.retrieveData(ticker) for ticker in ticker_list]
# ensure data all has same length
to_remove_indices = ensure_data_length_consistency(ticker_data)
to_remove_tickers = [ticker_list[i] for i in to_remove_indices]
to_remove_data = [ticker_data[i] for i in to_remove_indices]
for toRem in to_remove_tickers:
ticker_list.remove(toRem)
for toRem in to_remove_data:
ticker_data.remove(toRem)
# remove the first element from each data entry, as it is the historical date.
for i in range(len(ticker_data)):
curr_data = ticker_data[i]
ticker_data[i] = [x[1:] for x in curr_data]
# determine whether to shift correlation check by one element due to included dates
start_index = 0
if type(ticker_data[0][0][0]) == date:
start_index = 1
# have numpy calculate average correlation coefficient
coefficients = np.zeros((len(ticker_data), len(ticker_data)))
for i in range(start_index, len(ticker_data[0][0])):
temp_data = [[y[i] for y in x] for x in ticker_data]
coefficients += abs(np.corrcoef(temp_data))
coefficients /= len(ticker_data[0][0])-1
# grab top n coefficients for each stock
coeff_pairs = retrieveTopNStrongestCoefficients(coefficients, self.similarTickers)
# construct clusters from top n coefficients for each ticker
ret_clusters = []
for i in range(len(coeff_pairs)):
ret_clusters.append(constructCluster(coeff_pairs[i], ticker_list, ticker_data))
return ret_clusters
| [
"numpy.corrcoef",
"general_utils.data_storage_classes.stock_cluster.StockCluster",
"stock_data_analysis_module.data_processing_module.data_retrieval_module.ranged_data_retriever.RangedDataRetriever"
] | [((2409, 2494), 'general_utils.data_storage_classes.stock_cluster.StockCluster', 'StockCluster', (['mainTicker', 'mainTickerData', 'supportingTickers', 'supportingTickerData'], {}), '(mainTicker, mainTickerData, supportingTickers,\n supportingTickerData)\n', (2421, 2494), False, 'from general_utils.data_storage_classes.stock_cluster import StockCluster\n'), ((3096, 3150), 'stock_data_analysis_module.data_processing_module.data_retrieval_module.ranged_data_retriever.RangedDataRetriever', 'RangedDataRetriever', (['column_list', 'start_date', 'end_date'], {}), '(column_list, start_date, end_date)\n', (3115, 3150), False, 'from stock_data_analysis_module.data_processing_module.data_retrieval_module.ranged_data_retriever import RangedDataRetriever\n'), ((4531, 4553), 'numpy.corrcoef', 'np.corrcoef', (['temp_data'], {}), '(temp_data)\n', (4542, 4553), True, 'import numpy as np\n')] |
import plac
from os import path
import numpy as np
from scipy import sparse
from scipy.io import savemat
from cmmlib.inout import load_mesh, save_coff
from cmmlib import cmm
@plac.annotations(
K=('number of CMHBs', 'positional', None, int),
mu=('sparsity parameter mu', 'positional', None, float),
visualize=('visualize the weights?', 'flag', 'v'),
scaled=('respect triangle scaling?', 'flag', 's'),
output_dir=('output directory', 'option', 'o'),
maxiter=('maximum number of iterations', 'option', None, int),
ply=('output ply file?', 'flag', None),
off=('output off file?', 'flag', None)
)
def main(input_filename, K, mu, output_dir=None, visualize=False, scaled=False,
maxiter=None, ply=False, off=False):
if (off or ply) and not output_dir:
print ("please specify an output directory")
return 1
if output_dir and not path.exists(output_dir):
print("%s does not exist" % output_dir)
return 2
verts, tris = load_mesh(input_filename, normalize=True)
print ("%d vertices, %d faces" % (len(verts), len(tris)))
Phi_cpr, D = cmm.compressed_manifold_modes(
verts, tris, K, mu=mu, scaled=scaled,
maxiter=maxiter, verbose=100, return_D=True)
if D is None:
D_diag = np.ones(len(verts))
D = sparse.eye(len(verts))
else:
D_diag = D.data
if visualize:
from cmmlib.vis.weights import show_weights
show_weights(verts, tris, Phi_cpr)
if output_dir:
# save in simple text format
np.savetxt(path.join(output_dir, 'phi.txt'), Phi_cpr, fmt='%f')
np.savetxt(path.join(output_dir, 'D_diag.txt'), D_diag, fmt='%f')
# save in matlab format
savemat(path.join(output_dir, 'phi.mat'),
dict(verts=verts, tris=tris+1, phi=Phi_cpr, D=D))
# save HDF5 format if possible
try:
import h5py
except ImportError:
print ("Cannot save as HDF5, please install the h5py module")
else:
with h5py.File(path.join(output_dir, 'phi.h5'), 'w') as f:
f['verts'] = verts
f['tris'] = tris
f['phi'] = Phi_cpr
f['d_diag'] = D_diag
# save NPY format
np.save(path.join(output_dir, 'phi.npy'), Phi_cpr)
np.save(path.join(output_dir, 'D_diag.npy'), Phi_cpr)
if off or ply:
# map phi scalars to colors
from mayavi.core.lut_manager import LUTManager
from cmmlib.vis.weights import _centered
lut = LUTManager(lut_mode='RdBu').lut.table.to_array()[:, :3]
colors = [
lut[(_centered(Phi_cpr[:, k]) * (lut.shape[0]-1)).astype(int)]
for k in range(K)]
# save in a single scene as a collage
w = int(np.ceil(np.sqrt(K))) if K > 6 else K
spacing = 1.2 * verts.ptp(axis=0)
all_verts = [verts + spacing * (1.5, 0, 0)]
all_tris = [tris]
all_color = [np.zeros(verts.shape, np.int) + 127]
for k in range(K):
all_verts.append(verts + spacing * (-(k % w), 0, int(k / w)))
all_tris.append(tris + len(verts) * (k+1))
all_color.append(colors[k])
if off:
save_coff(path.join(output_dir, 'input.off'),
verts.astype(np.float32), tris)
for k in range(K):
save_coff(path.join(output_dir, 'cmh_%03d.off' % k),
verts.astype(np.float32), tris, colors[k])
save_coff(path.join(output_dir, 'all.off'),
np.vstack(all_verts), np.vstack(all_tris),
np.vstack(all_color))
if ply:
from tvtk.api import tvtk
pd = tvtk.PolyData(
points=np.vstack(all_verts).astype(np.float32),
polys=np.vstack(all_tris).astype(np.uint32))
pd.point_data.scalars = np.vstack(all_color).astype(np.uint8)
pd.point_data.scalars.name = 'colors'
ply = tvtk.PLYWriter(
file_name=path.join(output_dir, 'all.ply'),
input=pd, color=(1, 1, 1))
ply.array_name = 'colors'
ply.write()
if __name__ == '__main__':
plac.call(main)
| [
"plac.annotations",
"cmmlib.vis.weights.show_weights",
"os.path.exists",
"numpy.zeros",
"plac.call",
"cmmlib.vis.weights._centered",
"cmmlib.inout.load_mesh",
"cmmlib.cmm.compressed_manifold_modes",
"mayavi.core.lut_manager.LUTManager",
"os.path.join",
"numpy.vstack",
"numpy.sqrt"
] | [((178, 622), 'plac.annotations', 'plac.annotations', ([], {'K': "('number of CMHBs', 'positional', None, int)", 'mu': "('sparsity parameter mu', 'positional', None, float)", 'visualize': "('visualize the weights?', 'flag', 'v')", 'scaled': "('respect triangle scaling?', 'flag', 's')", 'output_dir': "('output directory', 'option', 'o')", 'maxiter': "('maximum number of iterations', 'option', None, int)", 'ply': "('output ply file?', 'flag', None)", 'off': "('output off file?', 'flag', None)"}), "(K=('number of CMHBs', 'positional', None, int), mu=(\n 'sparsity parameter mu', 'positional', None, float), visualize=(\n 'visualize the weights?', 'flag', 'v'), scaled=(\n 'respect triangle scaling?', 'flag', 's'), output_dir=(\n 'output directory', 'option', 'o'), maxiter=(\n 'maximum number of iterations', 'option', None, int), ply=(\n 'output ply file?', 'flag', None), off=('output off file?', 'flag', None))\n", (194, 622), False, 'import plac\n'), ((1000, 1041), 'cmmlib.inout.load_mesh', 'load_mesh', (['input_filename'], {'normalize': '(True)'}), '(input_filename, normalize=True)\n', (1009, 1041), False, 'from cmmlib.inout import load_mesh, save_coff\n'), ((1122, 1239), 'cmmlib.cmm.compressed_manifold_modes', 'cmm.compressed_manifold_modes', (['verts', 'tris', 'K'], {'mu': 'mu', 'scaled': 'scaled', 'maxiter': 'maxiter', 'verbose': '(100)', 'return_D': '(True)'}), '(verts, tris, K, mu=mu, scaled=scaled, maxiter\n =maxiter, verbose=100, return_D=True)\n', (1151, 1239), False, 'from cmmlib import cmm\n'), ((4410, 4425), 'plac.call', 'plac.call', (['main'], {}), '(main)\n', (4419, 4425), False, 'import plac\n'), ((1456, 1490), 'cmmlib.vis.weights.show_weights', 'show_weights', (['verts', 'tris', 'Phi_cpr'], {}), '(verts, tris, Phi_cpr)\n', (1468, 1490), False, 'from cmmlib.vis.weights import show_weights\n'), ((891, 914), 'os.path.exists', 'path.exists', (['output_dir'], {}), '(output_dir)\n', (902, 914), False, 'from os import path\n'), ((1567, 1599), 'os.path.join', 
'path.join', (['output_dir', '"""phi.txt"""'], {}), "(output_dir, 'phi.txt')\n", (1576, 1599), False, 'from os import path\n'), ((1639, 1674), 'os.path.join', 'path.join', (['output_dir', '"""D_diag.txt"""'], {}), "(output_dir, 'D_diag.txt')\n", (1648, 1674), False, 'from os import path\n'), ((1743, 1775), 'os.path.join', 'path.join', (['output_dir', '"""phi.mat"""'], {}), "(output_dir, 'phi.mat')\n", (1752, 1775), False, 'from os import path\n'), ((2290, 2322), 'os.path.join', 'path.join', (['output_dir', '"""phi.npy"""'], {}), "(output_dir, 'phi.npy')\n", (2299, 2322), False, 'from os import path\n'), ((2349, 2384), 'os.path.join', 'path.join', (['output_dir', '"""D_diag.npy"""'], {}), "(output_dir, 'D_diag.npy')\n", (2358, 2384), False, 'from os import path\n'), ((2063, 2094), 'os.path.join', 'path.join', (['output_dir', '"""phi.h5"""'], {}), "(output_dir, 'phi.h5')\n", (2072, 2094), False, 'from os import path\n'), ((3046, 3075), 'numpy.zeros', 'np.zeros', (['verts.shape', 'np.int'], {}), '(verts.shape, np.int)\n', (3054, 3075), True, 'import numpy as np\n'), ((3342, 3376), 'os.path.join', 'path.join', (['output_dir', '"""input.off"""'], {}), "(output_dir, 'input.off')\n", (3351, 3376), False, 'from os import path\n'), ((3643, 3675), 'os.path.join', 'path.join', (['output_dir', '"""all.off"""'], {}), "(output_dir, 'all.off')\n", (3652, 3675), False, 'from os import path\n'), ((3703, 3723), 'numpy.vstack', 'np.vstack', (['all_verts'], {}), '(all_verts)\n', (3712, 3723), True, 'import numpy as np\n'), ((3725, 3744), 'numpy.vstack', 'np.vstack', (['all_tris'], {}), '(all_tris)\n', (3734, 3744), True, 'import numpy as np\n'), ((3772, 3792), 'numpy.vstack', 'np.vstack', (['all_color'], {}), '(all_color)\n', (3781, 3792), True, 'import numpy as np\n'), ((2860, 2870), 'numpy.sqrt', 'np.sqrt', (['K'], {}), '(K)\n', (2867, 2870), True, 'import numpy as np\n'), ((3501, 3542), 'os.path.join', 'path.join', (['output_dir', "('cmh_%03d.off' % k)"], {}), "(output_dir, 
'cmh_%03d.off' % k)\n", (3510, 3542), False, 'from os import path\n'), ((4066, 4086), 'numpy.vstack', 'np.vstack', (['all_color'], {}), '(all_color)\n', (4075, 4086), True, 'import numpy as np\n'), ((4226, 4258), 'os.path.join', 'path.join', (['output_dir', '"""all.ply"""'], {}), "(output_dir, 'all.ply')\n", (4235, 4258), False, 'from os import path\n'), ((2589, 2616), 'mayavi.core.lut_manager.LUTManager', 'LUTManager', ([], {'lut_mode': '"""RdBu"""'}), "(lut_mode='RdBu')\n", (2599, 2616), False, 'from mayavi.core.lut_manager import LUTManager\n'), ((2689, 2713), 'cmmlib.vis.weights._centered', '_centered', (['Phi_cpr[:, k]'], {}), '(Phi_cpr[:, k])\n', (2698, 2713), False, 'from cmmlib.vis.weights import _centered\n'), ((3920, 3940), 'numpy.vstack', 'np.vstack', (['all_verts'], {}), '(all_verts)\n', (3929, 3940), True, 'import numpy as np\n'), ((3987, 4006), 'numpy.vstack', 'np.vstack', (['all_tris'], {}), '(all_tris)\n', (3996, 4006), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def white_mask(original):
"""
Create a mask from the whitish pixels of the frame
"""
# specify the range of colours that you want to include, you can play with the borders here
lower_white = (190, 100, 100)
upper_white = (255, 255, 255)
white = cv2.inRange(original, lower_white, upper_white)
mask = np.zeros_like(white)
mask[white > 0] = 1
mask = np.asarray(mask, np.float)
return mask
def yellow_mask_rgb(original):
"""
Create a mask from the yellowish pixels of the frame
"""
original = cv2.cvtColor(original, cv2.COLOR_BGR2RGB)
lower_yellow = (230, 120, 0)
upper_yellow = (255, 255, 180)
yellow = cv2.inRange(original, lower_yellow, upper_yellow)
# cv2.imshow('Yellow', yellow)
mask = np.zeros_like(yellow)
mask[yellow > 0] = 1
mask = np.asarray(mask, np.float)
return mask
def yellow_mask_hsv(original):
    """
    Create a binary mask (float array of 0.0/1.0) from the yellowish pixels
    of the frame, thresholding in HSV space.

    :param original: input frame (BGR, OpenCV's default channel order)
    :return: float array, 1.0 where the pixel falls inside the yellow range
    """
    HSV = cv2.cvtColor(original, cv2.COLOR_BGR2HSV)
    lower_HSV = (0, 90, 100)
    upper_HSV = (80, 255, 255)
    yellow_HSV = cv2.inRange(HSV, lower_HSV, upper_HSV)
    mask = np.zeros_like(yellow_HSV)
    mask[yellow_HSV > 0] = 1
    # FIX: the np.float alias was removed in NumPy 1.24; use the builtin float.
    mask = np.asarray(mask, float)
    return mask
def yellow_mask(original):
    """
    Create a mask from the yellowish pixels of the frame by OR-combining the
    RGB-space and RGB+HSV-space yellow masks.
    """
    rgb_hits = yellow_mask_rgb(original)
    hsv_hits = yellow_mask_hsv(original)
    # a pixel counts as yellow if either detector flagged it
    return np.where((hsv_hits == 1) | (rgb_hits == 1), 1.0, 0.0)
def white_yellow_mask(original):
    """Combine the white and yellow masks into a single binary float mask."""
    yellow_hits = yellow_mask(original)
    white_hits = white_mask(original)
    # a pixel counts if either the white or the yellow detector flagged it
    return np.where((white_hits == 1) | (yellow_hits == 1), 1.0, 0.0)
| [
"cv2.cvtColor",
"numpy.asarray",
"numpy.zeros_like",
"cv2.inRange"
] | [((306, 353), 'cv2.inRange', 'cv2.inRange', (['original', 'lower_white', 'upper_white'], {}), '(original, lower_white, upper_white)\n', (317, 353), False, 'import cv2\n'), ((366, 386), 'numpy.zeros_like', 'np.zeros_like', (['white'], {}), '(white)\n', (379, 386), True, 'import numpy as np\n'), ((424, 450), 'numpy.asarray', 'np.asarray', (['mask', 'np.float'], {}), '(mask, np.float)\n', (434, 450), True, 'import numpy as np\n'), ((589, 630), 'cv2.cvtColor', 'cv2.cvtColor', (['original', 'cv2.COLOR_BGR2RGB'], {}), '(original, cv2.COLOR_BGR2RGB)\n', (601, 630), False, 'import cv2\n'), ((714, 763), 'cv2.inRange', 'cv2.inRange', (['original', 'lower_yellow', 'upper_yellow'], {}), '(original, lower_yellow, upper_yellow)\n', (725, 763), False, 'import cv2\n'), ((812, 833), 'numpy.zeros_like', 'np.zeros_like', (['yellow'], {}), '(yellow)\n', (825, 833), True, 'import numpy as np\n'), ((871, 897), 'numpy.asarray', 'np.asarray', (['mask', 'np.float'], {}), '(mask, np.float)\n', (881, 897), True, 'import numpy as np\n'), ((1032, 1073), 'cv2.cvtColor', 'cv2.cvtColor', (['original', 'cv2.COLOR_BGR2HSV'], {}), '(original, cv2.COLOR_BGR2HSV)\n', (1044, 1073), False, 'import cv2\n'), ((1154, 1192), 'cv2.inRange', 'cv2.inRange', (['HSV', 'lower_HSV', 'upper_HSV'], {}), '(HSV, lower_HSV, upper_HSV)\n', (1165, 1192), False, 'import cv2\n'), ((1205, 1230), 'numpy.zeros_like', 'np.zeros_like', (['yellow_HSV'], {}), '(yellow_HSV)\n', (1218, 1230), True, 'import numpy as np\n'), ((1273, 1299), 'numpy.asarray', 'np.asarray', (['mask', 'np.float'], {}), '(mask, np.float)\n', (1283, 1299), True, 'import numpy as np\n'), ((1537, 1555), 'numpy.zeros_like', 'np.zeros_like', (['rgb'], {}), '(rgb)\n', (1550, 1555), True, 'import numpy as np\n'), ((1727, 1747), 'numpy.zeros_like', 'np.zeros_like', (['white'], {}), '(white)\n', (1740, 1747), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from collections import OrderedDict
from scipy import interp
from sklearn.metrics import auc
from sklearn.metrics.ranking import _binary_clf_curve
from pohmm import Pohmm, PohmmClassifier
# CMU Keystroke Dynamics Benchmark Dataset
# See: http://www.cs.cmu.edu/~keystroke/
# <NAME> and <NAME>. "Comparing Anomaly Detectors for Keystroke Dynamics"
DATASET_URL = 'http://www.cs.cmu.edu/~keystroke/DSL-StrongPasswordData.csv'
def stratified_kfold(df, n_folds):
    """
    Create stratified k-folds from an indexed dataframe.

    Stratification is per user: every user's sessions are shuffled and split
    evenly across the folds, so each fold contains sessions of every user.

    :param df: dataframe indexed by a (user, session) MultiIndex
    :param n_folds: number of folds to create
    :return: list of n_folds dataframes, each a disjoint subset of df
    """
    # map each user (index level 0) to the array of its unique session ids
    sessions = pd.DataFrame.from_records(list(df.index.unique())).groupby(0).apply(lambda x: x[1].unique())
    # shuffle each user's sessions in place before slicing into folds
    sessions.apply(lambda x: np.random.shuffle(x))
    folds = []
    for i in range(n_folds):
        # take the i-th equally-sized slice of each user's shuffled sessions
        idx = sessions.apply(lambda x: pd.Series(x[i * (len(x) // n_folds):(i + 1) * (len(x) // n_folds)]))
        # rebuild the (user, session) index tuples belonging to this fold
        idx = pd.DataFrame(idx.stack().reset_index(level=1, drop=True)).set_index(0, append=True).index.values
        folds.append(df.loc[idx])
    return folds
def user_folds(df, target):
    """
    Split df into one frame per user, each re-indexed by (target, 'session').

    :param df: dataframe whose index level 0 identifies the user
    :param target: name of the column to become the first index level
    :return: list with one dataframe per user
    """
    folds = []
    for user in df.index.get_level_values(0).unique():
        frame = df.loc[user].reset_index()
        folds.append(frame.set_index([target, 'session']))
    return folds
def preprocess(df):
    """Convert the CMU dataset from row vectors into time/duration row observations.

    Each CSV row holds one repetition of the 11-keystroke password
    '.tie5Roanl' followed by Enter; it is exploded into 11 per-keystroke
    rows with press-press latency (tau) and hold duration (both scaled by
    1000 — presumably seconds to milliseconds; confirm against the dataset),
    indexed by (user, session).
    """
    def process_row(idx_row):
        idx, row = idx_row
        # press-press latency
        tau = 1000 * row[4::3].astype(float).values
        # the first keystroke has no preceding press: use the median latency
        tau = np.r_[np.median(tau), tau]
        # duration
        duration = 1000 * row[3::3].astype(float).values
        keyname = list('.tie5Roanl') + ['enter']
        # session ids are made unique across repetitions: sessionIndex*100 + rep
        return pd.DataFrame.from_dict(OrderedDict([
            ('user', [row['subject']] * 11),
            ('session', [row['sessionIndex'] * 100 + row['rep']] * 11),
            ('tau', tau),
            ('duration', duration),
            ('event', keyname)
        ]))
    df = pd.concat(map(process_row, df.iterrows())).set_index(['user', 'session'])
    return df
def roc_curve(y_true, y_score):
    """
    Compute the false-accept / false-reject trade-off of scored binary
    decisions, ordered by decreasing threshold.

    FIX: the original depended on the private, since-relocated
    ``sklearn.metrics.ranking._binary_clf_curve``; this inlines the
    equivalent cumulative-count computation with NumPy only.

    :param y_true: binary labels (bool or 0/1), 1 = genuine
    :param y_score: scores, higher = more likely genuine
    :return: (far, frr, thresholds) arrays, where far is the false-accept
             rate and frr = 1 - tpr is the false-reject rate
    """
    y_true = np.asarray(y_true).astype(int)
    y_score = np.asarray(y_score, dtype=float)
    # sort by decreasing score (stable sort keeps tied order deterministic)
    order = np.argsort(y_score, kind='mergesort')[::-1]
    y_true = y_true[order]
    y_score = y_score[order]
    # keep only the last index of each run of equal score values
    distinct = np.where(np.diff(y_score))[0]
    threshold_idxs = np.r_[distinct, y_true.size - 1]
    tps = np.cumsum(y_true)[threshold_idxs]
    fps = 1 + threshold_idxs - tps
    thresholds = y_score[threshold_idxs]
    if tps.size == 0 or fps[0] != 0:
        # Add an extra threshold position so the curve starts at (0, 0)
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1e-2, thresholds]
    fpr = fps / fps[-1]
    tpr = tps / tps[-1]
    return fpr, 1 - tpr, thresholds
def ROC(scores):
    """
    Build a per-user FAR/FRR table from a frame with columns
    ['user', 'genuine', 'score'], interpolated onto a shared threshold grid.

    :param scores: dataframe of verification scores
    :return: dataframe with columns ['user', 'threshold', 'far', 'frr']
    """
    # Generate an ROC curve for each fold, ordered by increasing threshold
    roc = scores.groupby('user').apply(lambda x: pd.DataFrame(np.c_[roc_curve(x['genuine'], x['score'])][::-1],
                                                              columns=['far', 'frr', 'threshold']))
    # interpolate to get the same threshold values in each fold
    thresholds = np.sort(roc['threshold'].unique())
    # FIX: scipy.interp was a deprecated alias of np.interp and has been
    # removed from modern SciPy; call the NumPy function directly.
    roc = roc.groupby(level='user').apply(lambda x: pd.DataFrame(np.c_[thresholds,
                                                                       np.interp(thresholds, x['threshold'], x['far']),
                                                                       np.interp(thresholds, x['threshold'], x['frr'])],
                                                                 columns=['threshold', 'far', 'frr']))
    roc = roc.reset_index(level=1, drop=True).reset_index()
    return roc
def EER(roc):
    """
    Equal error rate: the value where the FAR and FRR curves cross, found by
    intersecting the two straight segments that straddle the crossing.

    :param roc: dataframe with 'far' and 'frr' columns sampled on a common grid
    :return: FAR (= FRR) at the crossing point
    """
    far, frr = roc['far'].values, roc['frr'].values
    def perp(a):
        # 2-D perpendicular: rotate the vector a by 90 degrees
        b = np.empty_like(a)
        b[0] = -a[1]
        b[1] = a[0]
        return b
    # line segment a given by endpoints a1, a2
    # line segment b given by endpoints b1, b2
    def seg_intersect(a1, a2, b1, b2):
        # line intersection via the perp-dot-product formulation
        da = a2 - a1
        db = b2 - b1
        dp = a1 - b1
        dap = perp(da)
        denom = np.dot(dap, db)
        num = np.dot(dap, dp)
        return (num / denom) * db + b1
    d = far <= frr
    # first index where the sign of (far - frr) flips, i.e. the crossing
    idx = np.diff(d).nonzero()[0][0]
    # intersect the two segments over [idx, idx+1]; [1] extracts the y value
    return seg_intersect(np.array([idx, far[idx]]),
                         np.array([idx + 1, far[idx + 1]]),
                         np.array([idx, frr[idx]]),
                         np.array([idx + 1, frr[idx + 1]]))[1]
def AUC(roc):
    """Area under the FAR/FRR curve, as computed by sklearn's auc helper."""
    frr_values = roc['frr'].values
    far_values = roc['far'].values
    return auc(frr_values, far_values)
def keystroke_model():
    """Build a 2-state POHMM with lognormal emissions and frequency smoothing."""
    return Pohmm(
        n_hidden_states=2,
        init_spread=2,
        emissions=[('duration', 'lognormal'), ('tau', 'lognormal')],
        smoothing='freq',
        init_method='obs',
        thresh=1,
    )
def identification(df, n_folds=5, seed=1234):
    """
    Evaluate user identification accuracy with stratified k-fold cross
    validation and print a per-fold accuracy summary to stdout.

    :param df: dataframe indexed by (user, session)
    :param n_folds: number of stratified folds
    :param seed: RNG seed so the session shuffling is reproducible
    """
    # Obtain identification results using k-fold cross validation
    np.random.seed(seed)
    folds = stratified_kfold(df, n_folds)
    identification_results = []
    for i in range(n_folds):
        print('Fold %d of %d' % (i + 1, n_folds))
        # hold out fold i; train on the concatenation of every other fold
        test_idx, test_samples = zip(*folds[i].groupby(level=[0, 1]))
        train_idx, train_samples = zip(*pd.concat(folds[:i] + folds[i + 1:]).groupby(level=[0, 1]))
        test_labels, _ = zip(*test_idx)
        train_labels, _ = zip(*train_idx)
        cl = PohmmClassifier(keystroke_model)
        cl.fit_df(train_labels, train_samples)
        # predict the user of every held-out sample
        for test_label, test_sample in zip(test_labels, test_samples):
            result, _ = cl.predict_df(test_sample)
            identification_results.append((i, test_label, result))
    identification_results = pd.DataFrame.from_records(identification_results,
                                                   columns=['fold', 'label', 'prediction'])
    # per-fold accuracy = fraction of held-out samples whose user was predicted
    acc_summary = identification_results.groupby('fold').apply(
        lambda x: (x['label'] == x['prediction']).sum() / len(x)).describe()
    print('Identification summary')
    print('ACC: %.3f +/- %.3f' % (acc_summary['mean'], acc_summary['std']))
    return
def verification(df):
    """
    Evaluate per-user verification (genuine vs impostor) and print EER/AUC
    summaries to stdout.

    For every user a POHMM is trained on 50 of their samples; the remaining
    genuine samples plus the first 5 samples of every other user are scored
    against the model.

    :param df: dataframe indexed by (user, session)
    """
    verification_results = []
    users = set(df.index.get_level_values(level='user').unique())
    for genuine_user in users:
        impostor_users = users.difference([genuine_user])
        genuine_samples = df.loc[genuine_user]
        _, genuine_samples = zip(*genuine_samples.groupby(level='session'))
        # train on samples 150-199; everything from 200 on is genuine test data
        train, test = genuine_samples[150:200], genuine_samples[200:]
        pohmm = keystroke_model()
        pohmm.fit_df(train)
        # train_scores = np.array([pohmm.score_df(sample) for sample in train])
        scores = []
        for sample in test:
            score = pohmm.score_df(sample)
            scores.append(score)
            verification_results.append((genuine_user, True, score))
        for imposter_user in impostor_users:
            _, impostor_samples = zip(*df.loc[imposter_user].groupby(level='session'))
            # score the first 5 sessions of each impostor against this user's model
            for sample in impostor_samples[:5]:
                score = pohmm.score_df(sample)
                scores.append(score)
                verification_results.append((genuine_user, False, score))
    verification_results = pd.DataFrame.from_records(verification_results,
                                                 columns=['user', 'genuine', 'score'])
    # per-user ROC, then summarize equal-error-rate and area-under-curve
    verification_ROC = verification_results.groupby('user').apply(ROC)
    del verification_ROC['user']
    eer_summary = verification_ROC.groupby('user').apply(EER).describe()
    auc_summary = verification_ROC.groupby('user').apply(AUC).describe()
    print('Verification summary')
    print('EER: %.3f +/- %.3f' % (eer_summary['mean'], eer_summary['std']))
    print('AUC: %.3f +/- %.3f' % (auc_summary['mean'], auc_summary['std']))
    return
def main(n_users=10):
    """
    Run the full keystroke-dynamics benchmark on the first n_users subjects.

    :param n_users: number of subjects to keep (400 CSV rows per subject)
    """
    print('This example takes about 15 minutes to run on an Intel i5...')
    # Download and preprocess the CMU dataset
    df = pd.read_csv(DATASET_URL)
    df = df[:400*n_users]  # keep 400 rows per subject — TODO confirm against the CSV layout
    df = preprocess(df)
    # Verification results obtained using the 4th session as training data,
    # sessions 5-8 as genuine and reps 1-5 as impostor
    verification(df)
    # Identification results obtained by 5-fold stratified cross validation using only the last session
    identification(df.groupby(level=0).apply(lambda x: x[-(11 * 50):]).reset_index(level=0, drop=True))
if __name__ == '__main__':
    # script entry point: download the data and run both benchmarks
    main()
| [
"numpy.random.seed",
"sklearn.metrics.ranking._binary_clf_curve",
"pohmm.Pohmm",
"pandas.read_csv",
"numpy.median",
"numpy.empty_like",
"pohmm.PohmmClassifier",
"sklearn.metrics.auc",
"numpy.diff",
"numpy.array",
"pandas.DataFrame.from_records",
"collections.OrderedDict",
"numpy.dot",
"sci... | [((2060, 2130), 'sklearn.metrics.ranking._binary_clf_curve', '_binary_clf_curve', (['y_true', 'y_score'], {'pos_label': 'None', 'sample_weight': 'None'}), '(y_true, y_score, pos_label=None, sample_weight=None)\n', (2077, 2130), False, 'from sklearn.metrics.ranking import _binary_clf_curve\n'), ((4159, 4200), 'sklearn.metrics.auc', 'auc', (["roc['frr'].values", "roc['far'].values"], {}), "(roc['frr'].values, roc['far'].values)\n", (4162, 4200), False, 'from sklearn.metrics import auc\n'), ((4323, 4475), 'pohmm.Pohmm', 'Pohmm', ([], {'n_hidden_states': '(2)', 'init_spread': '(2)', 'emissions': "[('duration', 'lognormal'), ('tau', 'lognormal')]", 'smoothing': '"""freq"""', 'init_method': '"""obs"""', 'thresh': '(1)'}), "(n_hidden_states=2, init_spread=2, emissions=[('duration', 'lognormal'\n ), ('tau', 'lognormal')], smoothing='freq', init_method='obs', thresh=1)\n", (4328, 4475), False, 'from pohmm import Pohmm, PohmmClassifier\n'), ((4693, 4713), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4707, 4713), True, 'import numpy as np\n'), ((5437, 5531), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['identification_results'], {'columns': "['fold', 'label', 'prediction']"}), "(identification_results, columns=['fold', 'label',\n 'prediction'])\n", (5462, 5531), True, 'import pandas as pd\n'), ((6956, 7045), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['verification_results'], {'columns': "['user', 'genuine', 'score']"}), "(verification_results, columns=['user', 'genuine',\n 'score'])\n", (6981, 7045), True, 'import pandas as pd\n'), ((7698, 7722), 'pandas.read_csv', 'pd.read_csv', (['DATASET_URL'], {}), '(DATASET_URL)\n', (7709, 7722), True, 'import pandas as pd\n'), ((3452, 3468), 'numpy.empty_like', 'np.empty_like', (['a'], {}), '(a)\n', (3465, 3468), True, 'import numpy as np\n'), ((3763, 3778), 'numpy.dot', 'np.dot', (['dap', 'db'], {}), '(dap, db)\n', (3769, 3778), True, 'import numpy as np\n'), 
((3793, 3808), 'numpy.dot', 'np.dot', (['dap', 'dp'], {}), '(dap, dp)\n', (3799, 3808), True, 'import numpy as np\n'), ((5137, 5169), 'pohmm.PohmmClassifier', 'PohmmClassifier', (['keystroke_model'], {}), '(keystroke_model)\n', (5152, 5169), False, 'from pohmm import Pohmm, PohmmClassifier\n'), ((709, 729), 'numpy.random.shuffle', 'np.random.shuffle', (['x'], {}), '(x)\n', (726, 729), True, 'import numpy as np\n'), ((1665, 1838), 'collections.OrderedDict', 'OrderedDict', (["[('user', [row['subject']] * 11), ('session', [row['sessionIndex'] * 100 +\n row['rep']] * 11), ('tau', tau), ('duration', duration), ('event', keyname)\n ]"], {}), "([('user', [row['subject']] * 11), ('session', [row[\n 'sessionIndex'] * 100 + row['rep']] * 11), ('tau', tau), ('duration',\n duration), ('event', keyname)])\n", (1676, 1838), False, 'from collections import OrderedDict\n'), ((3930, 3955), 'numpy.array', 'np.array', (['[idx, far[idx]]'], {}), '([idx, far[idx]])\n', (3938, 3955), True, 'import numpy as np\n'), ((3982, 4015), 'numpy.array', 'np.array', (['[idx + 1, far[idx + 1]]'], {}), '([idx + 1, far[idx + 1]])\n', (3990, 4015), True, 'import numpy as np\n'), ((4042, 4067), 'numpy.array', 'np.array', (['[idx, frr[idx]]'], {}), '([idx, frr[idx]])\n', (4050, 4067), True, 'import numpy as np\n'), ((4094, 4127), 'numpy.array', 'np.array', (['[idx + 1, frr[idx + 1]]'], {}), '([idx + 1, frr[idx + 1]])\n', (4102, 4127), True, 'import numpy as np\n'), ((1478, 1492), 'numpy.median', 'np.median', (['tau'], {}), '(tau)\n', (1487, 1492), True, 'import numpy as np\n'), ((3878, 3888), 'numpy.diff', 'np.diff', (['d'], {}), '(d)\n', (3885, 3888), True, 'import numpy as np\n'), ((3012, 3056), 'scipy.interp', 'interp', (['thresholds', "x['threshold']", "x['far']"], {}), "(thresholds, x['threshold'], x['far'])\n", (3018, 3056), False, 'from scipy import interp\n'), ((3129, 3173), 'scipy.interp', 'interp', (['thresholds', "x['threshold']", "x['frr']"], {}), "(thresholds, x['threshold'], x['frr'])\n", 
(3135, 3173), False, 'from scipy import interp\n'), ((4980, 5016), 'pandas.concat', 'pd.concat', (['(folds[:i] + folds[i + 1:])'], {}), '(folds[:i] + folds[i + 1:])\n', (4989, 5016), True, 'import pandas as pd\n')] |
"""
pid_control
- <NAME>, PUP, 2012
- Last Update:
2/6/2019 - RWB
"""
import sys
import numpy as np
sys.path.append('..')
class pidControl:
    """
    Discrete-time PID controller with trapezoidal integration, a
    band-limited ("dirty") derivative and integrator anti-windup.
    """
    def __init__(self, kp=0.0, ki=0.0, kd=0.0, Ts=0.01, sigma=0.05, limit=1.0):
        self.kp = kp
        self.ki = ki
        self.kd = kd
        self.Ts = Ts
        self.limit = limit
        self.integrator = 0.0
        self.error_delay_1 = 0.0
        self.error_dot_delay_1 = 0.0
        self.y_dot = 0.0
        self.y_delay_1 = 0.0
        self.y_dot_delay_1 = 0.0
        # coefficients of the band-limited differentiator
        denom = 2.0 * sigma + Ts
        self.a1 = (2.0 * sigma - Ts) / denom
        self.a2 = 2.0 / denom

    def update(self, y_ref, y, reset_flag=False):
        """Saturated PID command for reference y_ref and measurement y."""
        if reset_flag is True:
            self.integrator = 0.0
            self.error_delay_1 = 0.0
            self.y_dot = 0.0
            self.y_delay_1 = 0.0
            self.y_dot_delay_1 = 0.0
        error = y_ref - y
        # trapezoidal-rule integral of the error
        self.integrator += (self.Ts / 2) * (error + self.error_delay_1)
        # filtered derivative of the error
        error_dot = self.a1 * self.error_dot_delay_1 + self.a2 * (error - self.error_delay_1)
        u = self.kp * error + self.ki * self.integrator + self.kd * error_dot
        u_sat = self._saturate(u)
        # anti-windup: bleed the integrator back while the output is saturated
        if np.abs(self.ki) > 0.0001:
            self.integrator += (self.Ts / self.ki) * (u_sat - u)
        self.error_delay_1 = error
        self.error_dot_delay_1 = error_dot
        return u_sat

    def update_with_rate(self, y_ref, y, ydot, reset_flag=False):
        """Saturated PID command when the rate ydot is measured directly."""
        if reset_flag is True:
            self.integrator = 0.0
            self.error_delay_1 = 0.0
        error = y_ref - y
        # trapezoidal-rule integral of the error
        self.integrator += (self.Ts / 2) * (error + self.error_delay_1)
        u = self.kp * error + self.ki * self.integrator - self.kd * ydot
        u_sat = self._saturate(u)
        # anti-windup: bleed the integrator back while the output is saturated
        if np.abs(self.ki) > 0.0001:
            self.integrator += (self.Ts / self.ki) * (u_sat - u)
        self.error_delay_1 = error
        return u_sat

    def _saturate(self, u):
        # clamp the command to [-limit, +limit]
        if u >= self.limit:
            return self.limit
        if u <= -self.limit:
            return -self.limit
        return u
class piControl:
    """Discrete-time PI controller with trapezoidal integration and anti-windup."""
    def __init__(self, kp=0.0, ki=0.0, Ts=0.01, limit=1.0):
        self.kp = kp
        self.ki = ki
        self.Ts = Ts
        self.limit = limit
        self.integrator = 0.0
        self.error_delay_1 = 0.0

    def update(self, y_ref, y):
        """Saturated PI command for reference y_ref and measurement y."""
        error = y_ref - y
        # trapezoidal-rule integral of the error
        self.integrator += (self.Ts / 2) * (error + self.error_delay_1)
        u = self.kp * error + self.ki * self.integrator
        u_sat = self._saturate(u)
        # anti-windup: bleed the integrator back while the output is saturated
        if np.abs(self.ki) > 0.0001:
            self.integrator += (self.Ts / self.ki) * (u_sat - u)
        self.error_delay_1 = error
        return u_sat

    def _saturate(self, u):
        # clamp the command to [-limit, +limit]
        if u >= self.limit:
            return self.limit
        if u <= -self.limit:
            return -self.limit
        return u
class pdControlWithRate:
    """PD control with rate information: u = kp*(yref - y) - kd*ydot."""
    def __init__(self, kp=0.0, kd=0.0, limit=1.0):
        self.kp = kp
        self.kd = kd
        self.limit = limit

    def update(self, y_ref, y, ydot):
        """Saturated PD command using the measured rate ydot."""
        u = self.kp * (y_ref - y) - self.kd * ydot
        return self._saturate(u)

    def _saturate(self, u):
        # clamp the command to [-limit, +limit]
        if u >= self.limit:
            return self.limit
        if u <= -self.limit:
            return -self.limit
        return u
"sys.path.append",
"numpy.abs"
] | [((116, 137), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (131, 137), False, 'import sys\n'), ((1607, 1622), 'numpy.abs', 'np.abs', (['self.ki'], {}), '(self.ki)\n', (1613, 1622), True, 'import numpy as np\n'), ((2575, 2590), 'numpy.abs', 'np.abs', (['self.ki'], {}), '(self.ki)\n', (2581, 2590), True, 'import numpy as np\n'), ((3771, 3786), 'numpy.abs', 'np.abs', (['self.ki'], {}), '(self.ki)\n', (3777, 3786), True, 'import numpy as np\n')] |
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
# Adapted after Key Phrase Extraction EmbedRank Algorithm by Swisscom (Schweiz) AG
# github repository of the project: https://github.com/swisscom/ai-research-keyphrase-extraction
# source code on github: https://github.com/swisscom/ai-research-keyphrase-extraction/blob/master/swisscom_ai/research_keyphrase/model/method.py
# ToDo: include Licence
def MMR(text, candidates, candidate_embeddings, beta, N, encoder):
    """
    Core method using Maximal Marginal Relevance in charge to return the top-N candidates
    :param text: Input text as string, for BERT encoder: no longer than 510 tokens
    :param candidates: list of candidates (string)
    :param candidate_embeddings: numpy array with the embedding of each candidate in each row
    :param beta: hyperparameter beta for MMR (control tradeoff between informativeness and diversity)
    :param N: number of candidates to extract
    :param encoder: encoder for extracting embeddings
    :return: A tuple with 2 elements :
            1)list of the top-N candidates (or less if there are not enough candidates) (list of string)
            2)list of associated relevance scores (list of float)
    """
    N = min(N, len(candidates))
    doc_embedding = encoder.encode(text)  # Extract doc embedding
    # similarity of every candidate to the document, standardised around 0.5
    doc_sim = cosine_similarity(candidate_embeddings, doc_embedding.reshape(1, -1))
    doc_sim_norm = doc_sim / np.max(doc_sim)
    doc_sim_norm = 0.5 + (doc_sim_norm - np.average(doc_sim_norm)) / np.std(doc_sim_norm)
    # pairwise candidate similarity; the diagonal (self-similarity) is masked
    sim_between = cosine_similarity(candidate_embeddings)
    # FIX: the np.NaN alias was removed in NumPy 2.0; use np.nan
    np.fill_diagonal(sim_between, np.nan)
    sim_between_norm = sim_between / np.nanmax(sim_between, axis=0)
    sim_between_norm = \
        0.5 + (sim_between_norm - np.nanmean(sim_between_norm, axis=0)) / np.nanstd(sim_between_norm, axis=0)
    # greedy MMR selection, seeded with the candidate most similar to the doc
    selected_candidates = []
    unselected_candidates = [c for c in range(len(candidates))]
    j = np.argmax(doc_sim)
    selected_candidates.append(j)
    unselected_candidates.remove(j)
    for _ in range(N - 1):
        selec_array = np.array(selected_candidates)
        unselec_array = np.array(unselected_candidates)
        distance_to_doc = doc_sim_norm[unselec_array, :]
        dist_between = sim_between_norm[unselec_array][:, selec_array]
        if dist_between.ndim == 1:
            dist_between = dist_between[:, np.newaxis]
        # trade informativeness (similarity to doc) against diversity
        j = np.argmax(beta * distance_to_doc - (1 - beta) * np.max(dist_between, axis=1).reshape(-1, 1))
        item_idx = unselected_candidates[j]
        selected_candidates.append(item_idx)
        unselected_candidates.remove(item_idx)
    # Not using normalized version of doc_sim for computing relevance
    relevance_list = max_normalization(doc_sim[selected_candidates]).tolist()
    # FIX: loop variable renamed so it no longer shadows the builtin `id`
    selection = [candidates[sel] for sel in selected_candidates]
    return selection, relevance_list
def max_normalization(array):
    """
    Max-normalise the array so its largest entry becomes 1.

    :param array: column vector of shape (n, 1)
    :return: 1-d array where every value is multiplied by 1/max
    """
    peak = np.max(array)
    return (1 / peak) * array.squeeze(axis=1)
def get_aliases(kp_sim_between, candidates, threshold):
    """
    Find candidates which are very similar to the keyphrases (aliases).

    :param kp_sim_between: ndarray of shape (nb_kp, nb_candidates) containing the similarity
    of each kp with all the candidates. Note that the similarity between the keyphrase and
    itself should be set to NaN or 0.
    :param candidates: array of candidates (array of string)
    :param threshold: minimum similarity for a candidate to count as an alias
    :return: list containing for each keyphrase a list that contain candidates which are aliases
    (very similar) (list of list of string)
    """
    # FIX: np.nan_to_num's second positional argument is `copy`, not the
    # replacement value; the old call nan_to_num(x, 0) silently mutated the
    # caller's array in place. Use the keyword so NaNs become 0.0 on a copy.
    kp_sim_between = np.nan_to_num(kp_sim_between, nan=0.0)
    # candidate indices per keyphrase, sorted by decreasing similarity
    idx_sorted = np.flip(np.argsort(kp_sim_between), 1)
    aliases = []
    for kp_idx, item in enumerate(idx_sorted):
        alias_for_item = []
        for i in item:
            if kp_sim_between[kp_idx, i] >= threshold:
                alias_for_item.append(candidates[i])
            else:
                # similarities are sorted, so no later candidate can qualify
                break
        aliases.append(alias_for_item)
    return aliases
"numpy.fill_diagonal",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.average",
"numpy.nan_to_num",
"numpy.argmax",
"numpy.std",
"numpy.nanstd",
"numpy.argsort",
"numpy.max",
"numpy.array",
"numpy.nanmax",
"numpy.nanmean"
] | [((1644, 1683), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['candidate_embeddings'], {}), '(candidate_embeddings)\n', (1661, 1683), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((1688, 1725), 'numpy.fill_diagonal', 'np.fill_diagonal', (['sim_between', 'np.NaN'], {}), '(sim_between, np.NaN)\n', (1704, 1725), True, 'import numpy as np\n'), ((2031, 2049), 'numpy.argmax', 'np.argmax', (['doc_sim'], {}), '(doc_sim)\n', (2040, 2049), True, 'import numpy as np\n'), ((3927, 3959), 'numpy.nan_to_num', 'np.nan_to_num', (['kp_sim_between', '(0)'], {}), '(kp_sim_between, 0)\n', (3940, 3959), True, 'import numpy as np\n'), ((1519, 1534), 'numpy.max', 'np.max', (['doc_sim'], {}), '(doc_sim)\n', (1525, 1534), True, 'import numpy as np\n'), ((1762, 1792), 'numpy.nanmax', 'np.nanmax', (['sim_between'], {'axis': '(0)'}), '(sim_between, axis=0)\n', (1771, 1792), True, 'import numpy as np\n'), ((2170, 2199), 'numpy.array', 'np.array', (['selected_candidates'], {}), '(selected_candidates)\n', (2178, 2199), True, 'import numpy as np\n'), ((2224, 2255), 'numpy.array', 'np.array', (['unselected_candidates'], {}), '(unselected_candidates)\n', (2232, 2255), True, 'import numpy as np\n'), ((3985, 4011), 'numpy.argsort', 'np.argsort', (['kp_sim_between'], {}), '(kp_sim_between)\n', (3995, 4011), True, 'import numpy as np\n'), ((1604, 1624), 'numpy.std', 'np.std', (['doc_sim_norm'], {}), '(doc_sim_norm)\n', (1610, 1624), True, 'import numpy as np\n'), ((1892, 1927), 'numpy.nanstd', 'np.nanstd', (['sim_between_norm'], {'axis': '(0)'}), '(sim_between_norm, axis=0)\n', (1901, 1927), True, 'import numpy as np\n'), ((3298, 3311), 'numpy.max', 'np.max', (['array'], {}), '(array)\n', (3304, 3311), True, 'import numpy as np\n'), ((1576, 1600), 'numpy.average', 'np.average', (['doc_sim_norm'], {}), '(doc_sim_norm)\n', (1586, 1600), True, 'import numpy as np\n'), ((1852, 1888), 'numpy.nanmean', 'np.nanmean', (['sim_between_norm'], {'axis': '(0)'}), 
'(sim_between_norm, axis=0)\n', (1862, 1888), True, 'import numpy as np\n'), ((2535, 2563), 'numpy.max', 'np.max', (['dist_between'], {'axis': '(1)'}), '(dist_between, axis=1)\n', (2541, 2563), True, 'import numpy as np\n')] |
import random
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# Sampling window and log-normal parameters for the accept-reject demo
tLimits = [0, 5]  # interval of t over which samples are drawn
mu = 0  # location parameter (mean of ln t)
sigma = 0.75  # scale parameter (std of ln t)
def lognorm(t, mu=0, sigma=0.75):
    """
    Log-normal probability density evaluated at t.

    Generalized: mu and sigma are now parameters whose defaults match the
    module-level constants, so existing calls lognorm(t) are unchanged.

    :param t: evaluation point (must be > 0)
    :param mu: location parameter (mean of ln t)
    :param sigma: scale parameter (std of ln t)
    :return: density value at t
    """
    return (np.exp(-((np.log(t) - mu) ** 2) / (2 * sigma ** 2))) / (t * sigma * np.sqrt(2 * np.pi))
# Plot the target density on a grid (start at 0.01: density undefined at t=0)
tValues = np.arange(0.01, 5, 0.01)
yValues = np.array(list(map(lognorm, tValues)))
plt.figure()
plt.plot(tValues, yValues, '-b')
plt.ylabel('y')
plt.xlabel('t')
plt.title("Log-Normal Distribution")
plt.grid()
plt.show()
##############################################################################
# M must dominate the density everywhere on the interval for accept-reject
M = np.ceil(max(yValues))
print("M = %f" % M)
binRange = tLimits[1] - tLimits[0]
binCount = 100
rand_lognorm = np.zeros(binCount)  # Array of 'binCount' bins
bins = np.linspace(0, binRange, binCount)  # Array of 'binCount' bin limits from 0 to 'binRange'
# Generate uniform random nos. and convert them to a lognormal dist. using accept-reject
def generatePoint():
    """Draw one uniform point (u, v) in [0, 5] x [0, M] and, if it falls under
    the density curve, count u in the matching histogram bin (von Neumann
    accept-reject). Mutates the module-level rand_lognorm histogram."""
    u = random.uniform(tLimits[0], tLimits[1])
    v = random.uniform(0, M)
    if v < lognorm(u):
        # accepted: increment the first bin whose upper edge exceeds u
        for j in range(0, len(bins)):
            if u < bins[j]:
                rand_lognorm[j] += 1
                break
# Draw 10000 candidate points through the accept-reject sampler
for i in range(10000):
    generatePoint()
print(rand_lognorm)
##############################################################################
binWidth = binRange / binCount
# total histogram area, used to normalise bin counts into a density estimate
area = sum(binWidth * rand_lognorm)
plt.figure()
plt.plot(bins, rand_lognorm / area, 'bo', label="By Von-Neumann Method")
plt.plot(bins, stats.lognorm([sigma],loc=mu).pdf(bins), '-r', label="By Standard Library")
plt.ylabel('Number of counts')
plt.xlabel('X, the log-normally distributed random variable')
plt.title("Probability Density Function")
plt.grid()
plt.legend()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"random.uniform",
"matplotlib.pyplot.legend",
"numpy.zeros",
"scipy.stats.lognorm",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"... | [((255, 279), 'numpy.arange', 'np.arange', (['(0.01)', '(5)', '(0.01)'], {}), '(0.01, 5, 0.01)\n', (264, 279), True, 'import numpy as np\n'), ((328, 340), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (338, 340), True, 'import matplotlib.pyplot as plt\n'), ((341, 373), 'matplotlib.pyplot.plot', 'plt.plot', (['tValues', 'yValues', '"""-b"""'], {}), "(tValues, yValues, '-b')\n", (349, 373), True, 'import matplotlib.pyplot as plt\n'), ((374, 389), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (384, 389), True, 'import matplotlib.pyplot as plt\n'), ((390, 405), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (400, 405), True, 'import matplotlib.pyplot as plt\n'), ((406, 442), 'matplotlib.pyplot.title', 'plt.title', (['"""Log-Normal Distribution"""'], {}), "('Log-Normal Distribution')\n", (415, 442), True, 'import matplotlib.pyplot as plt\n'), ((443, 453), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (451, 453), True, 'import matplotlib.pyplot as plt\n'), ((454, 464), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (462, 464), True, 'import matplotlib.pyplot as plt\n'), ((659, 677), 'numpy.zeros', 'np.zeros', (['binCount'], {}), '(binCount)\n', (667, 677), True, 'import numpy as np\n'), ((722, 756), 'numpy.linspace', 'np.linspace', (['(0)', 'binRange', 'binCount'], {}), '(0, binRange, binCount)\n', (733, 756), True, 'import numpy as np\n'), ((1362, 1374), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1372, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1375, 1447), 'matplotlib.pyplot.plot', 'plt.plot', (['bins', '(rand_lognorm / area)', '"""bo"""'], {'label': '"""By Von-Neumann Method"""'}), "(bins, rand_lognorm / area, 'bo', label='By Von-Neumann Method')\n", (1383, 1447), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1569), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of counts"""'], {}), "('Number of counts')\n", 
(1549, 1569), True, 'import matplotlib.pyplot as plt\n'), ((1570, 1631), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X, the log-normally distributed random variable"""'], {}), "('X, the log-normally distributed random variable')\n", (1580, 1631), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1673), 'matplotlib.pyplot.title', 'plt.title', (['"""Probability Density Function"""'], {}), "('Probability Density Function')\n", (1641, 1673), True, 'import matplotlib.pyplot as plt\n'), ((1674, 1684), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1682, 1684), True, 'import matplotlib.pyplot as plt\n'), ((1685, 1697), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1695, 1697), True, 'import matplotlib.pyplot as plt\n'), ((1698, 1708), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1706, 1708), True, 'import matplotlib.pyplot as plt\n'), ((932, 970), 'random.uniform', 'random.uniform', (['tLimits[0]', 'tLimits[1]'], {}), '(tLimits[0], tLimits[1])\n', (946, 970), False, 'import random\n'), ((979, 999), 'random.uniform', 'random.uniform', (['(0)', 'M'], {}), '(0, M)\n', (993, 999), False, 'import random\n'), ((224, 242), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (231, 242), True, 'import numpy as np\n'), ((1463, 1493), 'scipy.stats.lognorm', 'stats.lognorm', (['[sigma]'], {'loc': 'mu'}), '([sigma], loc=mu)\n', (1476, 1493), False, 'from scipy import stats\n'), ((166, 175), 'numpy.log', 'np.log', (['t'], {}), '(t)\n', (172, 175), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
class STN3d(nn.Module):
def __init__(self, channel):
super(STN3d, self).__init__()
self.conv1 = torch.nn.Conv1d(channel, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32))).view(1, 9).repeat(
batchsize, 1)
if x.is_cuda:
iden = iden.to(x.get_device())
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = torch.nn.Conv1d(k, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 512, 1)
self.fc1 = nn.Linear(512, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, k * k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(512)
self.bn4 = nn.BatchNorm1d(256)
self.bn5 = nn.BatchNorm1d(128)
self.k = k
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 512)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1, self.k * self.k).repeat(
batchsize, 1)
if x.is_cuda:
iden = iden.to(x.get_device())
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class MeshSegNet(nn.Module):
    """Semantic segmentation of mesh cells.

    Stacks per-cell MLPs, a feature-transformer module (``STNkd``) and two
    graph-constrained learning modules (GLMs) that smooth features via batch
    matrix products with the matrices ``a_s`` and ``a_l`` passed to
    ``forward``.  Output is a per-cell class-probability tensor.
    """

    def __init__(self, num_classes=15, num_channels=15, with_dropout=True, dropout_p=0.5):
        """
        :param num_classes: number of output labels per mesh cell
        :param num_channels: number of input features per mesh cell
        :param with_dropout: if True, apply dropout after MLP-2 and inside MLP-3
        :param dropout_p: dropout probability
        """
        super(MeshSegNet, self).__init__()
        self.num_classes = num_classes
        self.num_channels = num_channels
        self.with_dropout = with_dropout
        self.dropout_p = dropout_p
        # MLP-1 [64, 64]: point-wise (kernel size 1) convolutions over cells.
        self.mlp1_conv1 = torch.nn.Conv1d(self.num_channels, 64, 1)
        self.mlp1_conv2 = torch.nn.Conv1d(64, 64, 1)
        self.mlp1_bn1 = nn.BatchNorm1d(64)
        self.mlp1_bn2 = nn.BatchNorm1d(64)
        # FTM (feature-transformer module): 64x64 learned feature alignment.
        self.fstn = STNkd(k=64)
        # GLM-1 (graph-constrained learning module)
        self.glm1_conv1_1 = torch.nn.Conv1d(64, 32, 1)
        self.glm1_conv1_2 = torch.nn.Conv1d(64, 32, 1)
        self.glm1_bn1_1 = nn.BatchNorm1d(32)
        self.glm1_bn1_2 = nn.BatchNorm1d(32)
        self.glm1_conv2 = torch.nn.Conv1d(32+32, 64, 1)
        self.glm1_bn2 = nn.BatchNorm1d(64)
        # MLP-2
        self.mlp2_conv1 = torch.nn.Conv1d(64, 64, 1)
        self.mlp2_bn1 = nn.BatchNorm1d(64)
        self.mlp2_conv2 = torch.nn.Conv1d(64, 128, 1)
        self.mlp2_bn2 = nn.BatchNorm1d(128)
        self.mlp2_conv3 = torch.nn.Conv1d(128, 512, 1)
        self.mlp2_bn3 = nn.BatchNorm1d(512)
        # GLM-2 (graph-constrained learning module), fed by both a_s and a_l.
        self.glm2_conv1_1 = torch.nn.Conv1d(512, 128, 1)
        self.glm2_conv1_2 = torch.nn.Conv1d(512, 128, 1)
        self.glm2_conv1_3 = torch.nn.Conv1d(512, 128, 1)
        self.glm2_bn1_1 = nn.BatchNorm1d(128)
        self.glm2_bn1_2 = nn.BatchNorm1d(128)
        self.glm2_bn1_3 = nn.BatchNorm1d(128)
        self.glm2_conv2 = torch.nn.Conv1d(128*3, 512, 1)
        self.glm2_bn2 = nn.BatchNorm1d(512)
        # MLP-3: consumes the dense fusion of GMP + FTM + MLP-2 + GLM-2 features.
        self.mlp3_conv1 = torch.nn.Conv1d(64+512+512+512, 256, 1)
        self.mlp3_conv2 = torch.nn.Conv1d(256, 256, 1)
        self.mlp3_bn1_1 = nn.BatchNorm1d(256)
        self.mlp3_bn1_2 = nn.BatchNorm1d(256)
        self.mlp3_conv3 = torch.nn.Conv1d(256, 128, 1)
        self.mlp3_conv4 = torch.nn.Conv1d(128, 128, 1)
        self.mlp3_bn2_1 = nn.BatchNorm1d(128)
        self.mlp3_bn2_2 = nn.BatchNorm1d(128)
        # output: one score per class per cell.
        self.output_conv = torch.nn.Conv1d(128, self.num_classes, 1)
        if self.with_dropout:
            self.dropout = nn.Dropout(p=self.dropout_p)

    def forward(self, x, a_s, a_l):
        """Segment a batch of meshes.

        :param x: cell features, shape (batch, num_channels, n_cells)
        :param a_s: (batch, n_cells, n_cells) matrix bmm-ed with cell features
            (presumably a short-range adjacency -- TODO confirm)
        :param a_l: second (batch, n_cells, n_cells) matrix
            (presumably a long-range adjacency -- TODO confirm)
        :returns: class probabilities, shape (batch, n_cells, num_classes)
        """
        batchsize = x.size()[0]
        n_pts = x.size()[2]
        # MLP-1
        x = F.relu(self.mlp1_bn1(self.mlp1_conv1(x)))
        x = F.relu(self.mlp1_bn2(self.mlp1_conv2(x)))
        # FTM: apply the learned 64x64 feature transform.
        trans_feat = self.fstn(x)
        x = x.transpose(2, 1)
        x_ftm = torch.bmm(x, trans_feat)
        # GLM-1: mix each cell's features with its a_s-weighted neighbours.
        sap = torch.bmm(a_s, x_ftm)
        sap = sap.transpose(2, 1)
        x_ftm = x_ftm.transpose(2, 1)
        x = F.relu(self.glm1_bn1_1(self.glm1_conv1_1(x_ftm)))
        glm_1_sap = F.relu(self.glm1_bn1_2(self.glm1_conv1_2(sap)))
        x = torch.cat([x, glm_1_sap], dim=1)
        x = F.relu(self.glm1_bn2(self.glm1_conv2(x)))
        # MLP-2
        x = F.relu(self.mlp2_bn1(self.mlp2_conv1(x)))
        x = F.relu(self.mlp2_bn2(self.mlp2_conv2(x)))
        x_mlp2 = F.relu(self.mlp2_bn3(self.mlp2_conv3(x)))
        if self.with_dropout:
            x_mlp2 = self.dropout(x_mlp2)
        # GLM-2: same idea as GLM-1 but with both a_s and a_l aggregations.
        x_mlp2 = x_mlp2.transpose(2, 1)
        sap_1 = torch.bmm(a_s, x_mlp2)
        sap_2 = torch.bmm(a_l, x_mlp2)
        x_mlp2 = x_mlp2.transpose(2, 1)
        sap_1 = sap_1.transpose(2, 1)
        sap_2 = sap_2.transpose(2, 1)
        x = F.relu(self.glm2_bn1_1(self.glm2_conv1_1(x_mlp2)))
        glm_2_sap_1 = F.relu(self.glm2_bn1_2(self.glm2_conv1_2(sap_1)))
        glm_2_sap_2 = F.relu(self.glm2_bn1_3(self.glm2_conv1_3(sap_2)))
        x = torch.cat([x, glm_2_sap_1, glm_2_sap_2], dim=1)
        x_glm2 = F.relu(self.glm2_bn2(self.glm2_conv2(x)))
        # GMP: global max pooling over the cell dimension.
        x = torch.max(x_glm2, 2, keepdim=True)[0]
        # Upsample the global feature back to one copy per cell.
        x = torch.nn.Upsample(n_pts)(x)
        # Dense fusion of global, FTM, MLP-2 and GLM-2 features per cell.
        x = torch.cat([x, x_ftm, x_mlp2, x_glm2], dim=1)
        # MLP-3
        x = F.relu(self.mlp3_bn1_1(self.mlp3_conv1(x)))
        x = F.relu(self.mlp3_bn1_2(self.mlp3_conv2(x)))
        x = F.relu(self.mlp3_bn2_1(self.mlp3_conv3(x)))
        if self.with_dropout:
            x = self.dropout(x)
        x = F.relu(self.mlp3_bn2_2(self.mlp3_conv4(x)))
        # output: per-cell class scores, normalised with a softmax.
        x = self.output_conv(x)
        x = x.transpose(2,1).contiguous()
        x = torch.nn.Softmax(dim=-1)(x.view(-1, self.num_classes))
        x = x.view(batchsize, n_pts, self.num_classes)
        return x
if __name__ == '__main__':
    # Smoke test: build the network and print a per-layer summary for one
    # mesh of 6000 cells with 15 features each, plus the two 6000 x 6000
    # matrices consumed by MeshSegNet.forward (a_s, a_l).
    # NOTE(review): `summary` is not imported in this chunk -- presumably
    # torchsummary, imported earlier in the file; confirm.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = MeshSegNet().to(device)
    summary(model, [(15, 6000), (6000, 6000), (6000, 6000)])
| [
"torch.nn.Dropout",
"torch.bmm",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.nn.Conv1d",
"torch.cat",
"torch.nn.Upsample",
"torch.max",
"torch.nn.Softmax",
"torch.cuda.is_available",
"torch.nn.Linear",
"numpy.array",
"numpy.eye"
] | [((239, 270), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['channel', '(64)', '(1)'], {}), '(channel, 64, 1)\n', (254, 270), False, 'import torch\n'), ((292, 319), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(64)', '(128)', '(1)'], {}), '(64, 128, 1)\n', (307, 319), False, 'import torch\n'), ((341, 370), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(128)', '(1024)', '(1)'], {}), '(128, 1024, 1)\n', (356, 370), False, 'import torch\n'), ((390, 410), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(512)'], {}), '(1024, 512)\n', (399, 410), True, 'import torch.nn as nn\n'), ((430, 449), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (439, 449), True, 'import torch.nn as nn\n'), ((469, 486), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(9)'], {}), '(256, 9)\n', (478, 486), True, 'import torch.nn as nn\n'), ((507, 516), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (514, 516), True, 'import torch.nn as nn\n'), ((537, 555), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (551, 555), True, 'import torch.nn as nn\n'), ((575, 594), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (589, 594), True, 'import torch.nn as nn\n'), ((614, 634), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(1024)'], {}), '(1024)\n', (628, 634), True, 'import torch.nn as nn\n'), ((654, 673), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (668, 673), True, 'import torch.nn as nn\n'), ((693, 712), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (707, 712), True, 'import torch.nn as nn\n'), ((1479, 1504), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['k', '(64)', '(1)'], {}), '(k, 64, 1)\n', (1494, 1504), False, 'import torch\n'), ((1526, 1553), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(64)', '(128)', '(1)'], {}), '(64, 128, 1)\n', (1541, 1553), False, 'import torch\n'), ((1575, 1603), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(128)', '(512)', '(1)'], {}), '(128, 512, 1)\n', (1590, 1603), False, 'import 
torch\n'), ((1623, 1642), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (1632, 1642), True, 'import torch.nn as nn\n'), ((1662, 1681), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (1671, 1681), True, 'import torch.nn as nn\n'), ((1701, 1722), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(k * k)'], {}), '(128, k * k)\n', (1710, 1722), True, 'import torch.nn as nn\n'), ((1743, 1752), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1750, 1752), True, 'import torch.nn as nn\n'), ((1773, 1791), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (1787, 1791), True, 'import torch.nn as nn\n'), ((1811, 1830), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (1825, 1830), True, 'import torch.nn as nn\n'), ((1850, 1869), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (1864, 1869), True, 'import torch.nn as nn\n'), ((1889, 1908), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (1903, 1908), True, 'import torch.nn as nn\n'), ((1928, 1947), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (1942, 1947), True, 'import torch.nn as nn\n'), ((3002, 3043), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['self.num_channels', '(64)', '(1)'], {}), '(self.num_channels, 64, 1)\n', (3017, 3043), False, 'import torch\n'), ((3070, 3096), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(64)', '(64)', '(1)'], {}), '(64, 64, 1)\n', (3085, 3096), False, 'import torch\n'), ((3121, 3139), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (3135, 3139), True, 'import torch.nn as nn\n'), ((3164, 3182), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (3178, 3182), True, 'import torch.nn as nn\n'), ((3338, 3364), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(64)', '(32)', '(1)'], {}), '(64, 32, 1)\n', (3353, 3364), False, 'import torch\n'), ((3393, 3419), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(64)', '(32)', '(1)'], {}), 
'(64, 32, 1)\n', (3408, 3419), False, 'import torch\n'), ((3446, 3464), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(32)'], {}), '(32)\n', (3460, 3464), True, 'import torch.nn as nn\n'), ((3491, 3509), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(32)'], {}), '(32)\n', (3505, 3509), True, 'import torch.nn as nn\n'), ((3536, 3567), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(32 + 32)', '(64)', '(1)'], {}), '(32 + 32, 64, 1)\n', (3551, 3567), False, 'import torch\n'), ((3590, 3608), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (3604, 3608), True, 'import torch.nn as nn\n'), ((3651, 3677), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(64)', '(64)', '(1)'], {}), '(64, 64, 1)\n', (3666, 3677), False, 'import torch\n'), ((3702, 3720), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (3716, 3720), True, 'import torch.nn as nn\n'), ((3747, 3774), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(64)', '(128)', '(1)'], {}), '(64, 128, 1)\n', (3762, 3774), False, 'import torch\n'), ((3799, 3818), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (3813, 3818), True, 'import torch.nn as nn\n'), ((3845, 3873), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(128)', '(512)', '(1)'], {}), '(128, 512, 1)\n', (3860, 3873), False, 'import torch\n'), ((3898, 3917), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (3912, 3917), True, 'import torch.nn as nn\n'), ((3998, 4026), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(512)', '(128)', '(1)'], {}), '(512, 128, 1)\n', (4013, 4026), False, 'import torch\n'), ((4055, 4083), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(512)', '(128)', '(1)'], {}), '(512, 128, 1)\n', (4070, 4083), False, 'import torch\n'), ((4112, 4140), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(512)', '(128)', '(1)'], {}), '(512, 128, 1)\n', (4127, 4140), False, 'import torch\n'), ((4167, 4186), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (4181, 4186), True, 'import torch.nn as nn\n'), 
((4213, 4232), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (4227, 4232), True, 'import torch.nn as nn\n'), ((4259, 4278), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (4273, 4278), True, 'import torch.nn as nn\n'), ((4305, 4337), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(128 * 3)', '(512)', '(1)'], {}), '(128 * 3, 512, 1)\n', (4320, 4337), False, 'import torch\n'), ((4360, 4379), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(512)'], {}), '(512)\n', (4374, 4379), True, 'import torch.nn as nn\n'), ((4422, 4467), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(64 + 512 + 512 + 512)', '(256)', '(1)'], {}), '(64 + 512 + 512 + 512, 256, 1)\n', (4437, 4467), False, 'import torch\n'), ((4488, 4516), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(256)', '(256)', '(1)'], {}), '(256, 256, 1)\n', (4503, 4516), False, 'import torch\n'), ((4543, 4562), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (4557, 4562), True, 'import torch.nn as nn\n'), ((4589, 4608), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(256)'], {}), '(256)\n', (4603, 4608), True, 'import torch.nn as nn\n'), ((4635, 4663), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(256)', '(128)', '(1)'], {}), '(256, 128, 1)\n', (4650, 4663), False, 'import torch\n'), ((4690, 4718), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(128)', '(128)', '(1)'], {}), '(128, 128, 1)\n', (4705, 4718), False, 'import torch\n'), ((4745, 4764), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (4759, 4764), True, 'import torch.nn as nn\n'), ((4791, 4810), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (4805, 4810), True, 'import torch.nn as nn\n'), ((4855, 4896), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(128)', 'self.num_classes', '(1)'], {}), '(128, self.num_classes, 1)\n', (4870, 4896), False, 'import torch\n'), ((5298, 5322), 'torch.bmm', 'torch.bmm', (['x', 'trans_feat'], {}), '(x, trans_feat)\n', (5307, 5322), False, 'import torch\n'), 
((5353, 5374), 'torch.bmm', 'torch.bmm', (['a_s', 'x_ftm'], {}), '(a_s, x_ftm)\n', (5362, 5374), False, 'import torch\n'), ((5589, 5621), 'torch.cat', 'torch.cat', (['[x, glm_1_sap]'], {'dim': '(1)'}), '([x, glm_1_sap], dim=1)\n', (5598, 5621), False, 'import torch\n'), ((6003, 6025), 'torch.bmm', 'torch.bmm', (['a_s', 'x_mlp2'], {}), '(a_s, x_mlp2)\n', (6012, 6025), False, 'import torch\n'), ((6042, 6064), 'torch.bmm', 'torch.bmm', (['a_l', 'x_mlp2'], {}), '(a_l, x_mlp2)\n', (6051, 6064), False, 'import torch\n'), ((6400, 6447), 'torch.cat', 'torch.cat', (['[x, glm_2_sap_1, glm_2_sap_2]'], {'dim': '(1)'}), '([x, glm_2_sap_1, glm_2_sap_2], dim=1)\n', (6409, 6447), False, 'import torch\n'), ((6665, 6709), 'torch.cat', 'torch.cat', (['[x, x_ftm, x_mlp2, x_glm2]'], {'dim': '(1)'}), '([x, x_ftm, x_mlp2, x_glm2], dim=1)\n', (6674, 6709), False, 'import torch\n'), ((916, 945), 'torch.max', 'torch.max', (['x', '(2)'], {'keepdim': '(True)'}), '(x, 2, keepdim=True)\n', (925, 945), False, 'import torch\n'), ((2171, 2200), 'torch.max', 'torch.max', (['x', '(2)'], {'keepdim': '(True)'}), '(x, 2, keepdim=True)\n', (2180, 2200), False, 'import torch\n'), ((4954, 4982), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'self.dropout_p'}), '(p=self.dropout_p)\n', (4964, 4982), True, 'import torch.nn as nn\n'), ((6533, 6567), 'torch.max', 'torch.max', (['x_glm2', '(2)'], {'keepdim': '(True)'}), '(x_glm2, 2, keepdim=True)\n', (6542, 6567), False, 'import torch\n'), ((6602, 6626), 'torch.nn.Upsample', 'torch.nn.Upsample', (['n_pts'], {}), '(n_pts)\n', (6619, 6626), False, 'import torch\n'), ((7115, 7139), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (7131, 7139), False, 'import torch\n'), ((7307, 7332), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7330, 7332), False, 'import torch\n'), ((1129, 1166), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 1, 0, 0, 0, 1]'], {}), '([1, 0, 0, 0, 1, 0, 0, 0, 1])\n', (1137, 1166), True, 'import 
numpy as np\n'), ((2383, 2397), 'numpy.eye', 'np.eye', (['self.k'], {}), '(self.k)\n', (2389, 2397), True, 'import numpy as np\n')] |
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import os
import glob
#from util import audio
import audio
from hparams import hparams as hp
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
    '''Preprocesses the corpus label file under `in_dir` into `out_dir`.

    Reads `ProsodyLabeling/000001-010000.txt`, whose lines alternate between
    an utterance line (odd: first tab-separated field is the utterance id)
    and a transcription line (even).  For every pair whose wav file exists
    under `in_dir/Wave`, `_process_utterance` is scheduled on a process pool.

    Args:
        in_dir: The directory where you have downloaded the dataset
        out_dir: The directory to write the output into
        num_workers: Optional number of worker processes to parallelize across
        tqdm: You can optionally pass tqdm to get a nice progress bar

    Returns:
        A list of tuples describing the training examples. This should be
        written to train.txt
    '''
    executor = ProcessPoolExecutor(max_workers=num_workers)
    futures = []
    label_path = os.path.join(in_dir, 'ProsodyLabeling', '000001-010000.txt')
    with open(label_path, encoding='utf-8') as label_file:
        wav_path = None
        for index, line in enumerate(label_file, start=1):
            if index % 2 == 1:
                # Odd lines: resolve the wav file for this utterance id.
                utt_id = line.strip().split('\t')[0]
                candidate = os.path.join(in_dir, 'Wave', '%s.wav' % utt_id)
                wav_path = candidate if os.path.exists(candidate) else None
            else:
                # Even lines: transcription of the preceding utterance.
                text = line.strip()
                if wav_path is not None and text is not None:
                    task = partial(_process_utterance, out_dir, int(index / 2), wav_path, text)
                    futures.append(executor.submit(task))
    # `_process_utterance` returns None for skipped clips; drop those.
    results = (future.result() for future in tqdm(futures))
    return [result for result in results if result is not None]
def build_from_path_old(hparams, input_dirs, mel_dir, linear_dir, wav_dir, n_jobs=12, tqdm=lambda x: x):
    '''Legacy variant of `build_from_path` that reads `<input_dirs>.txt`.

    Same alternating label-file layout: odd lines carry the utterance id
    (first tab-separated field), even lines carry the text.  Wav files are
    looked up under `<input_dirs>-wav/`.

    NOTE(review): tasks are submitted as
    (mel_dir, linear_dir, wav_dir, index, wav_path, text, hparams), which does
    not match `_process_utterance(out_dir, index, wav_path, pinyin)` defined
    below -- presumably this targeted an older implementation; confirm before
    reviving this code path.
    '''
    executor = ProcessPoolExecutor(max_workers=n_jobs)
    futures = []
    index = 1
    wav_path = None
    with open(input_dirs+'.txt', encoding='utf-8') as f:
        for line in f:
            if index % 2 == 1:
                parts = line.strip().split('\t')
                wav_path = os.path.join(input_dirs+'-wav', '%s.wav' % parts[0])
                if os.path.exists(wav_path) is False:
                    wav_path = None
            else:
                text = line.strip()
                if wav_path is not None and text is not None:
                    print(int(index/2))  # progress: running utterance counter
                    futures.append(executor.submit(
                        partial(_process_utterance, mel_dir, linear_dir, wav_dir, int(index/2), wav_path, text, hparams)))
            index += 1
    return [future.result() for future in tqdm(futures)]
def _process_utterance(out_dir, index, wav_path, pinyin):
    '''Preprocesses a single utterance audio/text pair.

    This writes the mel and linear scale spectrograms to disk and returns a tuple to write
    to the train.txt file.

    Args:
        out_dir: The directory to write the spectrograms into
        index: The numeric index to use in the spectrogram filenames.
        wav_path: Path to the audio file containing the speech input
        pinyin: The pinyin of Chinese spoken in the input audio file

    Returns:
        A (spectrogram_filename, mel_filename, n_frames, pinyin) tuple to write
        to train.txt, or None when the clip exceeds `hp.max_frame_num` frames.
    '''
    # Load the audio to a numpy array:
    wav = audio.load_wav(wav_path)
    # rescale wav for unified measure for all clips (peak at 0.999)
    wav = wav / np.abs(wav).max() * 0.999
    # trim silence
    wav = audio.trim_silence(wav)
    # Compute the linear-scale spectrogram from the wav:
    spectrogram = audio.spectrogram(wav).astype(np.float32)
    n_frames = spectrogram.shape[1]
    # Skip over-long clips entirely; the caller filters out None results.
    if n_frames > hp.max_frame_num:
        return None
    # Compute a mel-scale spectrogram from the wav:
    mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
    # Write the spectrograms to disk (transposed to frames-first layout):
    spectrogram_filename = 'biaobei-spec-%05d.npy' % index
    mel_filename = 'biaobei-mel-%05d.npy' % index
    np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
    np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
    # Return a tuple describing this training example:
    return (spectrogram_filename, mel_filename, n_frames, pinyin) | [
"audio.load_wav",
"numpy.abs",
"concurrent.futures.ProcessPoolExecutor",
"os.path.exists",
"audio.melspectrogram",
"audio.spectrogram",
"audio.trim_silence",
"os.path.join"
] | [((955, 999), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'num_workers'}), '(max_workers=num_workers)\n', (974, 999), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((2269, 2308), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'n_jobs'}), '(max_workers=n_jobs)\n', (2288, 2308), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((3762, 3786), 'audio.load_wav', 'audio.load_wav', (['wav_path'], {}), '(wav_path)\n', (3776, 3786), False, 'import audio\n'), ((3912, 3935), 'audio.trim_silence', 'audio.trim_silence', (['wav'], {}), '(wav)\n', (3930, 3935), False, 'import audio\n'), ((4426, 4469), 'os.path.join', 'os.path.join', (['out_dir', 'spectrogram_filename'], {}), '(out_dir, spectrogram_filename)\n', (4438, 4469), False, 'import os\n'), ((4518, 4553), 'os.path.join', 'os.path.join', (['out_dir', 'mel_filename'], {}), '(out_dir, mel_filename)\n', (4530, 4553), False, 'import os\n'), ((1436, 1496), 'os.path.join', 'os.path.join', (['in_dir', '"""ProsodyLabeling"""', '"""000001-010000.txt"""'], {}), "(in_dir, 'ProsodyLabeling', '000001-010000.txt')\n", (1448, 1496), False, 'import os\n'), ((4012, 4034), 'audio.spectrogram', 'audio.spectrogram', (['wav'], {}), '(wav)\n', (4029, 4034), False, 'import audio\n'), ((4221, 4246), 'audio.melspectrogram', 'audio.melspectrogram', (['wav'], {}), '(wav)\n', (4241, 4246), False, 'import audio\n'), ((1644, 1693), 'os.path.join', 'os.path.join', (['in_dir', '"""Wave"""', "('%s.wav' % parts[0])"], {}), "(in_dir, 'Wave', '%s.wav' % parts[0])\n", (1656, 1693), False, 'import os\n'), ((2548, 2602), 'os.path.join', 'os.path.join', (["(input_dirs + '-wav')", "('%s.wav' % parts[0])"], {}), "(input_dirs + '-wav', '%s.wav' % parts[0])\n", (2560, 2602), False, 'import os\n'), ((1711, 1735), 'os.path.exists', 'os.path.exists', (['wav_path'], {}), '(wav_path)\n', (1725, 1735), False, 'import os\n'), ((2620, 2644), 'os.path.exists', 
'os.path.exists', (['wav_path'], {}), '(wav_path)\n', (2634, 2644), False, 'import os\n'), ((3856, 3867), 'numpy.abs', 'np.abs', (['wav'], {}), '(wav)\n', (3862, 3867), True, 'import numpy as np\n')] |
#============================================================================
# Copyright (c) 2018 Diamond Light Source Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#============================================================================
# Author: <NAME>
# E-mail: <EMAIL>
#============================================================================
"""
Example to show how to guess parameters of a forward model from
an unknown camera. In this case it's from the Hazard Cameras (Hazcams) on the
underside of NASA’s Perseverance Mars rover.
https://mars.nasa.gov/system/downloadable_items/45689_PIA24430-Perseverance's_first_full-color_look_at_Mars.png
"""
import numpy as np
import vounwarp.losa.loadersaver as io
import vounwarp.post.postprocessing as post
# Load image
mat0 = io.load_image("Sol0_1st_color.png")
output_base = "figs/"
(height, width) = mat0.shape
mat0 = mat0 / np.max(mat0)  # normalise intensities to [0, 1]
# Create line pattern: horizontal lines 3 px thick, every 40 px,
# inset 50 px from the top and bottom edges.
line_pattern = np.zeros((height, width), dtype=np.float32)
for i in range(50, height - 50, 40):
    line_pattern[i - 1:i + 2] = 1.0
# Estimate parameters by visual inspection.
# Coarse estimation
# Distortion centre, offset from the image centre (found by eye).
xcenter = width / 2.0 + 110.0
ycenter = height / 2.0 - 20.0
# Per-order magnitudes of the polynomial backward model.
list_pow = np.asarray([1.0, 10**(-4), 10**(-7), 10**(-10), 10**(-13)])
# Fine estimation
# Dimensionless coefficients refining each polynomial order.
list_coef = np.asarray([1.0, 4.0, 5.0, 17.0, 3.0])
list_ffact = list_pow * list_coef
# Pad generously so the unwarped pattern stays inside the frame,
# then crop back to the original image size.
pad = width
mat_pad = np.pad(line_pattern, pad, mode='edge')
mat_cor = post.unwarp_image_backward(mat_pad, xcenter + pad,
                                     ycenter + pad, list_ffact)
mat_cor = mat_cor[pad:pad + height, pad:pad + width]
# Blend the corrected pattern over the photo for visual inspection.
io.save_image(output_base + "/overlay.jpg", (mat0 + 0.5*mat_cor))
| [
"numpy.pad",
"vounwarp.losa.loadersaver.load_image",
"vounwarp.post.postprocessing.unwarp_image_backward",
"numpy.asarray",
"numpy.zeros",
"vounwarp.losa.loadersaver.save_image",
"numpy.max"
] | [((1317, 1352), 'vounwarp.losa.loadersaver.load_image', 'io.load_image', (['"""Sol0_1st_color.png"""'], {}), "('Sol0_1st_color.png')\n", (1330, 1352), True, 'import vounwarp.losa.loadersaver as io\n'), ((1469, 1512), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.float32'}), '((height, width), dtype=np.float32)\n', (1477, 1512), True, 'import numpy as np\n'), ((1722, 1781), 'numpy.asarray', 'np.asarray', (['[1.0, 10 ** -4, 10 ** -7, 10 ** -10, 10 ** -13]'], {}), '([1.0, 10 ** -4, 10 ** -7, 10 ** -10, 10 ** -13])\n', (1732, 1781), True, 'import numpy as np\n'), ((1812, 1850), 'numpy.asarray', 'np.asarray', (['[1.0, 4.0, 5.0, 17.0, 3.0]'], {}), '([1.0, 4.0, 5.0, 17.0, 3.0])\n', (1822, 1850), True, 'import numpy as np\n'), ((1908, 1946), 'numpy.pad', 'np.pad', (['line_pattern', 'pad'], {'mode': '"""edge"""'}), "(line_pattern, pad, mode='edge')\n", (1914, 1946), True, 'import numpy as np\n'), ((1957, 2034), 'vounwarp.post.postprocessing.unwarp_image_backward', 'post.unwarp_image_backward', (['mat_pad', '(xcenter + pad)', '(ycenter + pad)', 'list_ffact'], {}), '(mat_pad, xcenter + pad, ycenter + pad, list_ffact)\n', (1983, 2034), True, 'import vounwarp.post.postprocessing as post\n'), ((2125, 2190), 'vounwarp.losa.loadersaver.save_image', 'io.save_image', (["(output_base + '/overlay.jpg')", '(mat0 + 0.5 * mat_cor)'], {}), "(output_base + '/overlay.jpg', mat0 + 0.5 * mat_cor)\n", (2138, 2190), True, 'import vounwarp.losa.loadersaver as io\n'), ((1418, 1430), 'numpy.max', 'np.max', (['mat0'], {}), '(mat0)\n', (1424, 1430), True, 'import numpy as np\n')] |
"""
Parsers provided by aiida_skeaf.
Register parsers via the "aiida.parsers" entry point in setup.json.
"""
import re
import typing as ty
import numpy as np
from aiida import orm
from aiida.common import exceptions
from aiida.engine import ExitCode
from aiida.parsers.parser import Parser
from aiida.plugins import CalculationFactory
# Resolve the CalcJob class registered under the "skeaf.skeaf" entry point;
# used below to verify that parsed nodes were produced by it.
SkeafCalculation = CalculationFactory("skeaf.skeaf")
class SkeafParser(Parser):
    """
    Parser class for parsing output of a ``SkeafCalculation``.

    Stores ``results_short.out`` as an ``orm.Dict`` (``output_parameters``)
    and ``results_freqvsangle.out`` as an ``orm.ArrayData`` (``frequency``).
    """

    def __init__(self, node):
        """
        Initialize Parser instance

        Checks that the ProcessNode being passed was produced by a SkeafCalculation.

        :param node: ProcessNode of calculation
        :param type node: :class:`aiida.orm.ProcessNode`
        """
        super().__init__(node)
        if not issubclass(node.process_class, SkeafCalculation):
            raise exceptions.ParsingError("Can only parse SkeafCalculation")

    def parse(self, **kwargs):
        """
        Parse outputs, store results in database.

        :returns: an exit code, if parsing fails (or nothing if parsing succeeds)
        """
        output_filename = self.node.get_option("output_filename")

        # Check that folder content is as expected
        files_retrieved = self.retrieved.list_object_names()
        files_expected = [
            "results_freqvsangle.out",
            "results_short.out",
            "results_orbitoutlines_invAng.out",
        ]
        # Note: set(A) <= set(B) checks whether A is a subset of B
        if not set(files_expected) <= set(files_retrieved):
            self.logger.error(
                f"Found files '{files_retrieved}', expected to find '{files_expected}'"
            )
            return self.exit_codes.ERROR_MISSING_OUTPUT_FILES

        # parse `results_short.out`
        self.logger.info(f"Parsing '{output_filename}'")
        with self.retrieved.open(output_filename, "r") as handle:
            output_node = parse_short_out(handle.readlines())
        self.out("output_parameters", output_node)

        # parse `results_freqvsangle.out`
        filename = "results_freqvsangle.out"
        # Fix: this message previously logged the literal string "(unknown)"
        # instead of the file actually being parsed.
        self.logger.info(f"Parsing '{filename}'")
        with self.retrieved.open(filename, "r") as handle:
            output_node = parse_frequency(handle.readlines())
        # Exchange theta and phi if the input asked for the ISO angle convention
        input_params = self.node.inputs["parameters"].get_dict()
        angle_iso_convention = input_params.get("angle_iso_convention")
        if angle_iso_convention:
            theta = output_node.get_array("theta")
            phi = output_node.get_array("phi")
            output_node.set_array("theta", phi)
            output_node.set_array("phi", theta)
        output_node.set_attribute("angle_iso_convention", angle_iso_convention)
        self.out("frequency", output_node)

        # parse `results_orbitoutlines_invAng.out`
        # filename = 'results_orbitoutlines_invAng.out'
        # self.logger.info(f"Parsing '{filename}'")
        # with self.retrieved.open(filename, "r") as handle:
        #     output_node = SinglefileData(file=handle)
        # self.out("skeaf", output_node)

        return ExitCode(0)
def parse_short_out(filecontent: ty.List[str]) -> orm.Dict:
    """Parse ``results_short.out`` into an ``orm.Dict``.

    Each quantity is matched at most once; once a pattern has matched, it is
    removed so later lines are not re-checked against it.

    :param filecontent: lines of ``results_short.out``
    :returns: ``orm.Dict`` with version, fermi_energy (float, rydberg),
        timing and timestamp strings
    :raises KeyError: if no "Fermi energy" line is present (unchanged
        behaviour of the final float conversion)
    """
    parameters = {
        "fermi_energy_unit": "rydberg",
    }
    regexs = {
        "version": re.compile(r"Short results file generated by S.K.E.A.F. (.+)"),
        "fermi_energy": re.compile(r"Fermi energy:\s*([+-]?(?:[0-9]*[.])?[0-9]+) Ryd"),
        "time_per_angle": re.compile(r"Calculations for one angle took\s*(.+)"),
        "time_total": re.compile(r"Whole program run took\s*(.+)"),
        "timestamp_started": re.compile(r"Started finding DOS on\s*(.+)"),
        "timestamp_finished": re.compile(r"Program finished on\s*(.+)"),
    }

    for line in filecontent:
        stripped = line.strip()
        matched_key = None
        for key, reg in regexs.items():
            match = reg.match(stripped)
            if match:
                parameters[key] = match.group(1)
                matched_key = key
                break
        # Fix: remove the spent pattern *after* the loop instead of popping
        # the dict while iterating over ``.items()`` -- the original only
        # avoided a RuntimeError because of the immediate ``break``.
        if matched_key is not None:
            del regexs[matched_key]

    parameters["fermi_energy"] = float(parameters["fermi_energy"])

    return orm.Dict(dict=parameters)
def parse_frequency(filecontent: ty.List[str]) -> orm.ArrayData:
    """Parse `results_freqvsangle.out` into an ``orm.ArrayData``.

    Expects one header line followed by comma-separated rows of
    (theta, phi, freq, mstar, curv, type, numorbcopy).

    NOTE: mutates the caller's list -- the header line is ``pop``-ed off
    ``filecontent`` before parsing.
    """
    array = orm.ArrayData()
    header = filecontent.pop(0)
    array.set_attribute("header", header)
    # The rows are parsed in two passes: the first six columns as floats,
    # the last column (number of orbit copies) separately as integers.
    freq = np.loadtxt(
        filecontent,
        delimiter=",",
        usecols=range(6),
        dtype=float,
    )
    numorbcopy = np.loadtxt(
        filecontent,
        delimiter=",",
        usecols=6,
        dtype=int,
    )
    array.set_array("theta", freq[:, 0])
    array.set_array("phi", freq[:, 1])
    array.set_array("freq", freq[:, 2])
    array.set_array("mstar", freq[:, 3])
    array.set_array("curv", freq[:, 4])
    array.set_array("type", freq[:, 5])
    array.set_array("numorbcopy", numorbcopy)
    return array
| [
"aiida.orm.Dict",
"re.compile",
"aiida.common.exceptions.ParsingError",
"aiida.plugins.CalculationFactory",
"numpy.loadtxt",
"aiida.orm.ArrayData",
"aiida.engine.ExitCode"
] | [((358, 391), 'aiida.plugins.CalculationFactory', 'CalculationFactory', (['"""skeaf.skeaf"""'], {}), "('skeaf.skeaf')\n", (376, 391), False, 'from aiida.plugins import CalculationFactory\n'), ((4180, 4205), 'aiida.orm.Dict', 'orm.Dict', ([], {'dict': 'parameters'}), '(dict=parameters)\n', (4188, 4205), False, 'from aiida import orm\n'), ((4328, 4343), 'aiida.orm.ArrayData', 'orm.ArrayData', ([], {}), '()\n', (4341, 4343), False, 'from aiida import orm\n'), ((4557, 4617), 'numpy.loadtxt', 'np.loadtxt', (['filecontent'], {'delimiter': '""","""', 'usecols': '(6)', 'dtype': 'int'}), "(filecontent, delimiter=',', usecols=6, dtype=int)\n", (4567, 4617), True, 'import numpy as np\n'), ((3189, 3200), 'aiida.engine.ExitCode', 'ExitCode', (['(0)'], {}), '(0)\n', (3197, 3200), False, 'from aiida.engine import ExitCode\n'), ((3400, 3461), 're.compile', 're.compile', (['"""Short results file generated by S.K.E.A.F. (.+)"""'], {}), "('Short results file generated by S.K.E.A.F. (.+)')\n", (3410, 3461), False, 'import re\n'), ((3488, 3550), 're.compile', 're.compile', (['"""Fermi energy:\\\\s*([+-]?(?:[0-9]*[.])?[0-9]+) Ryd"""'], {}), "('Fermi energy:\\\\s*([+-]?(?:[0-9]*[.])?[0-9]+) Ryd')\n", (3498, 3550), False, 'import re\n'), ((3578, 3631), 're.compile', 're.compile', (['"""Calculations for one angle took\\\\s*(.+)"""'], {}), "('Calculations for one angle took\\\\s*(.+)')\n", (3588, 3631), False, 'import re\n'), ((3655, 3699), 're.compile', 're.compile', (['"""Whole program run took\\\\s*(.+)"""'], {}), "('Whole program run took\\\\s*(.+)')\n", (3665, 3699), False, 'import re\n'), ((3730, 3774), 're.compile', 're.compile', (['"""Started finding DOS on\\\\s*(.+)"""'], {}), "('Started finding DOS on\\\\s*(.+)')\n", (3740, 3774), False, 'import re\n'), ((3806, 3847), 're.compile', 're.compile', (['"""Program finished on\\\\s*(.+)"""'], {}), "('Program finished on\\\\s*(.+)')\n", (3816, 3847), False, 'import re\n'), ((885, 943), 'aiida.common.exceptions.ParsingError', 
'exceptions.ParsingError', (['"""Can only parse SkeafCalculation"""'], {}), "('Can only parse SkeafCalculation')\n", (908, 943), False, 'from aiida.common import exceptions\n')] |
import os
import os.path
import numpy as np
import h5py
import torch
import utils
DATASET_REGISTRY = {}


def build_dataset(name, *args, **kwargs):
    """Instantiate the dataset loader registered under ``name``."""
    factory = DATASET_REGISTRY[name]
    return factory(*args, **kwargs)


def register_dataset(name):
    """Decorator factory that records a loader function under ``name``."""

    def _register(fn):
        if name in DATASET_REGISTRY:
            raise ValueError("Cannot register duplicate dataset ({})".format(name))
        DATASET_REGISTRY[name] = fn
        return fn

    return _register
# @register_dataset("bsd400")
# def load_bsd400(data, batch_size=100, num_workers=0):
# train_dataset = Dataset(filename=os.path.join(data, "train.h5"))
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=1, shuffle=True)
# valid_dataset = Dataset(filename=os.path.join(data, "valid.h5"))
# valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, num_workers=1, shuffle=False)
# return train_loader, valid_loader, None
@register_dataset("pwc")
def load_pwc(n_data=1000, batch_size=100, num_workers=4, fix_datapoints= False, min_sep = 5):
train_dataset = utils.PieceWiseConstantDataset(n_data = n_data, fix_datapoints = fix_datapoints, min_sep= min_sep)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, shuffle=True)
valid_dataset = utils.PieceWiseConstantDataset(n_data = n_data, fix_datapoints=fix_datapoints, min_sep= min_sep)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, num_workers=1, shuffle=True)
test_dataset = utils.PieceWiseConstantDataset(n_data = n_data, fix_datapoints=fix_datapoints, min_sep= min_sep)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, num_workers=1, shuffle=True)
return train_loader, valid_loader, test_loader
@register_dataset("masked_pwc")
def load_pwc(n_data=1000, batch_size=100, num_workers=0, fix_datapoints= False, min_sep = 5, test_num = 0):
# train_dataset = utils.PieceWiseConstantDataset()
train_dataset = utils.MaskedDataset(n_data = n_data, fix_datapoints=fix_datapoints, min_sep= min_sep)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, num_workers=1, shuffle=True)
# valid_dataset = utils.PieceWiseConstantDataset()
valid_dataset = utils.MaskedDataset(n_data = n_data, fix_datapoints=fix_datapoints, min_sep= min_sep)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, num_workers=1, shuffle=False)
test_dataset = utils.MaskedDataset(n_data = n_data, fix_datapoints=fix_datapoints, min_sep= min_sep, test_num = test_num)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=10000, num_workers=1, shuffle=True)
return train_loader, valid_loader, test_loader
class Dataset(torch.utils.data.Dataset):
def __init__(self, filename):
super().__init__()
self.h5f = h5py.File(filename, "r")
self.keys = list(self.h5f.keys())
def __len__(self):
return len(self.keys)
def __getitem__(self, index):
key = self.keys[index]
data = np.array(self.h5f[key])
return torch.Tensor(data)
| [
"h5py.File",
"utils.PieceWiseConstantDataset",
"torch.utils.data.DataLoader",
"utils.MaskedDataset",
"torch.Tensor",
"numpy.array"
] | [((1111, 1208), 'utils.PieceWiseConstantDataset', 'utils.PieceWiseConstantDataset', ([], {'n_data': 'n_data', 'fix_datapoints': 'fix_datapoints', 'min_sep': 'min_sep'}), '(n_data=n_data, fix_datapoints=fix_datapoints,\n min_sep=min_sep)\n', (1141, 1208), False, 'import utils\n'), ((1229, 1337), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'num_workers': 'num_workers', 'shuffle': '(True)'}), '(train_dataset, batch_size=batch_size,\n num_workers=num_workers, shuffle=True)\n', (1256, 1337), False, 'import torch\n'), ((1355, 1452), 'utils.PieceWiseConstantDataset', 'utils.PieceWiseConstantDataset', ([], {'n_data': 'n_data', 'fix_datapoints': 'fix_datapoints', 'min_sep': 'min_sep'}), '(n_data=n_data, fix_datapoints=fix_datapoints,\n min_sep=min_sep)\n', (1385, 1452), False, 'import utils\n'), ((1471, 1560), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': '(1)', 'num_workers': '(1)', 'shuffle': '(True)'}), '(valid_dataset, batch_size=1, num_workers=1,\n shuffle=True)\n', (1498, 1560), False, 'import torch\n'), ((1581, 1678), 'utils.PieceWiseConstantDataset', 'utils.PieceWiseConstantDataset', ([], {'n_data': 'n_data', 'fix_datapoints': 'fix_datapoints', 'min_sep': 'min_sep'}), '(n_data=n_data, fix_datapoints=fix_datapoints,\n min_sep=min_sep)\n', (1611, 1678), False, 'import utils\n'), ((1696, 1784), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': '(1)', 'num_workers': '(1)', 'shuffle': '(True)'}), '(test_dataset, batch_size=1, num_workers=1,\n shuffle=True)\n', (1723, 1784), False, 'import torch\n'), ((2048, 2135), 'utils.MaskedDataset', 'utils.MaskedDataset', ([], {'n_data': 'n_data', 'fix_datapoints': 'fix_datapoints', 'min_sep': 'min_sep'}), '(n_data=n_data, fix_datapoints=fix_datapoints, min_sep=\n min_sep)\n', (2067, 2135), False, 'import utils\n'), ((2153, 2251), 'torch.utils.data.DataLoader', 
'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'num_workers': '(1)', 'shuffle': '(True)'}), '(train_dataset, batch_size=batch_size,\n num_workers=1, shuffle=True)\n', (2180, 2251), False, 'import torch\n'), ((2324, 2411), 'utils.MaskedDataset', 'utils.MaskedDataset', ([], {'n_data': 'n_data', 'fix_datapoints': 'fix_datapoints', 'min_sep': 'min_sep'}), '(n_data=n_data, fix_datapoints=fix_datapoints, min_sep=\n min_sep)\n', (2343, 2411), False, 'import utils\n'), ((2429, 2519), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': '(1)', 'num_workers': '(1)', 'shuffle': '(False)'}), '(valid_dataset, batch_size=1, num_workers=1,\n shuffle=False)\n', (2456, 2519), False, 'import torch\n'), ((2540, 2646), 'utils.MaskedDataset', 'utils.MaskedDataset', ([], {'n_data': 'n_data', 'fix_datapoints': 'fix_datapoints', 'min_sep': 'min_sep', 'test_num': 'test_num'}), '(n_data=n_data, fix_datapoints=fix_datapoints, min_sep=\n min_sep, test_num=test_num)\n', (2559, 2646), False, 'import utils\n'), ((2665, 2757), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': '(10000)', 'num_workers': '(1)', 'shuffle': '(True)'}), '(test_dataset, batch_size=10000, num_workers=1,\n shuffle=True)\n', (2692, 2757), False, 'import torch\n'), ((2928, 2952), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2937, 2952), False, 'import h5py\n'), ((3130, 3153), 'numpy.array', 'np.array', (['self.h5f[key]'], {}), '(self.h5f[key])\n', (3138, 3153), True, 'import numpy as np\n'), ((3169, 3187), 'torch.Tensor', 'torch.Tensor', (['data'], {}), '(data)\n', (3181, 3187), False, 'import torch\n')] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import matplotlib.gridspec as gridspec
import time
import os
import helpers
import head_move_box
def dist(arr):
# compute distance
return np.sqrt((arr[0] ** 2) + (arr[1] ** 2))
def tracking(resource, target):
start = time.time()
#parameter setting
movPt = [22,39,57] #22-leftbrow, 33-righteye, 57-lowerlip
# nameOfMovPT = ["(6)RightJaw (mm)", "(12)LeftJaw", "(57)LowerLip"]
nameOfMovPT = ["Left Brow", "Right Eye", "Lower Lip"]
#nameOfMovPT = ["Euclidean (mm)", "Horizontal", "Vertical"]
my_figsize, my_dpi = (20, 10), 80
Z_cam = 500 #(millimeter)
sizeOfLdmk = [68,2]
desiredEyePixels = 180 #(180 pixel = 6cm, => 1 pixel = 0.4 mms)
eyeDistGT = 63.0 # The distance between middle of eyes is 60mm
pix2mm = eyeDistGT/desiredEyePixels
svVideo = os.path.join(target, 'output.avi') # create output video file
sv2DLdMarks = os.path.join(target, '2d_landmarks') # create 2D landmarks file
sv3DLdMarks = os.path.join(target, '3d_landmarks') # create 3D frontalised landmarks file
sv3DLdMarks_Pose = os.path.join(target, '3d_landmarks_pose') # create 3D landmarks coupled with pose file
svNonDetect = os.path.join(target, 'NonDetected') # create non-detected frames file
landmarks_2d = []
landmarks_3d = []
landmarks_pose_3d = []
nonDetectFr = []
cap = cv2.VideoCapture(resource) # load video
# video info
fps = cap.get(cv2.CAP_PROP_FPS)
totalFrame = np.int32(cap.get(cv2.CAP_PROP_FRAME_COUNT))
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
print("Total frames: ", totalFrame)
print("Frame size: ", size)
vis = 0
fourcc = cv2.VideoWriter_fourcc(*'XVID') # create VideoWriter object
width, height = my_figsize[0] * my_dpi, my_figsize[1] * my_dpi
out = cv2.VideoWriter(svVideo, fourcc, fps, (width, height))
flagIndx = False
totalIndx = 0
while(cap.isOpened()):
frameIndex = np.int32(cap.get(cv2.CAP_PROP_POS_FRAMES))
print("Processing frame ", frameIndex, "...")
# capture frame-by-frame
ret, frame = cap.read()
if ret==True:
# operations on the frame
try:
# generate face bounding box and track 2D landmarks for current frame
(bb, frame_landmarks) = helpers.get_landmarks(frame)
except:
print("Landmarks in frame ", frameIndex, " (", frameIndex/fps, " s) could not be detected.")
nonDetectFr.append(frameIndex/fps)
continue
# only for plotting head movement
(eyeLNow,eyeRNow,noseNow) = helpers.get_fixedPoint(frame_landmarks, numOfPoint = 3)
# 3D transformation by adding depth to the 2D image
(vertices, mesh_plotting, Ind, rotation_angle) = helpers.landmarks_3d_fitting(frame_landmarks,height,width)
frame_landmarks_3d = vertices[np.int32(Ind),0:2]
landmarks_2d.append(frame_landmarks)
landmarks_3d.append(vertices[np.int32(Ind),0:3])
landmarks_pose_3d.append(mesh_plotting[np.int32(Ind),0:3])
totalIndx = totalIndx + 1
# compare current landmarks with the first frame landmarks
if flagIndx == False:
eyeL0,eyeR0,nose0 = eyeLNow, eyeRNow, noseNow
init_im = frame
init_landmarks = frame_landmarks
init_landmarks_3d = frame_landmarks_3d
diff = frame_landmarks - init_landmarks
# dis for Euclidean distance, dis_x for horizontal displacement, dis_y for vertical displacement
# dis = dis_x = dis_y = np.zeros(1)
y1 = y2 = y3 = np.zeros(1)
flagIndx = True
else:
diff = frame_landmarks_3d - init_landmarks_3d
# dis = np.append(dis, dist(diff[movPt])) # left jaw
# dis_x = np.append(dis_x, diff[movPt, 0])
# dis_y = np.append(dis_y, diff[movPt, 1])
y1 = np.append(y1, dist(diff[movPt[0]])) # Leftbrow
y2 = np.append(y2, dist(diff[movPt[1]])) # Righteye
y3 = np.append(y3, dist(diff[movPt[2]])) # Lower lip
else:
break
############################ plotting ##############################
fig = plt.figure(figsize=my_figsize, dpi=my_dpi)
canvas = FigureCanvas(fig)
gs = gridspec.GridSpec(6, 3)
gs.update(wspace=0.5)
gs.update(hspace=1)
##################### Raw data with landmarks ######################
im1 = helpers.visualize_facial_landmarks(frame, bb, frame_landmarks, 1, movPt[0:3]) # with background
# im2 = helpers.visualize_facial_landmarks(frame, bb, frame_landmarks, 0, movPt) # no background
# add mesh
# for (x, y) in mesh_plotting[:, 0:2]:
# x = np.int32(x)
# y = np.int32(y)
# cv2.circle(im1, (x, y), 1, (1, 254, 1), -1)
ax1 = plt.subplot(gs[:3, :1])
ax1.imshow(im1)
ax1.set_title('Raw RGB Video', fontsize=16)
ax1.set_ylabel('Pixel', fontsize=14)
######################### landmark tracking ########################
ax2 = plt.subplot(gs[:3, 1:2])
# ax2.imshow(im2),ax2.set_title('Landmark Extraction on Raw Data', fontsize=16)
ax2.set_title('Landmarks Tracking', fontsize=16)
ax2.set_ylabel('Pixel', fontsize=14)
ax2.axis([-100, 100, -100, 100])
# landmarkCompare = 0 * im1.copy() + 255
for (x, y) in frame_landmarks_3d[:, 0:2]:
x = np.int32(x)
y = np.int32(y)
plt.plot(x, y, 'go')
# for highlight
(a1, b1) = frame_landmarks_3d[movPt[0], 0:2] # 22-leftbrow
(a2, b2) = frame_landmarks_3d[movPt[1], 0:2] # 33-righteye
(a3, b3) = frame_landmarks_3d[movPt[2], 0:2] # 57-lowerlip
plt.plot(a1, b1, 'o', color = 'cornflowerblue')
plt.plot(a2, b2, 'o', color = 'navajowhite')
plt.plot(a3, b3, 'o', color = 'm')
# for jaw contour
# plt.plot(frame_landmarks_3d[0:17, 0], frame_landmarks_3d[0:17, 1], linestyle='-', color='r', lw=2)
###################### head movement tracking ######################
ax3 = plt.subplot(gs[:3, 2:3], projection='3d')
rotationX, rotationY, rotationZ = rotation_angle
head_move_box.head_box_plot(ax3, eyeL0 * pix2mm, eyeR0 * pix2mm, nose0 * pix2mm,
eyeLNow * pix2mm, eyeRNow * pix2mm, noseNow * pix2mm,
rotationX, rotationY, rotationZ, Z_cam * pix2mm)
ax3.set_title('Head Movement Tracking', fontsize=16)
ax3.set_xlabel('mm', fontsize=14), ax3.set_ylabel('mm', fontsize=14), ax3.set_zlabel('mm', fontsize=14)
################## landmark movements #################
x = np.arange(totalIndx) / fps
maxMov = max(y1 * pix2mm) + 1
minMov = min(y1 * pix2mm) - 1
ax_Pt1 = plt.subplot(gs[3, :])
ax_Pt1.set_title('Movement of 3 Highlight Point ', fontsize=16)
ax_Pt1.plot(x, y1 * pix2mm, color='cornflowerblue')
ax_Pt1.axis([0, totalFrame / fps, minMov, maxMov])
plt.xlabel("time(s)")
plt.ylabel(nameOfMovPT[0], fontsize=14)
maxMov = max(y2 * pix2mm) + 1
minMov = min(y2 * pix2mm) - 1
ax_Pt2 = plt.subplot(gs[4, :])
ax_Pt2.plot(x, y2 * pix2mm, color='navajowhite')
ax_Pt2.axis([0, totalFrame / fps, minMov, maxMov])
plt.xlabel("time(s)", fontsize=14)
plt.ylabel(nameOfMovPT[1], fontsize=14)
maxMov = max(y3 * pix2mm) + 1
minMov = min(y3 * pix2mm) - 1
ax_Pt3 = plt.subplot(gs[5, :])
ax_Pt3.plot(x, y3 * pix2mm, color='m')
ax_Pt3.axis([0, totalFrame / fps, minMov, maxMov])
plt.xlabel("time(s)", fontsize=14)
plt.ylabel(nameOfMovPT[2], fontsize=14)
fig.canvas.draw()
outFrame = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(height, width, 3)
if (vis):
cv2.imshow('frame', outFrame)
# write the flipped frame
out.write(outFrame)
plt.close()
np.save(sv3DLdMarks, np.asarray(landmarks_3d))
np.save(sv2DLdMarks, np.asarray(landmarks_2d))
np.save(sv3DLdMarks_Pose, np.asarray(landmarks_pose_3d))
np.save(svNonDetect, np.asarray(nonDetectFr))
cap.release()
out.release()
end = time.time()
print("processing time:" + str(end - start))
if __name__ == "__main__":
path = "./videos"
for file in os.listdir(path):
filepath = os.path.join(path, file)
print(filepath)
target = os.path.join(os.getcwd(), os.path.basename(filepath).split('.')[0])
if not os.path.exists(target):
os.mkdir(target)
tracking(filepath, target)
| [
"helpers.landmarks_3d_fitting",
"helpers.visualize_facial_landmarks",
"os.mkdir",
"cv2.VideoWriter_fourcc",
"helpers.get_fixedPoint",
"head_move_box.head_box_plot",
"matplotlib.pyplot.figure",
"numpy.arange",
"cv2.VideoWriter",
"cv2.imshow",
"os.path.join",
"matplotlib.backends.backend_agg.Fig... | [((292, 326), 'numpy.sqrt', 'np.sqrt', (['(arr[0] ** 2 + arr[1] ** 2)'], {}), '(arr[0] ** 2 + arr[1] ** 2)\n', (299, 326), True, 'import numpy as np\n'), ((377, 388), 'time.time', 'time.time', ([], {}), '()\n', (386, 388), False, 'import time\n'), ((952, 986), 'os.path.join', 'os.path.join', (['target', '"""output.avi"""'], {}), "(target, 'output.avi')\n", (964, 986), False, 'import os\n'), ((1032, 1068), 'os.path.join', 'os.path.join', (['target', '"""2d_landmarks"""'], {}), "(target, '2d_landmarks')\n", (1044, 1068), False, 'import os\n'), ((1114, 1150), 'os.path.join', 'os.path.join', (['target', '"""3d_landmarks"""'], {}), "(target, '3d_landmarks')\n", (1126, 1150), False, 'import os\n'), ((1213, 1254), 'os.path.join', 'os.path.join', (['target', '"""3d_landmarks_pose"""'], {}), "(target, '3d_landmarks_pose')\n", (1225, 1254), False, 'import os\n'), ((1318, 1353), 'os.path.join', 'os.path.join', (['target', '"""NonDetected"""'], {}), "(target, 'NonDetected')\n", (1330, 1353), False, 'import os\n'), ((1492, 1518), 'cv2.VideoCapture', 'cv2.VideoCapture', (['resource'], {}), '(resource)\n', (1508, 1518), False, 'import cv2\n'), ((1849, 1880), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (1871, 1880), False, 'import cv2\n'), ((1986, 2040), 'cv2.VideoWriter', 'cv2.VideoWriter', (['svVideo', 'fourcc', 'fps', '(width, height)'], {}), '(svVideo, fourcc, fps, (width, height))\n', (2001, 2040), False, 'import cv2\n'), ((8697, 8708), 'time.time', 'time.time', ([], {}), '()\n', (8706, 8708), False, 'import time\n'), ((8824, 8840), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (8834, 8840), False, 'import os\n'), ((4541, 4583), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'my_figsize', 'dpi': 'my_dpi'}), '(figsize=my_figsize, dpi=my_dpi)\n', (4551, 4583), True, 'from matplotlib import pyplot as plt\n'), ((4601, 4618), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 
'FigureCanvas', (['fig'], {}), '(fig)\n', (4613, 4618), True, 'from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n'), ((4632, 4655), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(6)', '(3)'], {}), '(6, 3)\n', (4649, 4655), True, 'import matplotlib.gridspec as gridspec\n'), ((4806, 4883), 'helpers.visualize_facial_landmarks', 'helpers.visualize_facial_landmarks', (['frame', 'bb', 'frame_landmarks', '(1)', 'movPt[0:3]'], {}), '(frame, bb, frame_landmarks, 1, movPt[0:3])\n', (4840, 4883), False, 'import helpers\n'), ((5208, 5231), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:3, :1]'], {}), '(gs[:3, :1])\n', (5219, 5231), True, 'from matplotlib import pyplot as plt\n'), ((5445, 5469), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:3, 1:2]'], {}), '(gs[:3, 1:2])\n', (5456, 5469), True, 'from matplotlib import pyplot as plt\n'), ((6126, 6171), 'matplotlib.pyplot.plot', 'plt.plot', (['a1', 'b1', '"""o"""'], {'color': '"""cornflowerblue"""'}), "(a1, b1, 'o', color='cornflowerblue')\n", (6134, 6171), True, 'from matplotlib import pyplot as plt\n'), ((6182, 6224), 'matplotlib.pyplot.plot', 'plt.plot', (['a2', 'b2', '"""o"""'], {'color': '"""navajowhite"""'}), "(a2, b2, 'o', color='navajowhite')\n", (6190, 6224), True, 'from matplotlib import pyplot as plt\n'), ((6235, 6267), 'matplotlib.pyplot.plot', 'plt.plot', (['a3', 'b3', '"""o"""'], {'color': '"""m"""'}), "(a3, b3, 'o', color='m')\n", (6243, 6267), True, 'from matplotlib import pyplot as plt\n'), ((6498, 6539), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[:3, 2:3]'], {'projection': '"""3d"""'}), "(gs[:3, 2:3], projection='3d')\n", (6509, 6539), True, 'from matplotlib import pyplot as plt\n'), ((6605, 6796), 'head_move_box.head_box_plot', 'head_move_box.head_box_plot', (['ax3', '(eyeL0 * pix2mm)', '(eyeR0 * pix2mm)', '(nose0 * pix2mm)', '(eyeLNow * pix2mm)', '(eyeRNow * pix2mm)', '(noseNow * pix2mm)', 'rotationX', 'rotationY', 'rotationZ', '(Z_cam * pix2mm)'], {}), 
'(ax3, eyeL0 * pix2mm, eyeR0 * pix2mm, nose0 *\n pix2mm, eyeLNow * pix2mm, eyeRNow * pix2mm, noseNow * pix2mm, rotationX,\n rotationY, rotationZ, Z_cam * pix2mm)\n', (6632, 6796), False, 'import head_move_box\n'), ((7239, 7260), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[3, :]'], {}), '(gs[3, :])\n', (7250, 7260), True, 'from matplotlib import pyplot as plt\n'), ((7460, 7481), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time(s)"""'], {}), "('time(s)')\n", (7470, 7481), True, 'from matplotlib import pyplot as plt\n'), ((7490, 7529), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['nameOfMovPT[0]'], {'fontsize': '(14)'}), '(nameOfMovPT[0], fontsize=14)\n', (7500, 7529), True, 'from matplotlib import pyplot as plt\n'), ((7624, 7645), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[4, :]'], {}), '(gs[4, :])\n', (7635, 7645), True, 'from matplotlib import pyplot as plt\n'), ((7770, 7804), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time(s)"""'], {'fontsize': '(14)'}), "('time(s)', fontsize=14)\n", (7780, 7804), True, 'from matplotlib import pyplot as plt\n'), ((7813, 7852), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['nameOfMovPT[1]'], {'fontsize': '(14)'}), '(nameOfMovPT[1], fontsize=14)\n', (7823, 7852), True, 'from matplotlib import pyplot as plt\n'), ((7947, 7968), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[5, :]'], {}), '(gs[5, :])\n', (7958, 7968), True, 'from matplotlib import pyplot as plt\n'), ((8083, 8117), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time(s)"""'], {'fontsize': '(14)'}), "('time(s)', fontsize=14)\n", (8093, 8117), True, 'from matplotlib import pyplot as plt\n'), ((8126, 8165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['nameOfMovPT[2]'], {'fontsize': '(14)'}), '(nameOfMovPT[2], fontsize=14)\n', (8136, 8165), True, 'from matplotlib import pyplot as plt\n'), ((8422, 8433), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8431, 8433), True, 'from matplotlib import pyplot as plt\n'), ((8461, 8485), 
'numpy.asarray', 'np.asarray', (['landmarks_3d'], {}), '(landmarks_3d)\n', (8471, 8485), True, 'import numpy as np\n'), ((8512, 8536), 'numpy.asarray', 'np.asarray', (['landmarks_2d'], {}), '(landmarks_2d)\n', (8522, 8536), True, 'import numpy as np\n'), ((8568, 8597), 'numpy.asarray', 'np.asarray', (['landmarks_pose_3d'], {}), '(landmarks_pose_3d)\n', (8578, 8597), True, 'import numpy as np\n'), ((8624, 8647), 'numpy.asarray', 'np.asarray', (['nonDetectFr'], {}), '(nonDetectFr)\n', (8634, 8647), True, 'import numpy as np\n'), ((8861, 8885), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (8873, 8885), False, 'import os\n'), ((2814, 2867), 'helpers.get_fixedPoint', 'helpers.get_fixedPoint', (['frame_landmarks'], {'numOfPoint': '(3)'}), '(frame_landmarks, numOfPoint=3)\n', (2836, 2867), False, 'import helpers\n'), ((2996, 3056), 'helpers.landmarks_3d_fitting', 'helpers.landmarks_3d_fitting', (['frame_landmarks', 'height', 'width'], {}), '(frame_landmarks, height, width)\n', (3024, 3056), False, 'import helpers\n'), ((5816, 5827), 'numpy.int32', 'np.int32', (['x'], {}), '(x)\n', (5824, 5827), True, 'import numpy as np\n'), ((5844, 5855), 'numpy.int32', 'np.int32', (['y'], {}), '(y)\n', (5852, 5855), True, 'import numpy as np\n'), ((5868, 5888), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""go"""'], {}), "(x, y, 'go')\n", (5876, 5888), True, 'from matplotlib import pyplot as plt\n'), ((7119, 7139), 'numpy.arange', 'np.arange', (['totalIndx'], {}), '(totalIndx)\n', (7128, 7139), True, 'import numpy as np\n'), ((8321, 8350), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'outFrame'], {}), "('frame', outFrame)\n", (8331, 8350), False, 'import cv2\n'), ((8940, 8951), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8949, 8951), False, 'import os\n'), ((9010, 9032), 'os.path.exists', 'os.path.exists', (['target'], {}), '(target)\n', (9024, 9032), False, 'import os\n'), ((9046, 9062), 'os.mkdir', 'os.mkdir', (['target'], {}), '(target)\n', (9054, 
9062), False, 'import os\n'), ((2493, 2521), 'helpers.get_landmarks', 'helpers.get_landmarks', (['frame'], {}), '(frame)\n', (2514, 2521), False, 'import helpers\n'), ((3894, 3905), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3902, 3905), True, 'import numpy as np\n'), ((3097, 3110), 'numpy.int32', 'np.int32', (['Ind'], {}), '(Ind)\n', (3105, 3110), True, 'import numpy as np\n'), ((3207, 3220), 'numpy.int32', 'np.int32', (['Ind'], {}), '(Ind)\n', (3215, 3220), True, 'import numpy as np\n'), ((3278, 3291), 'numpy.int32', 'np.int32', (['Ind'], {}), '(Ind)\n', (3286, 3291), True, 'import numpy as np\n'), ((8953, 8979), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (8969, 8979), False, 'import os\n')] |
import numpy as np
import logging
from tqdm import tqdm
class Perceptron():
def __init__(self, eta, epochs):
self.weights = np.random.randn(3) * 1e-4 # SMALL WEIGHTS INIT
self.eta = eta # Learning Rate
self.epochs = epochs
logging.info(f'initial weights before training :\n{self.weights}')
def activationFunction(self, inputs, weights):
z = np.dot(inputs, weights) # Z = W * X
dt = np.where(z > 0, 1, 0) # CONDITION IF TRUE ELSE
#print(f"dot : ", dt)
return dt
def fit(self, X, y):
self.X = X
self.y = y
X_with_bias = np.c_[self.X, -np.ones((len(self.X), 1))] # CONCATINATION
logging.info(f'X with bias : \n{X_with_bias}') # CONCATINATION
for epoch in tqdm(range(self.epochs), total=self.epochs, desc="training the model"):
logging.info("--"*10)
logging.info(f'for epoch :{epoch}')
logging.info("--"*10)
y_hat = self.activationFunction(X_with_bias, self.weights) # FORWORD PROPAGATION
logging.info(f'predicted values after Forword pass: \n{y_hat}')
self.error = self.y - y_hat
logging.info(f"Error :\n{self.error}")
self.weights = self.weights + self.eta * np.dot(X_with_bias.T, self.error) # BACK WORK PROPAGATION
logging.info(f"updated weights after epoch :\n{epoch}/{self.epochs} : \n{self.weights}")
logging.info("###"*10)
def predict(self, X):
X_with_bias = np.c_[X, -np.ones((len(X),1))]
logging.info(f'predict x X_with_bias : \n{X_with_bias}')
return self.activationFunction(X_with_bias, self.weights)
def total_loss(self):
total_loss = np.sum(self.error)
logging.info(f'total_loss : {total_loss}')
return total_loss
| [
"numpy.sum",
"numpy.random.randn",
"logging.info",
"numpy.where",
"numpy.dot"
] | [((237, 306), 'logging.info', 'logging.info', (['f"""initial weights before training :\n{self.weights}"""'], {}), '(f"""initial weights before training :\n{self.weights}""")\n', (249, 306), False, 'import logging\n'), ((360, 383), 'numpy.dot', 'np.dot', (['inputs', 'weights'], {}), '(inputs, weights)\n', (366, 383), True, 'import numpy as np\n'), ((405, 426), 'numpy.where', 'np.where', (['(z > 0)', '(1)', '(0)'], {}), '(z > 0, 1, 0)\n', (413, 426), True, 'import numpy as np\n'), ((615, 664), 'logging.info', 'logging.info', (['f"""X with bias : \n{X_with_bias}"""'], {}), '(f"""X with bias : \n{X_with_bias}""")\n', (627, 664), False, 'import logging\n'), ((1380, 1439), 'logging.info', 'logging.info', (['f"""predict x X_with_bias : \n{X_with_bias}"""'], {}), '(f"""predict x X_with_bias : \n{X_with_bias}""")\n', (1392, 1439), False, 'import logging\n'), ((1536, 1554), 'numpy.sum', 'np.sum', (['self.error'], {}), '(self.error)\n', (1542, 1554), True, 'import numpy as np\n'), ((1557, 1599), 'logging.info', 'logging.info', (['f"""total_loss : {total_loss}"""'], {}), "(f'total_loss : {total_loss}')\n", (1569, 1599), False, 'import logging\n'), ((129, 147), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (144, 147), True, 'import numpy as np\n'), ((770, 793), 'logging.info', 'logging.info', (["('--' * 10)"], {}), "('--' * 10)\n", (782, 793), False, 'import logging\n'), ((795, 830), 'logging.info', 'logging.info', (['f"""for epoch :{epoch}"""'], {}), "(f'for epoch :{epoch}')\n", (807, 830), False, 'import logging\n'), ((834, 857), 'logging.info', 'logging.info', (["('--' * 10)"], {}), "('--' * 10)\n", (846, 857), False, 'import logging\n'), ((945, 1012), 'logging.info', 'logging.info', (['f"""predicted values after Forword pass: \n{y_hat}"""'], {}), '(f"""predicted values after Forword pass: \n{y_hat}""")\n', (957, 1012), False, 'import logging\n'), ((1045, 1086), 'logging.info', 'logging.info', (['f"""Error :\n{self.error}"""'], {}), '(f"""Error 
:\n{self.error}""")\n', (1057, 1086), False, 'import logging\n'), ((1191, 1291), 'logging.info', 'logging.info', (['f"""updated weights after epoch :\n{epoch}/{self.epochs} : \n{self.weights}"""'], {}), '(\n f"""updated weights after epoch :\n{epoch}/{self.epochs} : \n{self.weights}"""\n )\n', (1203, 1291), False, 'import logging\n'), ((1284, 1308), 'logging.info', 'logging.info', (["('###' * 10)"], {}), "('###' * 10)\n", (1296, 1308), False, 'import logging\n'), ((1129, 1162), 'numpy.dot', 'np.dot', (['X_with_bias.T', 'self.error'], {}), '(X_with_bias.T, self.error)\n', (1135, 1162), True, 'import numpy as np\n')] |
"""This script contains code to support creation of photometric sourcelists using two techniques:
aperture photometry and segmentation-map based photometry."""
import os
import sys
import shutil
import warnings
from distutils.version import LooseVersion
import numpy as np
import skimage
from astropy.io import fits as fits
import photutils
if LooseVersion(photutils.__version__) < '1.1.0':
from photutils.detection.findstars import (_DAOFindProperties, _StarCutout,
_StarFinderKernel, _find_stars)
else:
from photutils.detection._utils import (_StarCutout, _StarFinderKernel,
_find_stars)
from photutils.detection.daofinder import _DAOFindProperties
from photutils.detection import StarFinderBase, find_peaks
from photutils.utils import NoDetectionsWarning
from stsci.tools import fileutil as fu
from . import astrometric_utils as amutils
from .. import astrodrizzle
#
# Original imports
#
from astropy.stats import sigma_clipped_stats
from astropy.table import Table
from scipy import ndimage
import scipy.signal as ss
from stsci.tools import logutil
try:
from matplotlib import pyplot as plt
except Exception:
plt = None
# Catalog generation techniques supported by this package
CATALOG_TYPES = ['aperture', 'segment']
# FITS header keywords that identify the instrument configuration
INSTR_KWS = ['INSTRUME', 'DETECTOR']
# Wildcard pattern for filter keywords (e.g. FILTER1/FILTER2) -- presumably
# matched against FITS headers; confirm against callers
FILTER_KW = "FILTER*"
# Relative path components for locating the bundled PSF library
PSF_PATH = ['pars', 'psfs']
__taskname__ = 'deconvolve_utils'
# Timestamp and message layout for Splunk-compatible log records
MSG_DATEFMT = '%Y%j%H%M%S'
SPLUNK_MSG_FORMAT = '%(asctime)s %(levelname)s src=%(name)s- %(message)s'
# Module-level logger emitting Splunk-formatted records to stdout
log = logutil.create_logger(__name__, level=logutil.logging.NOTSET, stream=sys.stdout,
                      format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)
# ======================================================================================================================
def fft_deconv_img(img, psf, freq_limit=0.95, block_size=(1024, 1024)):
    """ FFT image deconvolution

    This function performs a simple 1-step FFT-based deconvolution of the
    input image using the specified PSF.  The input image gets transformed
    by FFT section-by-section in blocks defined by the `block_size` parameter
    in order to minimize the memory use for what can be extremely large images.

    PARAMETERS
    -----------
    img : ndarray
        Numpy array of image to be deconvolved

    psf : ndarray
        Numpy array of PSF to be used for deconvolution.  This array has to have the same shape as
        the input image and should be normalized to a total flux (sum) approximately equal to the
        brightest non-saturated source in the science image being deconvolved.

    freq_limit : float
        Compute regularization parameter (frequency limit) to be used with PSF in deconvolution.
        This value should result in selecting a frequency
        one to two orders of magnitude below the largest spectral component of the point-spread function.

    block_size : tuple, optional
        This specifies how much of the input image will be transformed using an FFT at one time.

    RETURNS
    -------
    deconv : ndarray
        Numpy array of deconvolved image

    .. note::
        Based on 2017 implementation by <NAME>
        http://www.radio-science.net/2017/09/deconvolution-in-frequency-domain-with.html

    """
    # Insure PSF has no NaN values and is centred in output array, to avoid
    # shifting the deconvolved product.
    # NOTE: ``copy=True`` must be passed by keyword -- the second *positional*
    # argument of ``np.nan_to_num`` is ``copy``, so the previous call
    # ``np.nan_to_num(psf, 0.0)`` silently modified the caller's PSF in place.
    psf = np.nan_to_num(psf, copy=True, nan=0.0)
    psf = _center_psf(psf, block_size)

    # Compute alpha (regularization) scaling based on the PSF frequency limit:
    # take the amplitude at the requested percentile of the sorted spectrum.
    P2 = np.sort(np.abs(psf.flatten()))[::-1]
    # Clamp the index so freq_limit == 1.0 does not run past the end of P2
    index = min(int(P2.shape[0] * freq_limit), P2.shape[0] - 1)
    alpha = P2[index]
    del P2

    # Insure input image also has no NaN values, only if necessary.
    # (The previous test ``img == np.nan`` never matched anything because NaN
    # does not compare equal to itself; ``np.isnan`` is the correct check.)
    if np.isnan(img).any():
        img = np.nan_to_num(img, copy=True, nan=0.0)
    img_shape = img.shape

    # Break up image into blocks that will be deconvolved separately, then
    # pieced back together again, to limit peak memory use.
    # Returns: {'slices':[], 'new_shape':(y, x), 'blocks':[N, M, y, x]}
    block_dict = create_blocks(img, block_size=block_size)
    img_blocks = block_dict['blocks']
    del img  # free the full-frame array before allocating the output blocks

    # Deconvolve each block independently with a 1-step Wiener-style filter
    # (as if each block were a separate exposure) to avoid memory errors from
    # FFT-transforming the entire image at once.
    m_maps = img_blocks * 0.
    for a in range(img_blocks.shape[0]):
        for b in range(img_blocks.shape[1]):
            block = img_blocks[a, b, :, :]
            m_maps[a, b, :, :] = _perform_deconv(block, psf, alpha)

    # Re-constitute deconvolved image blocks into single image
    deconv_img = rebuild_arr(m_maps,
                             block_dict['slices'],
                             block_dict['new_shape'],
                             img_shape)

    return deconv_img
# ======================================================================================================================
# Functions to manage PSF library for deconvolution
#
def _perform_deconv(img_block, psf, alpha):
P = np.fft.fft2(psf)
# FFT2 measurement
# Use image in husky_conv.png
# U^H d
D = np.fft.fft2(img_block)
# -dampped spectral components,
# -also known as Wiener filtering
# (conj(S)/(|S|^2 + alpha^2)) U^H d
M = (np.conj(P) / (np.abs(P)**2.0 + alpha**2.0)) * D
# maximum a posteriori estimate of deconvolved image
# m_map = U (conj(S)/(|S|^2 + alpha^2)) U^H d
m_map = (D.shape[1] * D.shape[0]) * np.fft.fftshift(np.fft.ifft2(M).real)
zero_mask = (img_block > 0).astype(np.int16)
m_map *= zero_mask
return m_map
def _center_psf(psf, img_block_shape):
"""Create centered PSF image with same shape as img_block"""
psf_y, psf_x = np.where(psf == psf.max())
psf_y = psf_y[0]
psf_x = psf_x[0]
psf_center = [(img_block_shape[0] // 2), (img_block_shape[1] // 2)]
centered_psf = np.zeros(img_block_shape, dtype=psf.dtype)
# We need to recenter the PSF
psf_section = np.where(psf != 0.0)
psf_xr = [psf_section[1].min(), psf_section[1].max()]
psf_yr = [psf_section[0].min(), psf_section[0].max()]
psf_size = [(psf_yr[1] - psf_yr[0]) // 2, (psf_xr[1] - psf_xr[0]) // 2]
# If psf_size is not at least 2 * psf_max//2, increase to that value
# This will insure that the final position of the PSF in the output is at the exact center
psf_max = np.where(psf[psf_yr[0]:psf_yr[1], psf_xr[0]:psf_xr[1]] == psf.max())
psf_len = max(max(psf_max[0][0] // 2, psf_size[0]), max(psf_max[1][0] // 2, psf_size[1]))
centered_psf[psf_center[0] - psf_len: psf_center[0] + psf_len,
psf_center[1] - psf_len: psf_center[1] + psf_len] = psf[psf_y - psf_len: psf_y + psf_len,
psf_x - psf_len: psf_x + psf_len]
return centered_psf
def pad_arr(arr, block=(1024, 1024)):
    """ Zero-pad an input array up to an integer number of blocks in each dimension

    Parameters
    ----------
    arr : `numpy.ndarray`
        Original input array to be padded to the new size
    block : `tuple` or `int`
        Size of blocks which should be used to define the output size so that the
        output image is an integer number of blocks with this size. If only an
        integer is specified, then a block size of (block, block) will be used.

    Returns
    -------
    new_arr : `numpy.ndarray`
        Resized output array of size (n*block[0], m*block[1]).
    """
    if isinstance(block, int):
        block = (block, block)
    # Round each axis UP to the nearest multiple of the block size.
    # BUGFIX: the previous expression ``shape + (block - shape % block)``
    # always added a full extra block when the axis was already an exact
    # multiple; ceiling division leaves such axes unchanged.
    new_shape = ((arr.shape[0] + block[0] - 1) // block[0] * block[0],
                 (arr.shape[1] + block[1] - 1) // block[1] * block[1])
    new_arr = np.zeros(new_shape, dtype=arr.dtype)
    # Original data sits in the top-left corner; the rest stays zero
    new_arr[:arr.shape[0], :arr.shape[1]] = arr
    return new_arr
def create_blocks(arr, block_size=(1024, 1024)):
    """Split input array into uniformly-sized blocks

    The input array is zero-padded (if necessary) so that both dimensions
    become an integer number of blocks, then cut into non-overlapping blocks
    of size ``block_size``.

    Parameters
    ----------
    arr : `numpy.ndarray`
        2-D Input image of size N x M
    block_size : `tuple`, optional
        Tuple specifying the block size n x m

    Returns
    --------
    block_dict : `dict`
        Dictionary describing the blocks created from the input array:

        ``"slices"``:
            List of entries [y, x, slice_y, slice_x] giving, for each block,
            its (y, x) block index and the slices locating it in the padded array.
        ``"new_shape"``:
            Full size of the padded array that was cut into blocks.
        ``"blocks"``:
            Actual blocks as returned by `skimage.util.view_as_blocks`.
    """
    # Pad only when the array is not already an exact multiple of the block size
    if arr.shape[0] % block_size[0] == 0 and arr.shape[1] % block_size[1] == 0:
        padded = arr
    else:
        padded = pad_arr(arr, block=block_size)
    full_shape = padded.shape
    # Output set of blocks will have shape: [N, M, block_size[0], block_size[1]]
    blocks = skimage.util.view_as_blocks(padded, block_size)
    slices = [[j, i,
               slice(block_size[0] * j, block_size[0] * (j + 1)),
               slice(block_size[1] * i, block_size[1] * (i + 1))]
              for j in range(blocks.shape[0])
              for i in range(blocks.shape[1])]
    # Drop the local name; ``blocks`` keeps the underlying data alive as a view
    del padded
    return {'slices': slices, 'new_shape': full_shape, 'blocks': blocks}
def rebuild_arr(block_arr, slices, new_shape, output_shape):
    """Reassemble processed blocks into a single array matching the original input.

    Parameters
    -----------
    block_arr : `numpy.ndarray`
        Blocks (shape [N, M, y, x]) to be pieced back together into one array.
    slices : `list`
        ``slices`` entries from `create_blocks` giving the block index and
        the location of each block within the padded array.
    new_shape : `tuple`
        Full size of the padded image used to create the uniformly sized blocks.
    output_shape : `tuple`
        The original shape of the array before padding and creating the blocks.

    Returns
    --------
    out_arr : `numpy.ndarray`
        Single array of the same size as the original (pre-padding) input.
    """
    full = np.zeros(new_shape, dtype=block_arr.dtype)
    # Paste each block back into its position within the padded frame
    for entry in slices:
        full[entry[2], entry[3]] = block_arr[entry[0], entry[1], :, :]
    # Trim the zero padding back off
    return full[:output_shape[0], :output_shape[1]]
def find_psf(imgname, path_root=None):
    """Locate library PSF files for an image's instrument/detector/filter set.

    The science image header is inspected (``D???DATA`` keywords) to find all
    contributing exposures; the unique set of their instrument/detector/filter
    combinations is then mapped to filenames in the installed PSF library.

    Parameters
    ----------
    imgname : str
        Image name of the science image to be deconvolved.  Its header
        provides the list of input exposures and, through them, the filters.
    path_root : str, optional
        Full path to the parent directory of the PSF library IF not the
        default path for the package.

    Returns
    -------
    psf_names : list
        Full filenames, with path, of all library PSFs that apply to the
        input image.

    Raises
    ------
    ValueError
        If none of the expected PSF files exist in the library.
    """
    drz_hdu = fits.open(imgname)
    # Each 'D???DATA' keyword records one input exposure of the drizzle product
    exposures = [f.split('[')[0] for f in drz_hdu[0].header.get('d*data').values()]
    drz_hdu.close()
    del drz_hdu
    # If there were (for any reason) no D???DATA keywords,
    # only use the input image itself to define the filters for the PSF
    if len(exposures) == 0:
        exposures = [imgname]
    # Reduce the list down to only unique filenames (order preserved)
    exposures = list(dict.fromkeys(exposures))
    # One [instrument, detector, filter] entry per unique exposure
    filter_list = [get_filter_names(f) for f in exposures]
    inst_det = [filter_list[0][0].lower(), filter_list[0][1].lower()]
    # Build the path to the PSF library installed with the code:
    # drizzlepac/pars/psfs/<instrument>/<detector>
    if path_root is None:
        path_root = os.path.split(os.path.dirname(__file__))[0]
    for psf_path in PSF_PATH:
        path_root = os.path.join(path_root, psf_path)
    path_root = os.path.join(path_root, inst_det[0], inst_det[1])
    # One candidate filename per instrument/detector/filter combination
    psf_names = [os.path.join(path_root, "{}.fits".format("_".join(entry))) for entry in filter_list]
    # Again, remove duplicate entries
    psf_names = list(dict.fromkeys(psf_names))
    log.debug('Looking for Library PSFs:\n {}'.format(psf_names))
    psfs_exist = [os.path.exists(fname) for fname in psf_names]
    if not all(psfs_exist):
        log.error('Some PSF NOT found for keywords {} \n with values of {}'.format(INSTR_KWS, filter_list))
        if not any(psfs_exist):
            log.error('NO PSF(s) found for keywords {} \n with values of {}'.format(INSTR_KWS, filter_list))
            raise ValueError
    log.info("Using Library PSF(s):\n {}".format([os.path.basename(name) for name in psf_names]))
    return psf_names
def get_filter_names(imgname):
    """Derive [instrument, detector, filter] from an exposure's PHOTMODE.

    Parameters
    -----------
    imgname : str
        Filename of observation to extract filter names from

    Returns
    --------
    kw_vals : list
        List containing instrument, detector, and filter names in that order,
        all lower-case.
    """
    hdulist = fits.open(imgname)
    # PHOTMODE lives in the SCI extension for MEF files, else the primary header
    photmode = hdulist[('sci', 1)].header.get('photmode')
    if photmode is None:
        photmode = hdulist[0].header.get('photmode')
    detector = hdulist[0].header.get('detector')
    hdulist.close()
    del hdulist
    # Remove duplicate entries from photmode, if any (like 2 CLEAR filters)
    filter_list = list(dict.fromkeys(photmode.split(' ')))
    # Key off of instrument and detector (first 2 members of photmode)
    kw_vals = [filter_list[0].lower(), detector.lower()]
    # Drop calibration, MJD, polarizer, grism and prism entries
    for prefix in ['CAL', 'MJD', 'POL', 'GR', 'PR']:
        doomed = [filter_list.index(name) for name in filter_list if name.startswith(prefix)]
        for position in reversed(doomed):
            del filter_list[position]
    # Now select the widest-band filter used for this observation;
    # this accounts for cross-filter usage.
    found_filter = False
    if len(filter_list[2:]) >= 1:
        # Scan bandpass suffixes from widest ('lp') to narrowest ('n') band.
        # NOTE(review): the ``break`` below only exits the inner loop, so when
        # several filters match different bandpass suffixes more than one can
        # be appended -- confirm whether a single (widest) filter was intended.
        for suffix in ['lp', 'w', 'm', 'n']:
            for filt in filter_list[2:]:
                if filt.lower().endswith(suffix):
                    kw_vals += [filt.lower()]
                    found_filter = True
                    break
    if not found_filter:
        kw_vals += ['clear']
    # The result at this point will be:
    # kw_vals = [instrument, detector, selected filter]
    return kw_vals
def convert_library_psf(calimg, drzimg, psfs,
                        total_flux=100000.0,
                        pixfrac=1.0,
                        clean_psfs=True):
    """Drizzle library PSFs to match science image.

    Each library PSF is first embedded in a blank copy of ``calimg`` (via
    ``_create_input_psf``), then drizzled using the WCS of ``drzimg`` so the
    output PSF matches the orientation and plate scale of the drizzled
    science data.

    Parameters
    ----------
    calimg : str
        Filename of a calibrated input exposure used as the FLT template.
    drzimg : str
        Filename of the drizzled science image providing the reference WCS.
    psfs : list
        Filenames of the library PSFs to drizzle.
    total_flux : float, optional
        Total flux each PSF is scaled to before drizzling.
    pixfrac : float, optional
        ``final_pixfrac`` value passed to astrodrizzle.
    clean_psfs : bool, optional
        If True, delete the intermediate PSF FLT files afterwards.

    Returns
    -------
    psf_drz_output : str
        Filename of the drizzled PSF product.
    """
    psf_flt_names = [_create_input_psf(name, calimg, total_flux) for name in psfs]
    # Configure astrodrizzle so that ONLY the final drizzle step runs
    drizzle_pars = {
        'build': True,
        'context': False,
        'preserve': False,
        'clean': True,
        'in_memory': True,
        'resetbits': 0,
        'static': False,
        'skysub': False,
        'driz_separate': False,
        'median': False,
        'blot': False,
        'driz_cr': False,
        'driz_combine': True,
        'final_wcs': True,
        'final_fillval': 0.0,
        'final_pixfrac': pixfrac,
        'final_refimage': "{}[1]".format(drzimg),
    }
    psf_drz_name = psf_flt_names[0].replace('_flt.fits', '')  # astrodrizzle will add suffix
    psf_drz_output = "{}_drz.fits".format(psf_drz_name)
    # Drizzle PSF FLT file to match orientation and plate scale of the
    # drizzled science (total detection) image
    astrodrizzle.AstroDrizzle(input=psf_flt_names,
                              output=psf_drz_name,
                              **drizzle_pars)
    if clean_psfs:
        # clean up intermediate files
        for psf_name in psf_flt_names:
            os.remove(psf_name)
    return psf_drz_output
def _create_input_psf(psf_name, calimg, total_flux):
    """Write an FLT file containing a library PSF embedded in a blank science array.

    The library PSF image is scaled to ``total_flux``, apodized with a 2-D
    Hamming window to suppress edge effects, and inserted at the center of a
    zeroed-out copy of the science array from ``calimg``.  A copy of
    ``calimg`` named ``<psf_root>_psf_flt.fits`` is written with that array
    as its first SCI extension so it can be drizzled with the science WCS.

    Parameters
    ----------
    psf_name : str
        Filename of the library PSF FITS file.
    calimg : str
        Filename of a calibrated science exposure used as the FLT template.
    total_flux : float
        Total flux to scale the PSF image to before insertion.

    Returns
    -------
    psf_flt_name : str
        Filename of the newly written PSF FLT file.
    """
    # Create copy of input science image based on input psf filename
    psf_root = os.path.basename(psf_name)
    lib_psf_arr = fits.getdata(psf_name)
    lib_psf_arr *= total_flux
    # Half-size of the library PSF array along each axis
    lib_size = [lib_psf_arr.shape[0] // 2, lib_psf_arr.shape[1] // 2]
    # create hamming 2d filter to avoid edge effects
    # NOTE(review): the window is built from the first axis only, so this
    # assumes a square PSF array -- confirm library PSFs are square.
    h = ss.hamming(lib_psf_arr.shape[0])
    h2d = np.sqrt(np.outer(h, h))
    lib_psf_arr *= h2d
    # This will be the name of the new file containing the library PSF that will be drizzled to
    # match the input image `drzimg`
    psf_flt_name = psf_root.replace('.fits', '_psf_flt.fits')
    # create version of PSF that will be drizzled (blank array with the
    # science array's geometry)
    psf_base = fits.getdata(calimg, ext=1) * 0.0
    # Copy library PSF into this array, centered; ``edge`` accounts for
    # odd-sized PSF arrays when slicing
    out_cen = [psf_base.shape[0] // 2, psf_base.shape[1] // 2]
    edge = (lib_psf_arr.shape[0] % 2, lib_psf_arr.shape[1] % 2)
    psf_base[out_cen[0] - lib_size[0]: out_cen[0] + lib_size[0] + edge[0],
             out_cen[1] - lib_size[1]: out_cen[1] + lib_size[1] + edge[1]] = lib_psf_arr
    # Write out library PSF FLT file now
    psf_flt = shutil.copy(calimg, psf_flt_name)
    # Update file with library PSF
    flt_hdu = fits.open(psf_flt, mode='update')
    flt_hdu[('sci', 1)].data = psf_base
    flt_hdu[('sci', 1)].header['psf_nx'] = psf_base.shape[1]
    flt_hdu[('sci', 1)].header['psf_ny'] = psf_base.shape[0]
    num_sci = fu.countExtn(calimg)
    # Also zero out all other science data in this 'PSF' file.
    if num_sci > 1:
        for extn in range(2, num_sci + 1):
            flt_hdu[('sci', extn)].data *= 0.0
    flt_hdu.close()
    del flt_hdu, lib_psf_arr
    return psf_flt_name
def get_cutouts(data, star_list, kernel, threshold_eff, exclude_border=False):
    """Build `_StarCutout` objects around each (x, y) position in ``star_list``.

    Each entry of ``star_list`` is treated as an (x, y) peak position; a
    kernel-sized cutout centered on the peak is extracted from ``data``.
    Cutouts that would spill over the image edge or that contain NaN pixels
    are skipped.
    """
    cutouts = []
    # No separately convolved image is available here, so the raw data stands
    # in for the convolved data expected by _StarCutout.
    conv_data = data
    for row in star_list:
        xpk, ypk = row[0], row[1]
        # extract the object from the data, centered on the peak pixel,
        # with the same size as the kernel
        left = xpk - kernel.xradius
        right = xpk + kernel.xradius + 1
        top = ypk - kernel.yradius
        bottom = ypk + kernel.yradius + 1
        # Skip any source whose cutout would extend past the image edge
        if left < 0 or right > data.shape[1]:
            continue  # pragma: no cover
        if top < 0 or bottom > data.shape[0]:
            continue  # pragma: no cover
        region = (slice(top, bottom), slice(left, right))
        data_cutout = data[region]
        # Skip slices which include pixels with a value of NaN
        if np.isnan(data_cutout).any():
            continue
        conv_cutout = conv_data[region]
        if exclude_border:
            # correct pixel values for the previous image padding
            left -= kernel.xradius
            right -= kernel.xradius
            top -= kernel.yradius
            bottom -= kernel.yradius
            xpk -= kernel.xradius
            ypk -= kernel.yradius
            region = (slice(top, bottom), slice(left, right))
        cutouts.append(_StarCutout(data_cutout, conv_cutout, region,
                                    xpk, ypk, kernel, threshold_eff))
    return cutouts
class UserStarFinder(StarFinderBase):
    """
    Measure stars in an image using the DAOFIND (`Stetson 1987
    <https://ui.adsabs.harvard.edu/abs/1987PASP...99..191S/abstract>`_)
    algorithm. Stars measured using DAOFIND can be identified using
    any algorithm defined by the user, with the results passed in as a
    simple list of coords.

    DAOFIND (`Stetson 1987; PASP 99, 191
    <https://ui.adsabs.harvard.edu/abs/1987PASP...99..191S/abstract>`_)
    searches images for local density maxima that have a peak amplitude
    greater than ``threshold`` (approximately; ``threshold`` is applied
    to a convolved image) and have a size and shape similar to the
    defined 2D Gaussian kernel. The Gaussian kernel is defined by the
    ``fwhm``, ``ratio``, ``theta``, and ``sigma_radius`` input
    parameters.

    ``DAOStarFinder`` finds the object centroid by fitting the marginal x
    and y 1D distributions of the Gaussian kernel to the marginal x and
    y distributions of the input (unconvolved) ``data`` image.

    ``DAOStarFinder`` calculates the object roundness using two methods. The
    ``roundlo`` and ``roundhi`` bounds are applied to both measures of
    roundness. The first method (``roundness1``; called ``SROUND`` in
    `DAOFIND`_) is based on the source symmetry and is the ratio of a
    measure of the object's bilateral (2-fold) to four-fold symmetry.
    The second roundness statistic (``roundness2``; called ``GROUND`` in
    `DAOFIND`_) measures the ratio of the difference in the height of
    the best fitting Gaussian function in x minus the best fitting
    Gaussian function in y, divided by the average of the best fitting
    Gaussian functions in x and y. A circular source will have a zero
    roundness. A source extended in x or y will have a negative or
    positive roundness, respectively.

    The sharpness statistic measures the ratio of the difference between
    the height of the central pixel and the mean of the surrounding
    non-bad pixels in the convolved image, to the height of the best
    fitting Gaussian function at that point.

    Parameters
    ----------
    threshold : float
        The absolute image value above which to select sources.
    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the
        Gaussian kernel in units of pixels.
    ratio : float, optional
        The ratio of the minor to major axis standard deviations of the
        Gaussian kernel. ``ratio`` must be strictly positive and less
        than or equal to 1.0. The default is 1.0 (i.e., a circular
        Gaussian kernel).
    theta : float, optional
        The position angle (in degrees) of the major axis of the
        Gaussian kernel measured counter-clockwise from the positive x
        axis.
    sigma_radius : float, optional
        The truncation radius of the Gaussian kernel in units of sigma
        (standard deviation) [``1 sigma = FWHM /
        (2.0*sqrt(2.0*log(2.0)))``].
    sharplo : float, optional
        The lower bound on sharpness for object detection.
    sharphi : float, optional
        The upper bound on sharpness for object detection.
    roundlo : float, optional
        The lower bound on roundness for object detection.
    roundhi : float, optional
        The upper bound on roundness for object detection.
    sky : float, optional
        The background sky level of the image. Setting ``sky`` affects
        only the output values of the object ``peak``, ``flux``, and
        ``mag`` values. The default is 0.0, which should be used to
        replicate the results from `DAOFIND`_.
    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders. The default is
        `False`, which is the mode used by `DAOFIND`_.
    brightest : int, None, optional
        Number of brightest objects to keep after sorting the full object list.
        If ``brightest`` is set to `None`, all objects will be selected.
    peakmax : float, None, optional
        Maximum peak pixel value in an object. Only objects whose peak pixel
        values are *strictly smaller* than ``peakmax`` will be selected.
        This may be used to exclude saturated sources. By default, when
        ``peakmax`` is set to `None`, all objects will be selected.

        .. warning::
            `DAOStarFinder` automatically excludes objects whose peak
            pixel values are negative. Therefore, setting ``peakmax`` to a
            non-positive value would result in exclusion of all objects.
    coords : `~astropy.table.Table` or `None`
        A table, such as returned by `find_peaks`, with approximate X,Y positions
        of identified sources.
        If not provided, the DAOFind algorithm will be used to find sources.

    See Also
    --------
    IRAFStarFinder

    Notes
    -----
    For the convolution step, this routine sets pixels beyond the image
    borders to 0.0. The equivalent parameters in `DAOFIND`_ are
    ``boundary='constant'`` and ``constant=0.0``.

    The main differences between `~photutils.detection.DAOStarFinder`
    and `~photutils.detection.IRAFStarFinder` are:

    * `~photutils.detection.IRAFStarFinder` always uses a 2D
      circular Gaussian kernel, while
      `~photutils.detection.DAOStarFinder` can use an elliptical
      Gaussian kernel.

    * `~photutils.detection.IRAFStarFinder` calculates the objects'
      centroid, roundness, and sharpness using image moments.

    References
    ----------
    .. [1] <NAME>. 1987; PASP 99, 191
           (https://ui.adsabs.harvard.edu/abs/1987PASP...99..191S/abstract)
    .. [2] https://iraf.net/irafhelp.php?val=daofind

    .. _DAOFIND: https://iraf.net/irafhelp.php?val=daofind
    """
    def __init__(self, threshold, fwhm, ratio=1.0, theta=0.0,
                 sigma_radius=1.5, sharplo=0.2, sharphi=1.0, roundlo=-1.0,
                 roundhi=1.0, sky=0.0, exclude_border=False,
                 coords=None,
                 brightest=None, peakmax=None):
        if not np.isscalar(threshold):
            raise TypeError('threshold must be a scalar value.')
        self.threshold = threshold
        if not np.isscalar(fwhm):
            raise TypeError('fwhm must be a scalar value.')
        self.fwhm = fwhm
        self.coords = coords
        self.ratio = ratio
        self.theta = theta
        self.sigma_radius = sigma_radius
        self.sharplo = sharplo
        self.sharphi = sharphi
        self.roundlo = roundlo
        self.roundhi = roundhi
        self.sky = sky
        self.exclude_border = exclude_border
        # Gaussian kernel used for detection; its normalization error also
        # rescales the user threshold into the effective detection threshold.
        self.kernel = _StarFinderKernel(self.fwhm, self.ratio, self.theta,
                                          self.sigma_radius)
        self.threshold_eff = self.threshold * self.kernel.relerr
        self.brightest = brightest
        self.peakmax = peakmax
        # Populated by find_stars() with the cutouts of the detected sources
        self._star_cutouts = None
    def find_stars(self, data, mask=None):
        """
        Find stars in an astronomical image.

        Parameters
        ----------
        data : 2D array_like
            The 2D image array.
        mask : 2D bool array, optional
            A boolean mask with the same shape as ``data``, where a
            `True` value indicates the corresponding element of ``data``
            is masked. Masked pixels are ignored when searching for
            stars.

        Returns
        -------
        table : `~astropy.table.Table` or `None`
            A table of found stars with the following parameters:

            * ``id``: unique object identification number.
            * ``xcentroid, ycentroid``: object centroid.
            * ``sharpness``: object sharpness.
            * ``roundness1``: object roundness based on symmetry.
            * ``roundness2``: object roundness based on marginal Gaussian
              fits.
            * ``npix``: the total number of pixels in the Gaussian kernel
              array.
            * ``sky``: the input ``sky`` parameter.
            * ``peak``: the peak, sky-subtracted, pixel value of the object.
            * ``flux``: the object flux calculated as the peak density in
              the convolved image divided by the detection threshold. This
              derivation matches that of `DAOFIND`_ if ``sky`` is 0.0.
            * ``mag``: the object instrumental magnitude calculated as
              ``-2.5 * log10(flux)``. The derivation matches that of
              `DAOFIND`_ if ``sky`` is 0.0.

            `None` is returned if no stars are found.
        """
        # NOTE(review): an empty ``coords`` table is falsy and falls through
        # to the DAOFind-style search below -- confirm that is the intent.
        if self.coords:
            star_cutouts = get_cutouts(data, self.coords,
                                       self.kernel,
                                       self.threshold_eff,
                                       exclude_border=self.exclude_border)
        else:
            star_cutouts = _find_stars(data, self.kernel, self.threshold_eff,
                                       mask=mask,
                                       exclude_border=self.exclude_border)
        if star_cutouts is None:
            warnings.warn('No sources were found.', NoDetectionsWarning)
            return None
        self._star_cutouts = star_cutouts
        star_props = []
        for star_cutout in star_cutouts:
            props = _DAOFindProperties(star_cutout, self.kernel, self.sky)
            # Reject sources whose marginal Gaussian fits failed (NaN results)
            if np.isnan(props.dx_hx).any() or np.isnan(props.dy_hy).any():
                continue
            # Apply the sharpness, both roundness, and peak selection criteria
            if (props.sharpness <= self.sharplo or
                    props.sharpness >= self.sharphi):
                continue
            if (props.roundness1 <= self.roundlo or
                    props.roundness1 >= self.roundhi):
                continue
            if (props.roundness2 <= self.roundlo or
                    props.roundness2 >= self.roundhi):
                continue
            if self.peakmax is not None and props.peak >= self.peakmax:
                continue
            star_props.append(props)
        nstars = len(star_props)
        if nstars == 0:
            warnings.warn('Sources were found, but none pass the sharpness '
                          'and roundness criteria.', NoDetectionsWarning)
            return None
        if self.brightest is not None:
            # Keep only the ``brightest`` sources, preserving original order
            fluxes = [props.flux for props in star_props]
            idx = sorted(np.argsort(fluxes)[-self.brightest:].tolist())
            star_props = [star_props[k] for k in idx]
            nstars = len(star_props)
        table = Table()
        table['id'] = np.arange(nstars) + 1
        columns = ('xcentroid', 'ycentroid', 'sharpness', 'roundness1',
                   'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag')
        for column in columns:
            table[column] = [getattr(props, column) for props in star_props]
        return table
# -----------------------------------------------------------------------------
#
# Main user interface
#
# -----------------------------------------------------------------------------
def find_point_sources(drzname, data=None, mask=None,
                      def_fwhm=2.0,
                      box_size=11, block_size=(1024, 1024),
                      diagnostic_mode=False):
    """ Identify point sources most similar to TinyTim PSFs

    Primary user-interface to identifying point-sources in the
    drizzle product image most similar to the TinyTim PSF for the
    filter-combination closest to that found in the drizzled image.
    The PSFs are pulled, by default, from those installed with the
    code as created using the TinyTim PSF modelling software for
    every direct image filter used by the ACS and WFC3 cameras on HST.

    .. note: Sources identified by this function will only have integer pixel
        positions.

    Parameters
    -----------
    drzname : `str`
        Filename of the drizzled image which should be used to find
        point sources. This will provide the information on the filters
        used on the all the input exposures.
    data : `numpy.ndarray`, optional
        If provided, will be used as the image to be evaluated instead
        of opening the file specified in `drzname`.
    mask : `numpy.ndarray`, optional
        If provided, this mask will be used to eliminate regions in the
        input array from being searched for point sources.  Pixels with
        a value of 0 in the mask indicate what pixels should be ignored.
    def_fwhm : `float`, optional
        Default FWHM to use in case the model PSF can not be accurately
        measured by `photutils`.
    box_size : `int`, optional
        Size of the box used to recognize each point source.
    block_size : `tuple`, optional
        (Y, X) size of the block used by the FFT to process the drizzled image.
    diagnostic_mode : `bool`, optional
        Specify whether or not to provide additional diagnostic messages
        and output while processing.

    Returns
    -------
    peaks : `astropy.table.Table`
        Output from `photutils.detection.find_peaks` for all identified sources
        with columns `x_peak`, `y_peak` and `peak_value`.
    psf_fwhm : `float`
        FWHM (in pixels) of PSF used to identify the sources.
    """
    # determine the name of at least 1 input exposure
    calname = determine_input_image(drzname)
    # half-width of the box placed around each deconvolved peak below
    sep = box_size // 2
    if not isinstance(block_size, tuple):
        block_size = tuple(block_size)
    if data is None:
        # load image
        drzhdu = fits.open(drzname)
        # Simple FITS files keep the science array in the primary HDU
        sciext = 0 if len(drzhdu) == 1 else ('sci', 1)
        drz = drzhdu[sciext].data.copy()
        drzhdr = drzhdu[sciext].header.copy()
        drzhdu.close()
        del drzhdu
        if mask is not None:
            # Apply any user-specified mask
            drz *= mask
    else:
        drz = data
        drzhdr = None
    if mask is not None:
        # invert the mask (find_peaks masks where True)
        invmask = np.invert(mask)
    else:
        invmask = None
    # Identify PSF for image
    psfnames = find_psf(drzname)
    # Load PSF and convert to be consistent (orientation) with image
    clean_psfs = True if not diagnostic_mode else False
    drzpsfname = convert_library_psf(calname, drzname, psfnames,
                                     pixfrac=1.5,
                                     clean_psfs=clean_psfs)
    drzpsf = fits.getdata(drzpsfname)
    # try to measure just the core of the PSF
    # This will be a lot less likely to result in invalid/impossible FWHM values
    max_y, max_x = np.where(drzpsf == drzpsf.max())
    xc = max_x[0]
    yc = max_y[0]
    psf_core = drzpsf[yc - box_size: yc + box_size, xc - box_size: xc + box_size]
    psf_fwhm = amutils.find_fwhm(psf_core, def_fwhm)
    # check value: reject negative or implausibly large FWHM measurements
    if psf_fwhm < 0 or psf_fwhm > 2.0 * def_fwhm:
        # Try a different starting guess for the FWHM
        psf_fwhm = amutils.find_fwhm(psf_core, def_fwhm + 1)
        if psf_fwhm < 0 or psf_fwhm > 2.0 * def_fwhm:
            log.debug("FWHM computed as {}. Reverting to using default FWHM of {}".format(psf_fwhm, def_fwhm))
            psf_fwhm = def_fwhm
    log.info("Library PSF FWHM computed as {}.".format(psf_fwhm))
    # deconvolve the image with the PSF
    decdrz = fft_deconv_img(drz, drzpsf,
                            block_size=block_size)
    if mask is not None:
        # Shrink the valid region so sources near masked edges are dropped
        decmask = ndimage.binary_erosion(mask, iterations=box_size)
        decdrz *= decmask
    if diagnostic_mode:
        fits.PrimaryHDU(data=decdrz,
                        header=drzhdr).writeto(drzname.replace('.fits', '_deconv.fits'),
                                                overwrite=True)
        if mask is not None:
            fits.PrimaryHDU(data=decmask.astype(np.uint16)).writeto(drzname.replace('.fits', '_deconv_mask.fits'),
                                                                    overwrite=True)
    # find sources in deconvolved image
    # NOTE(review): photutils ``find_peaks`` can return None when nothing is
    # detected, which would make the loop below raise -- confirm inputs
    # always yield at least one peak.
    dec_peaks = find_peaks(decdrz, threshold=0.0,
                           mask=invmask, box_size=box_size)
    # Use these positions as an initial guess for the final position
    peak_mask = (drz * 0.).astype(np.uint8)
    # Do this by creating a mask for the original input that only
    # includes those pixels within ``sep`` pixels of each peak from the
    # deconvolved image.
    for peak in dec_peaks:
        x = peak['x_peak']
        y = peak['y_peak']
        peak_mask[y - sep: y + sep + 1, x - sep: x + sep + 1] = 1
    drz *= peak_mask
    if diagnostic_mode:
        fits.PrimaryHDU(data=drz).writeto(drzname.replace('.fits', '_peak_mask.fits'), overwrite=True)
    # Use this new mask to find the actual peaks in the original input
    # but only to integer pixel precision.
    peaks = find_peaks(drz, threshold=0., box_size=box_size // 2)
    if len(peaks) == 0:
        peaks = None
    # Remove PSF used, unless running in diagnostic_mode
    if not diagnostic_mode:
        if os.path.exists(drzpsfname):
            os.remove(drzpsfname)
    del peak_mask
    return peaks, psf_fwhm
def determine_input_image(image):
    """Return the name of one input exposure of the given drizzle product.

    The value is read from the 'D001DATA' keyword of the primary header and
    any '[ext]' suffix is stripped.  An empty keyword value is logged and
    returned unchanged.
    """
    with fits.open(image) as hdu:
        calimg = hdu[0].header['d001data']
    if not calimg:
        log.warn('No input image found in "D001DATA" keyword for {}'.format(image))
    else:
        calimg = calimg.split('[')[0]
    return calimg
| [
"os.remove",
"numpy.abs",
"numpy.nan_to_num",
"numpy.invert",
"astropy.io.fits.PrimaryHDU",
"numpy.isnan",
"photutils.detection._utils._StarFinderKernel",
"numpy.argsort",
"numpy.arange",
"os.path.join",
"numpy.fft.ifft2",
"shutil.copy",
"numpy.copy",
"astropy.io.fits.getdata",
"os.path.... | [((1518, 1650), 'stsci.tools.logutil.create_logger', 'logutil.create_logger', (['__name__'], {'level': 'logutil.logging.NOTSET', 'stream': 'sys.stdout', 'format': 'SPLUNK_MSG_FORMAT', 'datefmt': 'MSG_DATEFMT'}), '(__name__, level=logutil.logging.NOTSET, stream=sys.\n stdout, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)\n', (1539, 1650), False, 'from stsci.tools import logutil\n'), ((348, 383), 'distutils.version.LooseVersion', 'LooseVersion', (['photutils.__version__'], {}), '(photutils.__version__)\n', (360, 383), False, 'from distutils.version import LooseVersion\n'), ((3471, 3494), 'numpy.nan_to_num', 'np.nan_to_num', (['psf', '(0.0)'], {}), '(psf, 0.0)\n', (3484, 3494), True, 'import numpy as np\n'), ((5470, 5486), 'numpy.fft.fft2', 'np.fft.fft2', (['psf'], {}), '(psf)\n', (5481, 5486), True, 'import numpy as np\n'), ((5565, 5587), 'numpy.fft.fft2', 'np.fft.fft2', (['img_block'], {}), '(img_block)\n', (5576, 5587), True, 'import numpy as np\n'), ((6322, 6364), 'numpy.zeros', 'np.zeros', (['img_block_shape'], {'dtype': 'psf.dtype'}), '(img_block_shape, dtype=psf.dtype)\n', (6330, 6364), True, 'import numpy as np\n'), ((6418, 6438), 'numpy.where', 'np.where', (['(psf != 0.0)'], {}), '(psf != 0.0)\n', (6426, 6438), True, 'import numpy as np\n'), ((8140, 8176), 'numpy.zeros', 'np.zeros', (['new_shape'], {'dtype': 'arr.dtype'}), '(new_shape, dtype=arr.dtype)\n', (8148, 8176), True, 'import numpy as np\n'), ((9799, 9847), 'skimage.util.view_as_blocks', 'skimage.util.view_as_blocks', (['new_arr', 'block_size'], {}), '(new_arr, block_size)\n', (9826, 9847), False, 'import skimage\n'), ((11052, 11094), 'numpy.zeros', 'np.zeros', (['new_shape'], {'dtype': 'block_arr.dtype'}), '(new_shape, dtype=block_arr.dtype)\n', (11060, 11094), True, 'import numpy as np\n'), ((12162, 12180), 'astropy.io.fits.open', 'fits.open', (['imgname'], {}), '(imgname)\n', (12171, 12180), True, 'from astropy.io import fits as fits\n'), ((13134, 13181), 'os.path.join', 
'os.path.join', (['path_root', 'kw_vals[0]', 'kw_vals[1]'], {}), '(path_root, kw_vals[0], kw_vals[1])\n', (13146, 13181), False, 'import os\n'), ((14483, 14501), 'astropy.io.fits.open', 'fits.open', (['imgname'], {}), '(imgname)\n', (14492, 14501), True, 'from astropy.io import fits as fits\n'), ((17729, 17755), 'os.path.basename', 'os.path.basename', (['psf_name'], {}), '(psf_name)\n', (17745, 17755), False, 'import os\n'), ((17774, 17796), 'astropy.io.fits.getdata', 'fits.getdata', (['psf_name'], {}), '(psf_name)\n', (17786, 17796), True, 'from astropy.io import fits as fits\n'), ((17960, 17992), 'scipy.signal.hamming', 'ss.hamming', (['lib_psf_arr.shape[0]'], {}), '(lib_psf_arr.shape[0])\n', (17970, 17992), True, 'import scipy.signal as ss\n'), ((18732, 18765), 'shutil.copy', 'shutil.copy', (['calimg', 'psf_flt_name'], {}), '(calimg, psf_flt_name)\n', (18743, 18765), False, 'import shutil\n'), ((18816, 18849), 'astropy.io.fits.open', 'fits.open', (['psf_flt'], {'mode': '"""update"""'}), "(psf_flt, mode='update')\n", (18825, 18849), True, 'from astropy.io import fits as fits\n'), ((19026, 19046), 'stsci.tools.fileutil.countExtn', 'fu.countExtn', (['calimg'], {}), '(calimg)\n', (19038, 19046), True, 'from stsci.tools import fileutil as fu\n'), ((35150, 35174), 'astropy.io.fits.getdata', 'fits.getdata', (['drzpsfname'], {}), '(drzpsfname)\n', (35162, 35174), True, 'from astropy.io import fits as fits\n'), ((36722, 36788), 'photutils.detection.find_peaks', 'find_peaks', (['decdrz'], {'threshold': '(0.0)', 'mask': 'invmask', 'box_size': 'box_size'}), '(decdrz, threshold=0.0, mask=invmask, box_size=box_size)\n', (36732, 36788), False, 'from photutils.detection import StarFinderBase, find_peaks\n'), ((37503, 37557), 'photutils.detection.find_peaks', 'find_peaks', (['drz'], {'threshold': '(0.0)', 'box_size': '(box_size // 2)'}), '(drz, threshold=0.0, box_size=box_size // 2)\n', (37513, 37557), False, 'from photutils.detection import StarFinderBase, find_peaks\n'), 
((3642, 3653), 'numpy.sort', 'np.sort', (['P2'], {}), '(P2)\n', (3649, 3653), True, 'import numpy as np\n'), ((3920, 3958), 'numpy.nan_to_num', 'np.nan_to_num', (['img'], {'copy': '(True)', 'nan': '(0.0)'}), '(img, copy=True, nan=0.0)\n', (3933, 3958), True, 'import numpy as np\n'), ((13521, 13542), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (13535, 13542), False, 'import os\n'), ((18011, 18025), 'numpy.outer', 'np.outer', (['h', 'h'], {}), '(h, h)\n', (18019, 18025), True, 'import numpy as np\n'), ((18312, 18339), 'astropy.io.fits.getdata', 'fits.getdata', (['calimg'], {'ext': '(1)'}), '(calimg, ext=1)\n', (18324, 18339), True, 'from astropy.io import fits as fits\n'), ((27459, 27530), 'photutils.detection._utils._StarFinderKernel', '_StarFinderKernel', (['self.fwhm', 'self.ratio', 'self.theta', 'self.sigma_radius'], {}), '(self.fwhm, self.ratio, self.theta, self.sigma_radius)\n', (27476, 27530), False, 'from photutils.detection._utils import _StarCutout, _StarFinderKernel, _find_stars\n'), ((31325, 31332), 'astropy.table.Table', 'Table', ([], {}), '()\n', (31330, 31332), False, 'from astropy.table import Table\n'), ((34301, 34319), 'astropy.io.fits.open', 'fits.open', (['drzname'], {}), '(drzname)\n', (34310, 34319), True, 'from astropy.io import fits as fits\n'), ((34724, 34739), 'numpy.invert', 'np.invert', (['mask'], {}), '(mask)\n', (34733, 34739), True, 'import numpy as np\n'), ((36152, 36201), 'scipy.ndimage.binary_erosion', 'ndimage.binary_erosion', (['mask'], {'iterations': 'box_size'}), '(mask, iterations=box_size)\n', (36174, 36201), False, 'from scipy import ndimage\n'), ((37699, 37725), 'os.path.exists', 'os.path.exists', (['drzpsfname'], {}), '(drzpsfname)\n', (37713, 37725), False, 'import os\n'), ((37949, 37965), 'astropy.io.fits.open', 'fits.open', (['image'], {}), '(image)\n', (37958, 37965), True, 'from astropy.io import fits as fits\n'), ((5712, 5722), 'numpy.conj', 'np.conj', (['P'], {}), '(P)\n', (5719, 5722), True, 
'import numpy as np\n'), ((13084, 13117), 'os.path.join', 'os.path.join', (['path_root', 'psf_path'], {}), '(path_root, psf_path)\n', (13096, 13117), False, 'import os\n'), ((17542, 17561), 'os.remove', 'os.remove', (['psf_name'], {}), '(psf_name)\n', (17551, 17561), False, 'import os\n'), ((20602, 20692), 'photutils.detection._utils._StarCutout', '_StarCutout', (['data_cutout', 'convdata_cutout', 'slices', 'xpeak', 'ypeak', 'kernel', 'threshold_eff'], {}), '(data_cutout, convdata_cutout, slices, xpeak, ypeak, kernel,\n threshold_eff)\n', (20613, 20692), False, 'from photutils.detection._utils import _StarCutout, _StarFinderKernel, _find_stars\n'), ((26875, 26897), 'numpy.isscalar', 'np.isscalar', (['threshold'], {}), '(threshold)\n', (26886, 26897), True, 'import numpy as np\n'), ((27015, 27032), 'numpy.isscalar', 'np.isscalar', (['fwhm'], {}), '(fwhm)\n', (27026, 27032), True, 'import numpy as np\n'), ((29689, 29790), 'photutils.detection._utils._find_stars', '_find_stars', (['data', 'self.kernel', 'self.threshold_eff'], {'mask': 'mask', 'exclude_border': 'self.exclude_border'}), '(data, self.kernel, self.threshold_eff, mask=mask,\n exclude_border=self.exclude_border)\n', (29700, 29790), False, 'from photutils.detection._utils import _StarCutout, _StarFinderKernel, _find_stars\n'), ((29911, 29971), 'warnings.warn', 'warnings.warn', (['"""No sources were found."""', 'NoDetectionsWarning'], {}), "('No sources were found.', NoDetectionsWarning)\n", (29924, 29971), False, 'import warnings\n'), ((30125, 30179), 'photutils.detection.daofinder._DAOFindProperties', '_DAOFindProperties', (['star_cutout', 'self.kernel', 'self.sky'], {}), '(star_cutout, self.kernel, self.sky)\n', (30143, 30179), False, 'from photutils.detection.daofinder import _DAOFindProperties\n'), ((30884, 31002), 'warnings.warn', 'warnings.warn', (['"""Sources were found, but none pass the sharpness and roundness criteria."""', 'NoDetectionsWarning'], {}), "(\n 'Sources were found, but none pass the 
sharpness and roundness criteria.',\n NoDetectionsWarning)\n", (30897, 31002), False, 'import warnings\n'), ((31355, 31372), 'numpy.arange', 'np.arange', (['nstars'], {}), '(nstars)\n', (31364, 31372), True, 'import numpy as np\n'), ((37739, 37760), 'os.remove', 'os.remove', (['drzpsfname'], {}), '(drzpsfname)\n', (37748, 37760), False, 'import os\n'), ((3609, 3621), 'numpy.copy', 'np.copy', (['psf'], {}), '(psf)\n', (3616, 3621), True, 'import numpy as np\n'), ((3873, 3896), 'numpy.where', 'np.where', (['(img == np.nan)'], {}), '(img == np.nan)\n', (3881, 3896), True, 'import numpy as np\n'), ((5924, 5939), 'numpy.fft.ifft2', 'np.fft.ifft2', (['M'], {}), '(M)\n', (5936, 5939), True, 'import numpy as np\n'), ((12996, 13021), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13011, 13021), False, 'import os\n'), ((13931, 13953), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (13947, 13953), False, 'import os\n'), ((20128, 20149), 'numpy.isnan', 'np.isnan', (['data_cutout'], {}), '(data_cutout)\n', (20136, 20149), True, 'import numpy as np\n'), ((36261, 36304), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'decdrz', 'header': 'drzhdr'}), '(data=decdrz, header=drzhdr)\n', (36276, 36304), True, 'from astropy.io import fits as fits\n'), ((37281, 37306), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'data': 'drz'}), '(data=drz)\n', (37296, 37306), True, 'from astropy.io import fits as fits\n'), ((5726, 5735), 'numpy.abs', 'np.abs', (['P'], {}), '(P)\n', (5732, 5735), True, 'import numpy as np\n'), ((30196, 30217), 'numpy.isnan', 'np.isnan', (['props.dx_hx'], {}), '(props.dx_hx)\n', (30204, 30217), True, 'import numpy as np\n'), ((30227, 30248), 'numpy.isnan', 'np.isnan', (['props.dy_hy'], {}), '(props.dy_hy)\n', (30235, 30248), True, 'import numpy as np\n'), ((31170, 31188), 'numpy.argsort', 'np.argsort', (['fluxes'], {}), '(fluxes)\n', (31180, 31188), True, 'import numpy as np\n')] |
import logging
import numpy as np
import matplotlib
# Select a non-interactive backend so figures can be rendered headless
# (e.g. on a training server without a display); must run before pyplot import.
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
# Root logger handle; module code below also calls logging.* directly.
logger = logging.getLogger()
from activeClassifier.visualisation.base import Visualiser, visualisation_level
from activeClassifier.tools.utility import softmax
# annoying UserWarning from plt.imshow in _glimpse_patches_until_t()
import warnings
warnings.filterwarnings(
    action='ignore',
    category=UserWarning,
    module=r'.*matplotlib'
)
class Visualization_predRSSM(Visualiser):
    """Evaluation plots for the predictive-RSSM active classifier.

    Every ``plot_*`` method consumes an evaluation dictionary ``d`` of numpy
    arrays (images ``d['x']``, labels ``d['y']``, glimpse locations
    ``d['locs']``, per-step class believes ``d['state_believes']``, …) and
    writes matplotlib figures to disk via ``Visualiser._save_fig``.
    Attributes such as ``num_glimpses``, ``img_shape``, ``lbl_map``,
    ``im_show_kwargs`` and ``batch_size_eff`` come from the ``Visualiser``
    base class.
    """

    def __init__(self, model, FLAGS):
        """Cache model/FLAGS settings the plotting methods need.

        Args:
            model: trained model; only ``n_policies`` is read here.
            FLAGS: experiment configuration (size_z, planner, rnn_cell, …).
        """
        super().__init__(model, FLAGS)
        self.num_policies = model.n_policies
        self.size_z = FLAGS.size_z
        self.planner = FLAGS.planner
        self.use_pixel_obs_FE = FLAGS.use_pixel_obs_FE
        self.rnd_first_glimpse = FLAGS.rnd_first_glimpse
        self.rnn_cell = FLAGS.rnn_cell

    @visualisation_level(1)
    def visualise(self, d, suffix='', nr_obs_overview=8, nr_obs_reconstr=5):
        """Produce the standard set of evaluation plots for one batch."""
        # batch_size_eff is set in _eval_feed() -> has to come before
        nr_obs_overview = min(nr_obs_overview, self.batch_size_eff)
        nr_obs_reconstr = min(nr_obs_reconstr, self.batch_size_eff)
        nr_obs_FE = min(3, self.batch_size_eff)

        self.plot_overview(d, nr_obs_overview, suffix)
        self.plot_reconstr(d, nr_obs_reconstr, suffix)
        self.plot_reconstr_patches(d, nr_obs_reconstr, suffix)
        # moved to batch-wise:
        # self.plot_stateBelieves(d, suffix)
        # self.plot_fb(d, prefix)

        # Planning/free-energy plots only make sense once the ActInf planner
        # is actually in use (i.e. after pre-training has finished).
        if (self.planner == 'ActInf') & (d['epoch'] >= self.pre_train_epochs):
            # self.plot_planning(d, nr_examples=nr_obs_reconstr)
            self.plot_planning_patches(d, nr_examples=nr_obs_reconstr)
            self.plot_FE(d, nr_obs_FE, suffix)

    @visualisation_level(1)
    def intermed_plots(self, d, nr_examples, suffix='', folder_name='rnd_loc_eval'):
        """Intermediate evaluation: only the planning-patches plot."""
        self.plot_planning_patches(d, nr_examples, suffix, folder_name)

    @visualisation_level(1)
    def plot_reconstr(self, d, nr_examples, suffix='', folder_name='reconstr'):
        """Per-example grid: true glimpse, posterior reconstruction and the
        prior reconstruction of every known class, one row per glimpse."""
        def get_title_color(post_believes, hyp):
            # Highlight the current top hypothesis (magenta) and any hypothesis
            # still holding non-negligible posterior mass (blue).
            if post_believes[hyp] == post_believes.max():
                color = 'magenta'
            elif post_believes[hyp] > 0.1:
                color = 'blue'
            else:
                color = 'black'
            return color

        nax = 2 + self.num_classes_kn
        gl = self._glimpse_reshp(d['glimpse'])  # [T, B, scale[0], scales*scale[0]]
        gl_post = self._glimpse_reshp(d['reconstr_posterior'])  # [T, B, scale[0], scales*scale[0]]
        gl_preds = self._glimpse_reshp(d['reconstr_prior'])  # [T, B, hyp, scale[0], scales*scale[0]]

        idx_examples = self._get_idx_examples(d['y'], nr_examples, replace=False)
        for i in idx_examples:
            f, axes = plt.subplots(self.num_glimpses + 1, nax, figsize=(4 * self.num_scales * nax, 4 * (self.num_glimpses + 1)))
            axes = axes.reshape([self.num_glimpses + 1, nax])
            # Top-left: overview image with all glimpse locations.
            self._plot_img_plus_locs(axes[0, 0], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])

            for t in range(self.num_glimpses):
                # true glimpse
                axes[t + 1, 0].imshow(gl[t, i], **self.im_show_kwargs)
                title = 'Label: {}, clf: {}'.format(self.lbl_map[d['y'][i]], self.lbl_map[d['clf'][i]])
                if self.uk_label is not None:
                    title += ', p(uk) post: {:.2f}'.format(d['uk_belief'][t + 1, i])
                axes[t + 1, 0].set_title(title)
                # posterior reconstruction
                axes[t + 1, 1].imshow(gl_post[t, i], **self.im_show_kwargs)
                axes[t + 1, 1].set_title('Posterior, nll: {:.2f}'.format(d['nll_posterior'][t, i]))
                # prior reconstructions, one per class, ranked by KL divergence
                ranked_losses = np.argsort(d['KLdivs'][t, i, :])
                ps = softmax(-d['KLdivs'][t, i, :])
                for j, hyp in enumerate(ranked_losses):
                    axes[t + 1, j + 2].imshow(gl_preds[t, i, hyp], **self.im_show_kwargs)
                    if d['decisions'][t, i] != -1:
                        axes[t + 1, j + 2].set_title('Decision: {}'.format(d['decisions'][t, i]))
                    else:
                        c = get_title_color(d['state_believes'][t + 1, i, :], hyp)
                        axes[t + 1, j + 2].set_title('{}, p: {:.2f}, KL: {:.2f}, post-c: {:.2f}'.format(self.lbl_map[hyp], ps[hyp], d['KLdivs'][t, i, hyp], d['state_believes'][t + 1, i, hyp]), color=c)

            [ax.set_axis_off() for ax in axes.ravel()]
            self._save_fig(f, folder_name, '{}{}_n{}{isuk}.png'.format(self.prefix, suffix, i,
                                                                        isuk='_uk' if (d['y'][i] == self.uk_label) else ''))

    @visualisation_level(1)
    def plot_reconstr_patches(self, d, nr_examples, suffix='', folder_name='reconstr_patches'):
        """Like :meth:`plot_reconstr`, but glimpses/reconstructions are pasted
        onto the full image canvas at their locations (latest patch on top)."""
        def get_title_color(post_believes, hyp):
            # Highlight the current top hypothesis (magenta) and any hypothesis
            # still holding non-negligible posterior mass (blue).
            if post_believes[hyp] == post_believes.max():
                color = 'magenta'
            elif post_believes[hyp] > 0.1:
                color = 'blue'
            else:
                color = 'black'
            return color

        nax = 2 + self.num_classes_kn
        gl = self._glimpse_reshp(d['glimpse'])  # [T, B, scale[0], scales*scale[0]]
        gl_post = self._glimpse_reshp(d['reconstr_posterior'])  # [T, B, scale[0], scales*scale[0]]
        gl_preds = self._glimpse_reshp(d['reconstr_prior'])  # [T, B, hyp, scale[0], scales*scale[0]]

        idx_examples = self._get_idx_examples(d['y'], nr_examples, replace=False)
        for i in idx_examples:
            f, axes = plt.subplots(self.num_glimpses, nax, figsize=(4 * self.num_scales * nax, 4 * (self.num_glimpses + 1)))
            axes = axes.reshape([self.num_glimpses, nax])
            self._plot_img_plus_locs(axes[0, 0], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])

            # rank hypotheses by final believes
            T = np.argmax(d['decisions'][:, i])  # all non-decisions are -1
            ranked_hyp = np.argsort(-d['state_believes'][T, i, :])
            for t in range(self.num_glimpses - 1):
                # true glimpses up until and including t
                self._plot_seen(d['x'][i], d['locs'][:, i], until_t=min(t + 1, self.num_glimpses), ax=axes[t + 1, 0])
                title = 'Label: {}, clf: {}'.format(self.lbl_map[d['y'][i]], self.lbl_map[d['clf'][i]])
                if self.uk_label is not None:
                    title += ', p(uk) post: {:.2f}'.format(d['uk_belief'][t + 1, i])
                axes[t + 1, 0].set_title(title)
                # posterior reconstruction pasted over the previously seen patches
                self._glimpse_patches_until_t(t + 1, gl[:, i], gl_post[:, i], d['locs'][:, i], axes[t + 1, 1])
                axes[t + 1, 1].set_title('Posterior, nll: {:.2f}'.format(d['nll_posterior'][t, i]))
                # prior reconstructions for all classes, columns ordered by final belief
                ranks_overall = np.argsort(-d['state_believes'][t, i, :]).tolist()
                ranks_kl = np.argsort(d['KLdivs'][t, i, :]).tolist()
                ps_kl = softmax(-d['KLdivs'][t, i, :])
                for j, hyp in enumerate(ranked_hyp):
                    self._glimpse_patches_until_t(min(t + 1, self.num_glimpses), gl[:, i], gl_preds[:, i, hyp], d['locs'][:, i], axes[t + 1, j + 2])
                    if d['decisions'][t, i] != -1:
                        axes[t + 1, j + 2].set_title('Decision: {}'.format(d['decisions'][t, i]))
                    else:
                        c = get_title_color(d['state_believes'][min(t + 1, self.num_glimpses), i, :], hyp)
                        axes[t + 1, j + 2].set_title('{}: tot. rank pre: {}, kl rank: {}\nsftmx(KL): {:.2f}, KL: {:.2f}, post-c: {:.2f}'.format(
                                self.lbl_map[hyp], ranks_overall.index(hyp), ranks_kl.index(hyp),
                                ps_kl[hyp], d['KLdivs'][t, i, hyp], d['state_believes'][t + 1, i, hyp]),
                                color=c)

            [(ax.set_xticks([]), ax.set_yticks([]), ax.set_ylim([self.img_shape[0] - 1, 0]), ax.set_xlim([0, self.img_shape[1] - 1])) for ax in axes.ravel()]
            [ax.set_axis_off() for ax in axes[0].ravel()]
            self._save_fig(f, folder_name, '{}{}_n{}{isuk}.png'.format(self.prefix, suffix, i,
                                                                        isuk='_uk' if (d['y'][i] == self.uk_label) else ''))

    def _stick_glimpse_onto_canvas(self, glimpse, loc):
        """Pad a single glimpse into a zero canvas of the full image shape,
        centred at ``loc``; overhang beyond the image border is clipped.

        Args:
            glimpse: 2-D (or 2-D + channel) patch of side ``scale_sizes[0]``.
            loc: (y, x) centre of the glimpse in image coordinates.

        Returns:
            Array of shape ``img_shape_squeezed`` containing the glimpse.
        """
        img_y, img_x = self.img_shape[:2]
        loc_y, loc_x = loc
        half_width = self.scale_sizes[0] / 2
        assert len(self.scale_sizes) == 1, 'Not adjusted for multiple scales yet'

        # Clip the glimpse where it would extend past the image border.
        y_overlap_left = -int(min(round(loc_y - half_width), 0))
        y_overlap_right = int(img_y - round(loc_y + half_width)) if ((round(loc_y + half_width) - img_y) > 0) else None
        x_overlap_left = -int(min(round(loc_x - half_width), 0))
        x_overlap_right = int(img_x - round(loc_x + half_width)) if ((round(loc_x + half_width) - img_x) > 0) else None
        glimpse = glimpse[y_overlap_left: y_overlap_right,
                          x_overlap_left: x_overlap_right]

        # Boundaries of the glimpse within the image.
        x_boundry_left = int(max(round(loc_x - half_width), 0))
        x_boundry_right = int(min(round(loc_x + half_width), img_x))
        y_boundry_left = int(max(round(loc_y - half_width), 0))
        y_boundry_right = int(min(round(loc_y + half_width), img_y))

        # Zero-pad up to canvas size (extra channel axis when not grayscale).
        if self.img_shape[2] == 1:
            glimpse_padded = np.pad(glimpse, [(y_boundry_left, img_y - y_boundry_right),
                                             (x_boundry_left, img_x - x_boundry_right)],
                                    mode='constant')
        else:
            glimpse_padded = np.pad(glimpse, [(y_boundry_left, img_y - y_boundry_right),
                                             (x_boundry_left, img_x - x_boundry_right),
                                             (0, 0)],
                                    mode='constant')
        assert glimpse_padded.shape == tuple(self.img_shape_squeezed)
        return glimpse_padded

    def _glimpse_patches_until_t(self, until_t, true_glimpses, glimpses, locs, ax):
        """Plot the true_glimpses[:until_t - 2] & glimpses[until_t - 1] onto a canvas of shape img_shape, with the latest glimpses overlapping older ones (important for predictions)"""
        ix, iy = np.meshgrid(np.arange(self.img_shape[0]), np.arange(self.img_shape[1]))
        half_width = self.scale_sizes[0] / 2

        # `np.bool` alias was removed in NumPy 1.24 -> use the builtin.
        seen = np.zeros(self.img_shape[:2], bool)
        glimpse_padded = np.zeros(self.img_shape_squeezed)
        for t in range(until_t):
            loc = locs[t, :]
            y_boundry = [loc[0] - half_width, loc[0] + half_width]
            x_boundry = [loc[1] - half_width, loc[1] + half_width]
            new = (ix >= round(x_boundry[0])) & (ix < round(x_boundry[1])) & (iy >= round(y_boundry[0])) & (iy < round(y_boundry[1]))
            seen[new] = True
            # The last patch comes from `glimpses` (e.g. a reconstruction),
            # everything before it from the true observations.
            src = glimpses if (t == until_t - 1) else true_glimpses
            new_glimpse_padded = self._stick_glimpse_onto_canvas(src[t], locs[t])
            glimpse_padded = np.where(new, new_glimpse_padded, glimpse_padded)

        glimpse_padded_seen = self._mask_unseen(glimpse_padded, seen)
        ax.imshow(glimpse_padded_seen, **self.im_show_kwargs)
        half_pixel = 0.5 if (self.scale_sizes[0] % 2 == 0) else 0  # glimpses are rounded to pixel values do the same for the rectangle to make it fit nicely
        ax.add_patch(Rectangle(np.round(locs[until_t - 1, ::-1] - half_width) - half_pixel, width=self.scale_sizes[0], height=self.scale_sizes[0], edgecolor='green', facecolor='none'))

    @visualisation_level(2)
    def plot_planning(self, d, nr_examples, suffix='', folder_name='planning'):
        """One column per example, one row per glimpse: the evaluated candidate
        locations with their expected-free-energy (G) decomposition."""
        # T x [True glimpse, exp_exp_obs, exp_obs...]
        nax_x = nr_examples
        nax_y = self.num_glimpses
        f, axes = plt.subplots(nax_y, nax_x, figsize=(8 * self.num_scales * nax_x, 4 * nax_y), squeeze=False)
        for i in range(nr_examples):
            # Note: first action is random, meaning d['potential_actions'][0] will be zero
            for t in range(self.num_glimpses):
                if t == 0:  # random action
                    self._plot_img_plus_locs(axes[0, i], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
                    axes[t, i].set_title('t: {}, random policy, lbl: {}, clf: {}'.format(t, d['y'][i], d['clf'][i]))
                else:
                    if np.sum(d['H_exp_exp_obs'][t, i, :]) == 0.:
                        axes[t, i].set_title('t: {}, decision - no glimpse'.format(t))
                        break
                    axes[t, i].imshow(d['x'][i].reshape(self.img_shape_squeezed), **self.im_show_kwargs)
                    axes[t, i].set_title('t: {}, selected policy: {}'.format(t, np.argmax(d['G'][t, i, :])))
                    for k in range(self.num_policies):
                        # potential location under evaluation
                        locs = d['potential_actions'][t, i, k]
                        color = 'C{}'.format(k)
                        correct = np.all(locs == d['locs'][t, i, :])
                        lbl = '{}: G: {:.2f}, H_: {:.2f}, exp_H: {:.2f}, G_dec: {:.2f}'.format(k, d['G'][t, i, k], d['H_exp_exp_obs'][t, i, k], d['exp_H'][t, i, k], d['G'][t, i, -1])
                        axes[t, i].add_patch(Rectangle(locs[::-1] - self.scale_sizes[0] / 2,
                                                       width=self.scale_sizes[0], height=self.scale_sizes[0],
                                                       edgecolor=color, facecolor='none', linewidth=1.5, label=lbl))
                        if correct:
                            axes[t, i].scatter(locs[1], locs[0], marker='x', facecolors=color, linewidth=1.5, s=0.25 * (5 * 8 * 24))
                    # add current believes to legend (invisible scatter carries the label)
                    ranked_believes = np.argsort(-d['state_believes'][t, i, :])
                    lbl = 'hyp: ' + ', '.join('{} ({:.2f})'.format(j, d['state_believes'][t, i, j]) for j in ranked_believes[:5])
                    axes[t, i].scatter(0, 0, marker='x', facecolors='k', linewidth=0, s=0, label=lbl)
                # place legend to the right of the (shrunk) axes
                chartBox = axes[t, i].get_position()
                axes[t, i].set_position([chartBox.x0, chartBox.y0, chartBox.width * 0.6, chartBox.height])
                axes[t, i].legend(loc='center left', bbox_to_anchor=(1.04, 0.5), borderaxespad=0)

        [ax.set_axis_off() for ax in axes.ravel()]
        self._save_fig(f, folder_name, '{}{}.png'.format(self.prefix, suffix))

    @visualisation_level(2)
    def plot_planning_patches(self, d, nr_examples, suffix='', folder_name='planning_patches'):
        """Like :meth:`plot_planning`, but each row shows only the patches seen
        so far, with candidate locations colour-ranked by their G value."""
        nax_x = nr_examples
        nax_y = self.num_glimpses if self.rnd_first_glimpse else self.num_glimpses + 1
        f, axes = plt.subplots(nax_y, nax_x, figsize=(8 * self.num_scales * nax_x, 4 * nax_y), squeeze=False)

        # matplotlib.cm.get_cmap is deprecated (removed in mpl 3.9);
        # plt.get_cmap is the stable spelling.
        frames_cmap = plt.get_cmap('bwr')
        frames_color = frames_cmap(np.linspace(1, 0, self.num_policies))

        for i in range(nr_examples):
            # if first glimpse is random, plot overview in its spot. O/w create an additional plot
            self._plot_img_plus_locs(axes[0, i], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
            if self.rnd_first_glimpse:
                start_t = 1
                axes[0, i].set_title('t: {}, random policy, lbl: {}, clf: {}'.format(0, d['y'][i], d['clf'][i]))
            else:
                start_t = 0
                axes[0, i].set_title('Lbl: {}, clf: {}'.format(d['y'][i], d['clf'][i]))

            for row, t in enumerate(range(start_t, self.num_glimpses), start=1):
                # plot patches seen until now
                self._plot_seen(d['x'][i], d['locs'][:, i], until_t=t, ax=axes[row, i])

                # add current believes to legend (invisible patch carries the label)
                ranked_believes = np.argsort(-d['state_believes'][t, i, :])
                lbl = 'hyp: ' + ', '.join('{} ({:.2f})'.format(j, d['state_believes'][t, i, j]) for j in ranked_believes[:5])
                axes[row, i].add_patch(Rectangle((0, 0), width=0.1, height=0.1, linewidth=0, color='white', label=lbl))

                decided = (d['decisions'][:t + 1, i] != -1).any()
                if decided:
                    axes[row, i].set_title('t: {}, decision - no new glimpse'.format(t))
                else:
                    selected = [j for j, arr in enumerate(d['potential_actions'][t, i, :]) if (arr == d['locs'][t, i]).all()]
                    axes[row, i].set_title('t: {}, selected policy: {}'.format(t, selected[0]))

                    # plot rectangles for evaluated next locations, best-G first
                    ranked_policies = np.argsort(-d['G'][t, i, :-1])
                    for iii, k in enumerate(ranked_policies):
                        # potential location under evaluation
                        locs = d['potential_actions'][t, i, k]
                        correct = np.all(locs == d['locs'][t, i, :])
                        lbl = '{}: G: {:.2f}, H(exp): {:.2f}, E(H): {:.2f}, G_dec: {:.2f}'.format(k, d['G'][t, i, k], d['H_exp_exp_obs'][t, i, k], d['exp_H'][t, i, k], d['G'][t, i, -1])
                        axes[row, i].add_patch(Rectangle(locs[::-1] - self.scale_sizes[0] / 2,
                                                         width=self.scale_sizes[0], height=self.scale_sizes[0],
                                                         edgecolor=frames_color[iii], facecolor='none', linewidth=1.5, label=lbl))
                        if correct:
                            axes[row, i].scatter(locs[1], locs[0], marker='x', facecolors=frames_color[iii], linewidth=1.5, s=0.25 * (5 * 8 * 24))

                # place legend next to plot
                chartBox = axes[row, i].get_position()
                axes[row, i].set_position([chartBox.x0, chartBox.y0, chartBox.width * 0.6, chartBox.height])
                axes[row, i].legend(loc='center left', bbox_to_anchor=(1.04, 0.5), borderaxespad=0)

                if decided:  # set all following axes off and stop
                    [axes[ttt, i].set_axis_off() for ttt in range(row + 1, nax_y)]
                    break

        [(ax.set_xticks([]), ax.set_yticks([]), ax.set_ylim([self.img_shape[0] - 1, 0]), ax.set_xlim([0, self.img_shape[1] - 1])) for ax in axes.ravel()]
        self._save_fig(f, folder_name, '{}{}.png'.format(self.prefix, suffix))

    @visualisation_level(2)
    def plot_FE(self, d, nr_examples, suffix='', folder_name='FE'):
        """Free-energy diagnostics: glimpse, posterior code, expected
        observation of the taken policy, and the per-class prior codes."""
        if self.rnn_cell.startswith('Conv') and not self.use_pixel_obs_FE:
            logging.debug('Skip FE plots for convLSTM. Shapes for z not defined')
            # TODO: adjust size_z to not come from FLAGS but from VAEEncoder.output_shape_flat
            return
        # T x [True glimpse, posterior, exp_exp_obs, exp_obs...]
        nax_x = 3 + self.num_classes_kn
        nax_y = self.num_glimpses

        gl = self._glimpse_reshp(d['glimpse'])  # [T, B, scale[0], scales*scale[0]]
        if self.use_pixel_obs_FE:
            # Observations are in pixel space -> reshape like glimpses.
            posterior = self._glimpse_reshp(d['reconstr_posterior'])
            exp_exp_obs = self._glimpse_reshp(d['exp_exp_obs'])
            exp_obs_prior = self._glimpse_reshp(d['reconstr_prior'])
        else:
            # Latent space: pick a 2-D layout for the z-vector for imshow.
            if self.size_z == 10:
                shp = [5, 2]
            elif self.size_z == 32:
                shp = [8, 4]
            elif self.size_z == 128:
                shp = [16, 8]
            else:
                shp = 2 * [int(np.sqrt(self.size_z))]
                if np.prod(shp) != self.size_z:
                    logging.warning('Unspecified shape for this size_z and plot_z. Skipping z plots.')
                    return
            posterior = np.reshape(d['z_post'], [self.num_glimpses, self.batch_size_eff] + shp)
            exp_exp_obs = np.reshape(d['exp_exp_obs'], [self.num_glimpses, self.batch_size_eff, self.num_policies] + shp)
            exp_obs_prior = np.reshape(d['selected_exp_obs_enc'], [self.num_glimpses, self.batch_size_eff, self.num_classes_kn] + shp)

        for i in range(nr_examples):
            f, axes = plt.subplots(nax_y, nax_x, figsize=(4 * self.num_scales * nax_x, 4 * nax_y), squeeze=False)
            for t in range(self.num_glimpses):
                if t == 0:
                    self._plot_img_plus_locs(axes[t, 0], d['x'][i], d['y'][i], d['clf'][i], d['locs'][:, i, :], d['decisions'][:, i])
                else:
                    axes[t, 0].imshow(gl[t, i], **self.im_show_kwargs)
                axes[t, 0].set_title('t: {}'.format(t))
                axes[t, 1].imshow(posterior[t, i], **self.im_show_kwargs)
                axes[t, 1].set_title('posterior')
                p = d['selected_action_idx'][t, i]
                axes[t, 2].imshow(exp_exp_obs[t, i, p], **self.im_show_kwargs)
                axes[t, 2].set_title('H(exp) policy0: {:.2f}'.format(d['H_exp_exp_obs'][t, i, p]))
                ranked_believes = np.argsort(-d['state_believes'][t, i, :])
                for k in ranked_believes:
                    axes[t, 3 + k].imshow(exp_obs_prior[t, i, k], **self.im_show_kwargs)
                    axes[t, 3 + k].set_title('k: {}, p: {:.2f}'.format(k, d['state_believes'][t, i, k]))

            [ax.set_axis_off() for ax in axes.ravel()]
            self._save_fig(f, folder_name, '{}{}_n{}.png'.format(self.prefix, suffix, i))

    @visualisation_level(2)
    def plot_fb(self, d, suffix=''):
        """Histograms of the feedback values ``d['fb']`` for correct vs wrong
        hypotheses (and known vs unknown, when an uk label exists)."""
        def fb_hist(fb1, fb2, ax, title, add_legend):
            """fb1, fb2: tuple of (values, legend)"""
            ax.hist(fb1[0], bins, alpha=0.5, label=fb1[1])
            ax.hist(fb2[0], bins, alpha=0.5, label=fb2[1])
            ax.set_title(title)
            if add_legend:
                ax.legend(loc='upper right')

        nax = self.num_classes
        ntax = self.num_glimpses - 1
        bins = 40
        f, axes = plt.subplots(ntax, nax, figsize=(4 * nax, 4 * self.num_glimpses))

        if self.uk_label is not None:
            is_uk = (d['y'] == self.uk_label)
            fb_kn_best = d['fb'][:, ~is_uk, :].min(axis=2)  # in-shape: [T, B, hyp]
            fb_uk_best = d['fb'][:, is_uk, :].min(axis=2)
        else:
            fb_kn_best, fb_uk_best = None, None

        for t in range(ntax):
            for hyp in range(self.num_classes_kn):
                is_hyp = (d['y'] == hyp)
                # NOTE: t < num_glimpses always holds here (t ranges over
                # ntax = num_glimpses - 1); the else-branch is a disabled
                # sum-over-time row kept for reference.
                if t < self.num_glimpses:
                    pre = 't{}: '.format(t) if (hyp == 0) else ''
                    fb_corr = d['fb'][t, is_hyp, hyp]
                    fb_wrong = d['fb'][t, ~is_hyp, hyp]
                else:  # last row: sum over time
                    break
                    # pre = 'All t: ' if (hyp == 0) else ''
                    # fb_corr = d['fb'][:, is_hyp, hyp].sum(axis=0)
                    # fb_wrong = d['fb'][:, ~is_hyp, hyp].sum(axis=0)
                fb_hist((fb_corr, 'correct hyp'),
                        (fb_wrong, 'wrong hyp'),
                        axes[t, hyp], '{}hyp: {}'.format(pre, self.lbl_map[hyp]), add_legend=(t == 0))
            if self.uk_label is not None:
                # right most: best fb across hyp for kn vs uk
                if t < self.num_glimpses:
                    fb_kn = fb_kn_best[t]
                    fb_uk = fb_uk_best[t]
                else:
                    fb_kn = fb_kn_best.sum(axis=0)
                    fb_uk = fb_uk_best.sum(axis=0)
                fb_hist((fb_kn, 'known'),
                        (fb_uk, 'uk'),
                        axes[t, nax - 1], 'best fb', add_legend=(t == 0))

        self._save_fig(f, 'fb', '{}{}.png'.format(self.prefix, suffix))

    @visualisation_level(2)
    def plot_stateBelieves(self, d, suffix):
        """Histogram of the top class belief after each glimpse, split into
        correctly vs wrongly classified samples."""
        # TODO: INCLUDE uk_belief and plots differentiating by known/uk
        ntax = self.num_glimpses
        bins = 40
        f, axes = plt.subplots(ntax, 1, figsize=(4, 4 * self.num_glimpses), squeeze=False)

        top_believes = d['state_believes'].max(axis=2)  # [T+1, B, num_classes] -> [T+1, B]
        top_believes_class = d['state_believes'].argmax(axis=2)  # [T+1, B, num_classes] -> [T+1, B]
        is_corr = (top_believes_class == d['y'][np.newaxis, :])
        # Mask is True where the entry is EXCLUDED: `corr` keeps correct samples.
        corr = np.ma.masked_array(top_believes, mask=~is_corr)
        wrong = np.ma.masked_array(top_believes, mask=is_corr)

        for t in range(ntax):
            # BUGFIX: previously `mask.any()`, which skipped the histogram
            # exactly when ALL samples fell into one group; plot whenever at
            # least one unmasked value exists.
            if not corr[t + 1].mask.all():
                axes[t, 0].hist(corr[t + 1].compressed(), bins=bins, alpha=0.5, label='corr')
            if not wrong[t + 1].mask.all():
                axes[t, 0].hist(wrong[t + 1].compressed(), bins=bins, alpha=0.5, label='wrong')
            axes[t, 0].legend(loc='upper right')
            axes[t, 0].set_title('Top believes after glimpse {}'.format(t + 1))
            axes[t, 0].set_xlim([0, 1])

        self._save_fig(f, 'c', '{}{}.png'.format(self.prefix, suffix))
| [
"numpy.sum",
"matplotlib.cm.get_cmap",
"numpy.argmax",
"numpy.argsort",
"numpy.arange",
"numpy.ma.masked_array",
"numpy.round",
"numpy.prod",
"numpy.pad",
"matplotlib.patches.Rectangle",
"numpy.reshape",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"activeClassifier.visualisation.base.v... | [((52, 73), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (66, 73), False, 'import matplotlib\n'), ((162, 181), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (179, 181), False, 'import logging\n'), ((400, 490), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'UserWarning', 'module': '""".*matplotlib"""'}), "(action='ignore', category=UserWarning, module=\n '.*matplotlib')\n", (423, 490), False, 'import warnings\n'), ((897, 919), 'activeClassifier.visualisation.base.visualisation_level', 'visualisation_level', (['(1)'], {}), '(1)\n', (916, 919), False, 'from activeClassifier.visualisation.base import Visualiser, visualisation_level\n'), ((1797, 1819), 'activeClassifier.visualisation.base.visualisation_level', 'visualisation_level', (['(1)'], {}), '(1)\n', (1816, 1819), False, 'from activeClassifier.visualisation.base import Visualiser, visualisation_level\n'), ((1983, 2005), 'activeClassifier.visualisation.base.visualisation_level', 'visualisation_level', (['(1)'], {}), '(1)\n', (2002, 2005), False, 'from activeClassifier.visualisation.base import Visualiser, visualisation_level\n'), ((4798, 4820), 'activeClassifier.visualisation.base.visualisation_level', 'visualisation_level', (['(1)'], {}), '(1)\n', (4817, 4820), False, 'from activeClassifier.visualisation.base import Visualiser, visualisation_level\n'), ((14283, 14305), 'activeClassifier.visualisation.base.visualisation_level', 'visualisation_level', (['(2)'], {}), '(2)\n', (14302, 14305), False, 'from activeClassifier.visualisation.base import Visualiser, visualisation_level\n'), ((17258, 17280), 'activeClassifier.visualisation.base.visualisation_level', 'visualisation_level', (['(2)'], {}), '(2)\n', (17277, 17280), False, 'from activeClassifier.visualisation.base import Visualiser, visualisation_level\n'), ((21157, 21179), 'activeClassifier.visualisation.base.visualisation_level', 
'visualisation_level', (['(2)'], {}), '(2)\n', (21176, 21179), False, 'from activeClassifier.visualisation.base import Visualiser, visualisation_level\n'), ((24153, 24175), 'activeClassifier.visualisation.base.visualisation_level', 'visualisation_level', (['(2)'], {}), '(2)\n', (24172, 24175), False, 'from activeClassifier.visualisation.base import Visualiser, visualisation_level\n'), ((26407, 26429), 'activeClassifier.visualisation.base.visualisation_level', 'visualisation_level', (['(2)'], {}), '(2)\n', (26426, 26429), False, 'from activeClassifier.visualisation.base import Visualiser, visualisation_level\n'), ((10745, 10782), 'numpy.zeros', 'np.zeros', (['self.img_shape[:2]', 'np.bool'], {}), '(self.img_shape[:2], np.bool)\n', (10753, 10782), True, 'import numpy as np\n'), ((10808, 10841), 'numpy.zeros', 'np.zeros', (['self.img_shape_squeezed'], {}), '(self.img_shape_squeezed)\n', (10816, 10841), True, 'import numpy as np\n'), ((14521, 14616), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nax_y', 'nax_x'], {'figsize': '(8 * self.num_scales * nax_x, 4 * nax_y)', 'squeeze': '(False)'}), '(nax_y, nax_x, figsize=(8 * self.num_scales * nax_x, 4 * nax_y),\n squeeze=False)\n', (14533, 14616), True, 'from matplotlib import pyplot as plt\n'), ((17511, 17606), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nax_y', 'nax_x'], {'figsize': '(8 * self.num_scales * nax_x, 4 * nax_y)', 'squeeze': '(False)'}), '(nax_y, nax_x, figsize=(8 * self.num_scales * nax_x, 4 * nax_y),\n squeeze=False)\n', (17523, 17606), True, 'from matplotlib import pyplot as plt\n'), ((17625, 17654), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""bwr"""'], {}), "('bwr')\n", (17647, 17654), False, 'import matplotlib\n'), ((24649, 24714), 'matplotlib.pyplot.subplots', 'plt.subplots', (['ntax', 'nax'], {'figsize': '(4 * nax, 4 * self.num_glimpses)'}), '(ntax, nax, figsize=(4 * nax, 4 * self.num_glimpses))\n', (24661, 24714), True, 'from matplotlib import pyplot as plt\n'), ((26617, 26689), 
'matplotlib.pyplot.subplots', 'plt.subplots', (['ntax', '(1)'], {'figsize': '(4, 4 * self.num_glimpses)', 'squeeze': '(False)'}), '(ntax, 1, figsize=(4, 4 * self.num_glimpses), squeeze=False)\n', (26629, 26689), True, 'from matplotlib import pyplot as plt\n'), ((26963, 27010), 'numpy.ma.masked_array', 'np.ma.masked_array', (['top_believes'], {'mask': '(~is_corr)'}), '(top_believes, mask=~is_corr)\n', (26981, 27010), True, 'import numpy as np\n'), ((27027, 27073), 'numpy.ma.masked_array', 'np.ma.masked_array', (['top_believes'], {'mask': 'is_corr'}), '(top_believes, mask=is_corr)\n', (27045, 27073), True, 'import numpy as np\n'), ((2839, 2949), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(self.num_glimpses + 1)', 'nax'], {'figsize': '(4 * self.num_scales * nax, 4 * (self.num_glimpses + 1))'}), '(self.num_glimpses + 1, nax, figsize=(4 * self.num_scales * nax,\n 4 * (self.num_glimpses + 1)))\n', (2851, 2949), True, 'from matplotlib import pyplot as plt\n'), ((5670, 5776), 'matplotlib.pyplot.subplots', 'plt.subplots', (['self.num_glimpses', 'nax'], {'figsize': '(4 * self.num_scales * nax, 4 * (self.num_glimpses + 1))'}), '(self.num_glimpses, nax, figsize=(4 * self.num_scales * nax, 4 *\n (self.num_glimpses + 1)))\n', (5682, 5776), True, 'from matplotlib import pyplot as plt\n'), ((6022, 6053), 'numpy.argmax', 'np.argmax', (["d['decisions'][:, i]"], {}), "(d['decisions'][:, i])\n", (6031, 6053), True, 'import numpy as np\n'), ((6107, 6148), 'numpy.argsort', 'np.argsort', (["(-d['state_believes'][T, i, :])"], {}), "(-d['state_believes'][T, i, :])\n", (6117, 6148), True, 'import numpy as np\n'), ((9723, 9847), 'numpy.pad', 'np.pad', (['glimpse', '[(y_boundry_left, img_y - y_boundry_right), (x_boundry_left, img_x -\n x_boundry_right)]'], {'mode': '"""constant"""'}), "(glimpse, [(y_boundry_left, img_y - y_boundry_right), (x_boundry_left,\n img_x - x_boundry_right)], mode='constant')\n", (9729, 9847), True, 'import numpy as np\n'), ((9969, 10101), 'numpy.pad', 'np.pad', 
(['glimpse', '[(y_boundry_left, img_y - y_boundry_right), (x_boundry_left, img_x -\n x_boundry_right), (0, 0)]'], {'mode': '"""constant"""'}), "(glimpse, [(y_boundry_left, img_y - y_boundry_right), (x_boundry_left,\n img_x - x_boundry_right), (0, 0)], mode='constant')\n", (9975, 10101), True, 'import numpy as np\n'), ((10625, 10653), 'numpy.arange', 'np.arange', (['self.img_shape[0]'], {}), '(self.img_shape[0])\n', (10634, 10653), True, 'import numpy as np\n'), ((10655, 10683), 'numpy.arange', 'np.arange', (['self.img_shape[1]'], {}), '(self.img_shape[1])\n', (10664, 10683), True, 'import numpy as np\n'), ((11386, 11435), 'numpy.where', 'np.where', (['new', 'new_glimpse_padded', 'glimpse_padded'], {}), '(new, new_glimpse_padded, glimpse_padded)\n', (11394, 11435), True, 'import numpy as np\n'), ((17690, 17726), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', 'self.num_policies'], {}), '(1, 0, self.num_policies)\n', (17701, 17726), True, 'import numpy as np\n'), ((21335, 21404), 'logging.debug', 'logging.debug', (['"""Skip FE plots for convLSTM. Shapes for z not defined"""'], {}), "('Skip FE plots for convLSTM. 
Shapes for z not defined')\n", (21348, 21404), False, 'import logging\n'), ((22454, 22525), 'numpy.reshape', 'np.reshape', (["d['z_post']", '([self.num_glimpses, self.batch_size_eff] + shp)'], {}), "(d['z_post'], [self.num_glimpses, self.batch_size_eff] + shp)\n", (22464, 22525), True, 'import numpy as np\n'), ((22552, 22652), 'numpy.reshape', 'np.reshape', (["d['exp_exp_obs']", '([self.num_glimpses, self.batch_size_eff, self.num_policies] + shp)'], {}), "(d['exp_exp_obs'], [self.num_glimpses, self.batch_size_eff, self.\n num_policies] + shp)\n", (22562, 22652), True, 'import numpy as np\n'), ((22676, 22787), 'numpy.reshape', 'np.reshape', (["d['selected_exp_obs_enc']", '([self.num_glimpses, self.batch_size_eff, self.num_classes_kn] + shp)'], {}), "(d['selected_exp_obs_enc'], [self.num_glimpses, self.\n batch_size_eff, self.num_classes_kn] + shp)\n", (22686, 22787), True, 'import numpy as np\n'), ((22843, 22938), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nax_y', 'nax_x'], {'figsize': '(4 * self.num_scales * nax_x, 4 * nax_y)', 'squeeze': '(False)'}), '(nax_y, nax_x, figsize=(4 * self.num_scales * nax_x, 4 * nax_y),\n squeeze=False)\n', (22855, 22938), True, 'from matplotlib import pyplot as plt\n'), ((3838, 3870), 'numpy.argsort', 'np.argsort', (["d['KLdivs'][t, i, :]"], {}), "(d['KLdivs'][t, i, :])\n", (3848, 3870), True, 'import numpy as np\n'), ((3892, 3922), 'activeClassifier.tools.utility.softmax', 'softmax', (["(-d['KLdivs'][t, i, :])"], {}), "(-d['KLdivs'][t, i, :])\n", (3899, 3922), False, 'from activeClassifier.tools.utility import softmax\n'), ((7112, 7142), 'activeClassifier.tools.utility.softmax', 'softmax', (["(-d['KLdivs'][t, i, :])"], {}), "(-d['KLdivs'][t, i, :])\n", (7119, 7142), False, 'from activeClassifier.tools.utility import softmax\n'), ((18617, 18658), 'numpy.argsort', 'np.argsort', (["(-d['state_believes'][t, i, :])"], {}), "(-d['state_believes'][t, i, :])\n", (18627, 18658), True, 'import numpy as np\n'), ((11758, 11804), 
'numpy.round', 'np.round', (['(locs[until_t - 1, ::-1] - half_width)'], {}), '(locs[until_t - 1, ::-1] - half_width)\n', (11766, 11804), True, 'import numpy as np\n'), ((16574, 16615), 'numpy.argsort', 'np.argsort', (["(-d['state_believes'][t, i, :])"], {}), "(-d['state_believes'][t, i, :])\n", (16584, 16615), True, 'import numpy as np\n'), ((18825, 18904), 'matplotlib.patches.Rectangle', 'Rectangle', (['(0, 0)'], {'width': '(0.1)', 'height': '(0.1)', 'linewidth': '(0)', 'color': '"""white"""', 'label': 'lbl'}), "((0, 0), width=0.1, height=0.1, linewidth=0, color='white', label=lbl)\n", (18834, 18904), False, 'from matplotlib.patches import Rectangle\n'), ((19436, 19466), 'numpy.argsort', 'np.argsort', (["(-d['G'][t, i, :-1])"], {}), "(-d['G'][t, i, :-1])\n", (19446, 19466), True, 'import numpy as np\n'), ((23710, 23751), 'numpy.argsort', 'np.argsort', (["(-d['state_believes'][t, i, :])"], {}), "(-d['state_believes'][t, i, :])\n", (23720, 23751), True, 'import numpy as np\n'), ((6968, 7009), 'numpy.argsort', 'np.argsort', (["(-d['state_believes'][t, i, :])"], {}), "(-d['state_believes'][t, i, :])\n", (6978, 7009), True, 'import numpy as np\n'), ((7046, 7078), 'numpy.argsort', 'np.argsort', (["d['KLdivs'][t, i, :]"], {}), "(d['KLdivs'][t, i, :])\n", (7056, 7078), True, 'import numpy as np\n'), ((15128, 15163), 'numpy.sum', 'np.sum', (["d['H_exp_exp_obs'][t, i, :]"], {}), "(d['H_exp_exp_obs'][t, i, :])\n", (15134, 15163), True, 'import numpy as np\n'), ((15765, 15799), 'numpy.all', 'np.all', (["(locs == d['locs'][t, i, :])"], {}), "(locs == d['locs'][t, i, :])\n", (15771, 15799), True, 'import numpy as np\n'), ((19689, 19723), 'numpy.all', 'np.all', (["(locs == d['locs'][t, i, :])"], {}), "(locs == d['locs'][t, i, :])\n", (19695, 19723), True, 'import numpy as np\n'), ((15474, 15500), 'numpy.argmax', 'np.argmax', (["d['G'][t, i, :]"], {}), "(d['G'][t, i, :])\n", (15483, 15500), True, 'import numpy as np\n'), ((16031, 16202), 'matplotlib.patches.Rectangle', 
'Rectangle', (['(locs[::-1] - self.scale_sizes[0] / 2)'], {'width': 'self.scale_sizes[0]', 'height': 'self.scale_sizes[0]', 'edgecolor': 'color', 'facecolor': '"""none"""', 'linewidth': '(1.5)', 'label': 'lbl'}), "(locs[::-1] - self.scale_sizes[0] / 2, width=self.scale_sizes[0],\n height=self.scale_sizes[0], edgecolor=color, facecolor='none',\n linewidth=1.5, label=lbl)\n", (16040, 16202), False, 'from matplotlib.patches import Rectangle\n'), ((19959, 20143), 'matplotlib.patches.Rectangle', 'Rectangle', (['(locs[::-1] - self.scale_sizes[0] / 2)'], {'width': 'self.scale_sizes[0]', 'height': 'self.scale_sizes[0]', 'edgecolor': 'frames_color[iii]', 'facecolor': '"""none"""', 'linewidth': '(1.5)', 'label': 'lbl'}), "(locs[::-1] - self.scale_sizes[0] / 2, width=self.scale_sizes[0],\n height=self.scale_sizes[0], edgecolor=frames_color[iii], facecolor=\n 'none', linewidth=1.5, label=lbl)\n", (19968, 20143), False, 'from matplotlib.patches import Rectangle\n'), ((22280, 22292), 'numpy.prod', 'np.prod', (['shp'], {}), '(shp)\n', (22287, 22292), True, 'import numpy as np\n'), ((22238, 22258), 'numpy.sqrt', 'np.sqrt', (['self.size_z'], {}), '(self.size_z)\n', (22245, 22258), True, 'import numpy as np\n')] |
# Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This module contains various utility functions needed to perform quantum chemistry calculations with
Strawberry Fields.
"""
from typing import Tuple
import numpy as np
from scipy.constants import c, h, m_u, pi
from thewalrus import quantum
def duschinsky(
    Li: np.ndarray, Lf: np.ndarray, ri: np.ndarray, rf: np.ndarray, wf: np.ndarray, m: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
    r"""Compute the Duschinsky rotation matrix :math:`U` and the dimensionless
    displacement vector :math:`\delta` for a vibronic transition.

    The Duschinsky transformation relates the normal coordinates of the
    initial and final electronic states as :math:`q_f = U q_i + d`, with

    .. math:: U = L_f^T L_i, \qquad d = L_f^T \sqrt{m} (r_e^i - r_e^f),

    where :math:`L` holds the mass-weighted normal modes and :math:`r_e` the
    equilibrium geometries. The displacement is reported in dimensionless form
    :math:`\delta = l^{-1} d`, where :math:`l` is diagonal with
    :math:`l_{kk} = (\hbar / (2 \pi \omega_k c))^{1/2}` and :math:`\omega_k`
    are the final-state frequencies.

    **Example usage:**

    >>> Li = np.array([[-0.28933191], [0.0], [0.0], [0.95711104], [0.0], [0.0]])
    >>> Lf = np.array([[-0.28933191], [0.0], [0.0], [0.95711104], [0.0], [0.0]])
    >>> ri = np.array([-0.0236, 0.0, 0.0, 1.2236, 0.0, 0.0])
    >>> rf = np.array([0.0, 0.0, 0.0, 1.4397, 0.0, 0.0])
    >>> wf = np.array([1363.2])
    >>> m = np.array([11.0093] * 3 + [1.0078] * 3)
    >>> U, delta = duschinsky(Li, Lf, ri, rf, wf, m)
    >>> U, delta
    (array([[0.99977449]]), array([-1.17623073]))

    Args:
        Li (array): mass-weighted normal modes of the initial electronic state
        Lf (array): mass-weighted normal modes of the final electronic state
        ri (array): equilibrium molecular geometry of the initial electronic state
        rf (array): equilibrium molecular geometry of the final electronic state
        wf (array): normal mode frequencies of the final electronic state in
            units of :math:`\mbox{cm}^{-1}`
        m (array): atomic masses in unified atomic mass units

    Returns:
        tuple[array, array]: Duschinsky rotation matrix :math:`U` and
        dimensionless displacement vector :math:`\delta`
    """
    # Rotation between the two sets of mass-weighted normal modes.
    rotation = Lf.T @ Li

    # Geometry displacement projected onto the final-state normal modes.
    displacement = Lf.T * m**0.5 @ (ri - rf)

    # Diagonal scaling l^{-1} that renders d dimensionless; 100*c converts the
    # frequencies from cm^-1 to Hz, while the 1e10 and m_u factors carry the
    # remaining unit conversions used by the original implementation.
    scaling = np.diag((h / (wf * 100.0 * c)) ** (-0.5) * 2.0 * pi) / 1.0e10 * m_u**0.5

    return rotation, np.array(displacement @ scaling)
def read_gamess(file) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    r"""Reads molecular data from the output file generated by the GAMESS quantum chemistry package
    :cite:`schmidt1993general`.

    This function extracts the atomic coordinates (r), atomic masses (m), vibrational frequencies
    (w), and normal modes (l) of a molecule from the output file of a vibrational frequency
    calculation performed with the GAMESS quantum chemistry package. The output file must contain
    the results of a `RUNTYP=HESSIAN` calculation performed with GAMESS. We recommend checking the
    output of this function with the GAMESS results to assure that the GAMESS output file is parsed
    correctly.

    **Example usage:**

    >>> r, m, w, l = read_gamess('../BH_data.out')
    >>> r  # atomic coordinates
    array([[0.0000000, 0.0000000, 0.0000000],
           [1.2536039, 0.0000000, 0.0000000]])
    >>> m  # atomic masses
    array([11.00931, 1.00782])
    >>> w  # vibrational frequencies
    array([19.74, 19.73, 0.00, 0.00, 0.00, 2320.32])
    >>> l  # normal modes (one row per mode)

    Args:
        file (str): path to the GAMESS output file

    Returns:
        tuple[array, array, array, array]: atomic coordinates, atomic masses, normal mode
        frequencies, normal modes
    """
    with open(file, "r", encoding="utf-8") as f:
        # Accumulators: coordinates, masses, frequency rows, normal-mode blocks.
        r = []
        m = []
        w = []
        l = []
        for line in f:
            # Echoed "$DATA" input group: skip three header lines, then read
            # one atom per line (last three fields are x, y, z) until "$END".
            if "INPUT CARD> $data" in line or "INPUT CARD> $DATA" in line:
                line = [next(f) for _ in range(3)][-1]
                while "end" not in line and "END" not in line:
                    r.append(np.array(line.rstrip().split()[-3:], float))
                    line = next(f).rstrip()
            # "ATOMIC WEIGHTS" table: one mass per atom found above, taken
            # from the last column of each row.
            if "ATOMIC WEIGHTS" in line:
                next(f)
                for _ in range(len(r)):
                    m.append(np.array(next(f).rstrip().split()[-1:], float))
            # "FREQUENCY" header: GAMESS prints modes in column groups.  The
            # header lists n_mode frequencies; after the next blank line there
            # are 3*N displacement rows whose last n_mode columns belong to
            # this group.  The transpose makes each row of `l` one mode.
            if "FREQUENCY" in line:
                line = line.rstrip().split()
                n_mode = len(line) - 1
                w.append(np.array(line[-n_mode:], float))
                while f.readline() != "\n":
                    pass
                d = []
                for _ in range(len(r) * 3):
                    d.append(f.readline().rstrip().split()[-n_mode:])
                l.append(np.array(d, float).T)
    # NOTE(review): `l` is not validated here, unlike r, m and w; a file with
    # frequencies but no displacement rows would fail later in np.concatenate.
    if not r:
        raise ValueError("No atomic coordinates found in the output file")
    if not m:
        raise ValueError("No atomic masses found in the output file")
    if not w:
        raise ValueError("No vibrational frequencies found in the output file")
    return (
        np.concatenate(r).reshape(len(r), 3),
        np.concatenate(m),
        np.concatenate(w),
        np.concatenate(l),
    )
def prob(samples: list, excited_state: list) -> float:
    r"""Estimate the probability of observing a given excited state.

    The estimate is simply the relative frequency of ``excited_state``
    among the provided samples.

    **Example usage:**

    >>> excited_state = [0, 2]
    >>> samples = [[0, 2], [1, 1], [0, 2], [2, 0], [1, 1], [0, 2], [1, 1], [1, 1], [1, 1]]
    >>> prob(samples, excited_state)
    0.3333333333333333

    Args:
        samples list[list[int]]: a list of samples
        excited_state (list): a Fock state

    Returns:
        float: probability of observing the Fock state in the given samples
    """
    # Guard clauses: validate both inputs before counting.
    if not samples:
        raise ValueError("The samples list must not be empty")
    if not excited_state:
        raise ValueError("The excited state list must not be empty")
    if len(excited_state) != len(samples[0]):
        raise ValueError("The number of modes in the samples and the excited state must be equal")
    if any(n < 0 for n in excited_state):
        raise ValueError("The excited state must not contain negative values")
    return samples.count(excited_state) / len(samples)
def marginals(mu: np.ndarray, V: np.ndarray, n_max: int, hbar: float = 2.0) -> np.ndarray:
    r"""Generate single-mode marginal photon-number distributions of a
    Gaussian state described by a displacement vector and covariance matrix.

    For each mode, the reduced Gaussian state is extracted and its diagonal
    density-matrix elements :math:`\langle n | \rho | n \rangle` are computed
    for :math:`n = 0, \ldots, n_{max} - 1`.

    Args:
        mu (array): displacement vector
        V (array): covariance matrix
        n_max (int): maximum number of vibrational quanta in the distribution
        hbar (float): the value of :math:`\hbar` in the commutation relation
            :math:`[\x,\p]=i\hbar`.

    Returns:
        array[list[float]]: one marginal distribution per mode, shape
        ``(n_modes, n_max)``
    """
    # Validate inputs before touching the quantum helpers.
    if V.shape[0] != V.shape[1]:
        raise ValueError("The covariance matrix must be a square matrix")
    if len(mu) != len(V):
        raise ValueError(
            "The dimension of the displacement vector and the covariance matrix must be equal"
        )
    if n_max <= 0:
        raise ValueError("The number of vibrational states must be larger than zero")

    # Two quadratures (x, p) per mode.
    n_modes = len(mu) // 2
    dists = np.zeros((n_modes, n_max))
    for k in range(n_modes):
        mu_k, v_k = quantum.reduced_gaussian(mu, V, k)
        for n in range(n_max):
            elem = quantum.density_matrix_element(mu_k, v_k, [n], [n], hbar=hbar)
            dists[k, n] = np.real(elem)
    return dists
| [
"numpy.zeros",
"thewalrus.quantum.density_matrix_element",
"numpy.array",
"thewalrus.quantum.reduced_gaussian",
"numpy.diag",
"numpy.concatenate"
] | [((4201, 4221), 'numpy.array', 'np.array', (['(d @ l0_inv)'], {}), '(d @ l0_inv)\n', (4209, 4221), True, 'import numpy as np\n'), ((10954, 10980), 'numpy.zeros', 'np.zeros', (['(n_modes, n_max)'], {}), '((n_modes, n_max))\n', (10962, 10980), True, 'import numpy as np\n'), ((7624, 7641), 'numpy.concatenate', 'np.concatenate', (['m'], {}), '(m)\n', (7638, 7641), True, 'import numpy as np\n'), ((7651, 7668), 'numpy.concatenate', 'np.concatenate', (['w'], {}), '(w)\n', (7665, 7668), True, 'import numpy as np\n'), ((7678, 7695), 'numpy.concatenate', 'np.concatenate', (['l'], {}), '(l)\n', (7692, 7695), True, 'import numpy as np\n'), ((11032, 11069), 'thewalrus.quantum.reduced_gaussian', 'quantum.reduced_gaussian', (['mu', 'V', 'mode'], {}), '(mu, V, mode)\n', (11056, 11069), False, 'from thewalrus import quantum\n'), ((4115, 4165), 'numpy.diag', 'np.diag', (['((h / (wf * 100.0 * c)) ** -0.5 * 2.0 * pi)'], {}), '((h / (wf * 100.0 * c)) ** -0.5 * 2.0 * pi)\n', (4122, 4165), True, 'import numpy as np\n'), ((8700, 8723), 'numpy.array', 'np.array', (['excited_state'], {}), '(excited_state)\n', (8708, 8723), True, 'import numpy as np\n'), ((7578, 7595), 'numpy.concatenate', 'np.concatenate', (['r'], {}), '(r)\n', (7592, 7595), True, 'import numpy as np\n'), ((11134, 11194), 'thewalrus.quantum.density_matrix_element', 'quantum.density_matrix_element', (['mui', 'vi', '[i]', '[i]'], {'hbar': 'hbar'}), '(mui, vi, [i], [i], hbar=hbar)\n', (11164, 11194), False, 'from thewalrus import quantum\n'), ((6998, 7029), 'numpy.array', 'np.array', (['line[-n_mode:]', 'float'], {}), '(line[-n_mode:], float)\n', (7006, 7029), True, 'import numpy as np\n'), ((7264, 7282), 'numpy.array', 'np.array', (['d', 'float'], {}), '(d, float)\n', (7272, 7282), True, 'import numpy as np\n')] |
from typing import Sequence
from typing import Tuple
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import torch
def get_feature_contributions(
    model: torch.nn.Module,
    dataset: torch.utils.data.Dataset,
) -> Sequence[torch.Tensor]:
    """Evaluate each per-feature subnetwork of a NAM on its feature's unique values.

    For feature ``i``, ``dataset.unique_features[i]`` is pushed through
    ``model.feature_nns[i]`` on the model's device.

    Returns:
        One contribution array per feature.
        NOTE(review): the returned items are squeezed numpy arrays despite the
        ``Sequence[torch.Tensor]`` annotation — confirm against callers.
    """
    contributions = []
    device = model.config.device
    for idx, values in enumerate(dataset.unique_features):
        inputs = torch.tensor(values).float().to(device)
        outputs = model.feature_nns[idx](inputs)
        contributions.append(outputs.cpu().detach().numpy().squeeze())
    return contributions
def calc_mean_prediction(
    model: torch.nn.Module,
    dataset: torch.utils.data.Dataset,
) -> Tuple[dict, dict]:
    """Compute each feature's mean NAM contribution.

    Returns:
        mean_pred: feature name -> mean of that feature's contribution curve.
        avg_hist_data: feature name -> contribution at each unique feature value.
    """
    # Contribution of each feature subnetwork at that feature's unique values.
    feature_contributions = get_feature_contributions(model, dataset)
    avg_hist_data = {col: contributions for col, contributions in zip(dataset.features_names, feature_contributions)}
    all_indices, mean_pred = {}, {}
    # Map every sample's feature value to its index among the unique values.
    for i, col in enumerate(dataset.features_names):
        feature_i = dataset.features[:, i].cpu()
        all_indices[col] = np.searchsorted(dataset.unique_features[i][:, 0], feature_i, 'left')
    for col in dataset.features_names:
        # NOTE(review): this takes the unweighted mean over the unique-value
        # curve; the commented-out code suggests a dataset-weighted mean via
        # all_indices was intended but raised an error — TODO confirm intent.
        mean_pred[col] = np.mean([avg_hist_data[col]]) #[i] for i in all_indices[col]]) TODO: check the error here
    return mean_pred, avg_hist_data
def plot_mean_feature_importance(model: torch.nn.Module, dataset: torch.utils.data.Dataset, width=0.5):
    """Bar-plot the overall importance of each feature of a NAM.

    A feature's importance is the mean absolute deviation of its contribution
    curve from its mean prediction.  Bars are shown in ascending order of
    importance.

    Args:
        model: trained NAM-style model with per-feature subnetworks.
        dataset: dataset providing ``features_names``.
        width: bar width passed to ``plt.bar``.

    Returns:
        The created matplotlib figure.
    """
    mean_pred, avg_hist_data = calc_mean_prediction(model, dataset)

    def compute_mean_feature_importance(mean_pred, avg_hist_data):
        # Mean absolute deviation of each feature's contribution curve from
        # its mean prediction.
        mean_abs_score = {}
        for k in avg_hist_data:
            try:
                mean_abs_score[k] = np.mean(np.abs(avg_hist_data[k] - mean_pred[k]))
            except Exception:
                # Skip features whose arrays cannot be compared.  Was a bare
                # `except:`, which also swallowed KeyboardInterrupt/SystemExit.
                continue
        names, scores = zip(*mean_abs_score.items())
        return names, scores

    # Renamed from the original opaque `x1`/`x2` (per the author's TODO).
    feature_names, importances = compute_mean_feature_importance(mean_pred, avg_hist_data)

    cols = dataset.features_names
    fig = plt.figure(figsize=(5, 5))
    ind = np.arange(len(feature_names))
    order = np.argsort(importances)
    cols_here = [cols[i] for i in order]
    scores_here = [importances[i] for i in order]

    plt.bar(ind, scores_here, width, label='NAMs')
    plt.xticks(ind + width / 2, cols_here, rotation=90, fontsize='large')
    plt.ylabel('Mean Absolute Score', fontsize='x-large')
    plt.legend(loc='upper right', fontsize='large')
    plt.title('Overall Importance', fontsize='x-large')
    plt.show()
    return fig
def plot_nams(model: torch.nn.Module,
              dataset: torch.utils.data.Dataset,
              num_cols: int = 2,
              n_blocks: int = 20,
              color: list = [0.4, 0.5, 0.9],
              linewidth: float = 7.0,
              alpha: float = 1.0,
              feature_to_use: list = None):
    """Plot each feature's NAM contribution curve in a grid of subplots.

    Each panel shows the feature's contribution (minus its mean prediction)
    over its unique values, shaded by the data density of that feature.

    Args:
        model: trained NAM-style model.
        dataset: dataset providing ``ufo``, ``single_features`` and ``features``.
        num_cols: number of subplot columns.
        n_blocks: maximum number of density-shading blocks per panel.
        color: RGB color of the contribution curves.
        linewidth: line width of the curves.
        alpha: opacity of the curves.
        feature_to_use: optional subset of feature names to plot.

    Returns:
        The created matplotlib figure.
    """
    unique_features, single_features = dataset.ufo, dataset.single_features
    mean_pred, feat_data_contrib = calc_mean_prediction(model, dataset)
    num_rows = len(dataset.features[0]) // num_cols
    fig = plt.figure(num=None, figsize=(num_cols * 10, num_rows * 10), facecolor='w', edgecolor='k')
    fig.tight_layout(pad=7.0)
    # Sort feature->contribution pairs by feature name so panels are stable.
    feat_data_contrib_pairs = list(feat_data_contrib.items())
    feat_data_contrib_pairs.sort(key=lambda x: x[0])
    mean_pred_pairs = list(mean_pred.items())
    mean_pred_pairs.sort(key=lambda x: x[0])
    if feature_to_use:
        feat_data_contrib_pairs = [v for v in feat_data_contrib_pairs if v[0] in feature_to_use]
    # Shared y-range across all panels, padded by 10% of the data span.
    min_y = np.min([np.min(a[1]) for a in feat_data_contrib_pairs])
    max_y = np.max([np.max(a[1]) for a in feat_data_contrib_pairs])
    min_max_dif = max_y - min_y
    min_y = min_y - 0.1 * min_max_dif
    max_y = max_y + 0.1 * min_max_dif
    total_mean_bias = 0
    def shade_by_density_blocks(color: list = [0.9, 0.5, 0.5]):
        # NOTE: this closure reads `name`, `unique_feat_data`, `min_x`,
        # `max_x` and `ax` from the enclosing loop at CALL time — it must
        # only be called from inside the feature loop below.
        single_feature_data = single_features[name]
        x_n_blocks = min(n_blocks, len(unique_feat_data))
        segments = (max_x - min_x) / x_n_blocks
        # Histogram the raw feature values; denser blocks are drawn more opaque.
        density = np.histogram(single_feature_data, bins=x_n_blocks)
        normed_density = density[0] / np.max(density[0])
        rect_params = []
        for p in range(x_n_blocks):
            start_x = min_x + segments * p
            end_x = min_x + segments * (p + 1)
            d = min(1.0, 0.01 + normed_density[p])
            rect_params.append((d, start_x, end_x))
        for param in rect_params:
            alpha, start_x, end_x = param
            rect = patches.Rectangle(
                (start_x, min_y - 1),
                end_x - start_x,
                max_y - min_y + 1,
                linewidth=0.01,
                edgecolor=color,
                facecolor=color,
                alpha=alpha,
            )
            ax.add_patch(rect)
    for i, (name, feat_contrib) in enumerate(feat_data_contrib_pairs):
        # NOTE(review): `mean_pred` (the dict) is shadowed here by the scalar
        # mean for this feature — intentional but easy to misread.
        mean_pred = mean_pred_pairs[i][1]
        total_mean_bias += mean_pred
        unique_feat_data = unique_features[name]
        ax = plt.subplot(num_rows, num_cols, i + 1)
        ## TODO: CATEGORICAL_NAMES if..else
        plt.plot(unique_feat_data, feat_contrib - mean_pred, color=color, linewidth=linewidth, alpha=alpha)
        plt.xticks(fontsize='x-large')
        plt.ylim(min_y, max_y)
        plt.yticks(fontsize='x-large')
        min_x = np.min(unique_feat_data) # - 0.5 ## for categorical
        max_x = np.max(unique_feat_data) # + 0.5
        plt.xlim(min_x, max_x)
        shade_by_density_blocks()
        if i % num_cols == 0:
            plt.ylabel('Features Contribution', fontsize='x-large')
        plt.xlabel(name, fontsize='x-large')
    plt.show()
    return fig
| [
"matplotlib.pyplot.title",
"numpy.abs",
"matplotlib.pyplot.bar",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.histogram",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.yticks",
"numpy.max",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.yli... | [((2114, 2140), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2124, 2140), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2201), 'numpy.argsort', 'np.argsort', (['x2'], {}), '(x2)\n', (2197, 2201), True, 'import numpy as np\n'), ((2296, 2338), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'x2_here', 'width'], {'label': '"""NAMs"""'}), "(ind, x2_here, width, label='NAMs')\n", (2303, 2338), True, 'import matplotlib.pyplot as plt\n'), ((2343, 2412), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(ind + width / 2)', 'cols_here'], {'rotation': '(90)', 'fontsize': '"""large"""'}), "(ind + width / 2, cols_here, rotation=90, fontsize='large')\n", (2353, 2412), True, 'import matplotlib.pyplot as plt\n'), ((2417, 2470), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Absolute Score"""'], {'fontsize': '"""x-large"""'}), "('Mean Absolute Score', fontsize='x-large')\n", (2427, 2470), True, 'import matplotlib.pyplot as plt\n'), ((2475, 2522), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '"""large"""'}), "(loc='upper right', fontsize='large')\n", (2485, 2522), True, 'import matplotlib.pyplot as plt\n'), ((2527, 2579), 'matplotlib.pyplot.title', 'plt.title', (['f"""Overall Importance"""'], {'fontsize': '"""x-large"""'}), "(f'Overall Importance', fontsize='x-large')\n", (2536, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2592, 2594), True, 'import matplotlib.pyplot as plt\n'), ((3141, 3235), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(num_cols * 10, num_rows * 10)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(num_cols * 10, num_rows * 10), facecolor='w',\n edgecolor='k')\n", (3151, 3235), True, 'import matplotlib.pyplot as plt\n'), ((5714, 5724), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5722, 5724), True, 
'import matplotlib.pyplot as plt\n'), ((1187, 1255), 'numpy.searchsorted', 'np.searchsorted', (['dataset.unique_features[i][:, 0]', 'feature_i', '"""left"""'], {}), "(dataset.unique_features[i][:, 0], feature_i, 'left')\n", (1202, 1255), True, 'import numpy as np\n'), ((1321, 1350), 'numpy.mean', 'np.mean', (['[avg_hist_data[col]]'], {}), '([avg_hist_data[col]])\n', (1328, 1350), True, 'import numpy as np\n'), ((4104, 4154), 'numpy.histogram', 'np.histogram', (['single_feature_data'], {'bins': 'x_n_blocks'}), '(single_feature_data, bins=x_n_blocks)\n', (4116, 4154), True, 'import numpy as np\n'), ((5074, 5112), 'matplotlib.pyplot.subplot', 'plt.subplot', (['num_rows', 'num_cols', '(i + 1)'], {}), '(num_rows, num_cols, i + 1)\n', (5085, 5112), True, 'import matplotlib.pyplot as plt\n'), ((5166, 5270), 'matplotlib.pyplot.plot', 'plt.plot', (['unique_feat_data', '(feat_contrib - mean_pred)'], {'color': 'color', 'linewidth': 'linewidth', 'alpha': 'alpha'}), '(unique_feat_data, feat_contrib - mean_pred, color=color, linewidth\n =linewidth, alpha=alpha)\n', (5174, 5270), True, 'import matplotlib.pyplot as plt\n'), ((5275, 5305), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (5285, 5305), True, 'import matplotlib.pyplot as plt\n'), ((5315, 5337), 'matplotlib.pyplot.ylim', 'plt.ylim', (['min_y', 'max_y'], {}), '(min_y, max_y)\n', (5323, 5337), True, 'import matplotlib.pyplot as plt\n'), ((5346, 5376), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '"""x-large"""'}), "(fontsize='x-large')\n", (5356, 5376), True, 'import matplotlib.pyplot as plt\n'), ((5394, 5418), 'numpy.min', 'np.min', (['unique_feat_data'], {}), '(unique_feat_data)\n', (5400, 5418), True, 'import numpy as np\n'), ((5464, 5488), 'numpy.max', 'np.max', (['unique_feat_data'], {}), '(unique_feat_data)\n', (5470, 5488), True, 'import numpy as np\n'), ((5506, 5528), 'matplotlib.pyplot.xlim', 'plt.xlim', (['min_x', 'max_x'], {}), '(min_x, 
max_x)\n', (5514, 5528), True, 'import matplotlib.pyplot as plt\n'), ((5672, 5708), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['name'], {'fontsize': '"""x-large"""'}), "(name, fontsize='x-large')\n", (5682, 5708), True, 'import matplotlib.pyplot as plt\n'), ((3612, 3624), 'numpy.min', 'np.min', (['a[1]'], {}), '(a[1])\n', (3618, 3624), True, 'import numpy as np\n'), ((3680, 3692), 'numpy.max', 'np.max', (['a[1]'], {}), '(a[1])\n', (3686, 3692), True, 'import numpy as np\n'), ((4193, 4211), 'numpy.max', 'np.max', (['density[0]'], {}), '(density[0])\n', (4199, 4211), True, 'import numpy as np\n'), ((4563, 4705), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(start_x, min_y - 1)', '(end_x - start_x)', '(max_y - min_y + 1)'], {'linewidth': '(0.01)', 'edgecolor': 'color', 'facecolor': 'color', 'alpha': 'alpha'}), '((start_x, min_y - 1), end_x - start_x, max_y - min_y + 1,\n linewidth=0.01, edgecolor=color, facecolor=color, alpha=alpha)\n', (4580, 4705), True, 'import matplotlib.patches as patches\n'), ((5607, 5662), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Features Contribution"""'], {'fontsize': '"""x-large"""'}), "('Features Contribution', fontsize='x-large')\n", (5617, 5662), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1852), 'numpy.abs', 'np.abs', (['(avg_hist_data[k] - mean_pred[k])'], {}), '(avg_hist_data[k] - mean_pred[k])\n', (1819, 1852), True, 'import numpy as np\n'), ((430, 451), 'torch.tensor', 'torch.tensor', (['feature'], {}), '(feature)\n', (442, 451), False, 'import torch\n')] |
# based on https://github.com/google-coral/pycoral/blob/master/examples/detect_image.py
from imutils.video import VideoStream, FPS
import argparse
import time
import cv2
from PIL import Image, ImageDraw
import numpy as np
from pycoral.adapters import common
from pycoral.adapters import detect
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter
def draw_objects(image, objs, labels):
    """Overlay detection boxes and labels on the frame, then display it."""
    draw = ImageDraw.Draw(image)
    for detection in objs:
        box = detection.bbox
        draw.rectangle(
            [(box.xmin, box.ymin), (box.xmax, box.ymax)], outline='red')
        caption = '%s\n%.2f' % (labels.get(detection.id, detection.id), detection.score)
        draw.text((box.xmin + 10, box.ymin + 10), caption, fill='red')
    cv2.imshow('Coral Live Object Detection', np.asarray(image))
def main():
    """Run live object detection on a camera stream with a Coral Edge TPU.

    Parses CLI arguments, loads the TFLite model and label map, then loops:
    grab a frame, run detection, draw the results, and exit on 'q' or Ctrl-C.
    Finally prints elapsed time and approximate FPS.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', help='File path of Tflite model.', required=True)
    parser.add_argument(
        '--labels', help='File path of label file.', required=True)
    parser.add_argument('--picamera',
                        action='store_true',
                        help="Use PiCamera for image capture",
                        default=False)
    parser.add_argument(
        '-t', '--threshold', type=float, default=0.5,
        help='Classification score threshold')
    args = parser.parse_args()

    print('Loading {} with {} labels.'.format(args.model, args.labels))
    labels = read_label_file(args.labels) if args.labels else {}
    interpreter = make_interpreter(args.model)
    interpreter.allocate_tensors()

    # Initialize video stream and give the camera a moment to warm up.
    vs = VideoStream(usePiCamera=args.picamera, resolution=(640, 480)).start()
    time.sleep(1)

    fps = FPS().start()

    while True:
        try:
            # Read frame from video
            screenshot = vs.read()
            image = Image.fromarray(screenshot)

            # Resize the frame to the model's input size; `scale` maps the
            # detection boxes back to the original image coordinates.
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
            _, scale = common.set_resized_input(
                interpreter, image.size, lambda size: image.resize(size, Image.LANCZOS))
            interpreter.invoke()
            objs = detect.get_objects(interpreter, args.threshold, scale)

            draw_objects(image, objs, labels)

            # Quit on 'q' (waitKey also services the imshow window).
            if (cv2.waitKey(5) & 0xFF) == ord('q'):
                fps.stop()
                break

            fps.update()
        except KeyboardInterrupt:
            fps.stop()
            break

    print("Elapsed time: " + str(fps.elapsed()))
    print("Approx FPS: " + str(fps.fps()))  # fixed double-colon typo

    cv2.destroyAllWindows()
    vs.stop()
    time.sleep(2)
# Script entry point: only start the detection loop when executed directly.
if __name__ == '__main__':
    main()
| [
"pycoral.utils.edgetpu.make_interpreter",
"imutils.video.VideoStream",
"imutils.video.FPS",
"pycoral.utils.dataset.read_label_file",
"argparse.ArgumentParser",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.asarray",
"pycoral.adapters.detect.get_objects",
"time.sleep",
"PIL.Image.fromarray",
"... | [((448, 469), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (462, 469), False, 'from PIL import Image, ImageDraw\n'), ((771, 788), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (781, 788), True, 'import numpy as np\n'), ((793, 848), 'cv2.imshow', 'cv2.imshow', (['"""Coral Live Object Detection"""', 'displayImage'], {}), "('Coral Live Object Detection', displayImage)\n", (803, 848), False, 'import cv2\n'), ((876, 901), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (899, 901), False, 'import argparse\n'), ((1587, 1615), 'pycoral.utils.edgetpu.make_interpreter', 'make_interpreter', (['args.model'], {}), '(args.model)\n', (1603, 1615), False, 'from pycoral.utils.edgetpu import make_interpreter\n'), ((1765, 1778), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1775, 1778), False, 'import time\n'), ((2548, 2571), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2569, 2571), False, 'import cv2\n'), ((2590, 2603), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2600, 2603), False, 'import time\n'), ((1517, 1545), 'pycoral.utils.dataset.read_label_file', 'read_label_file', (['args.labels'], {}), '(args.labels)\n', (1532, 1545), False, 'from pycoral.utils.dataset import read_label_file\n'), ((1691, 1752), 'imutils.video.VideoStream', 'VideoStream', ([], {'usePiCamera': 'args.picamera', 'resolution': '(640, 480)'}), '(usePiCamera=args.picamera, resolution=(640, 480))\n', (1702, 1752), False, 'from imutils.video import VideoStream, FPS\n'), ((1790, 1795), 'imutils.video.FPS', 'FPS', ([], {}), '()\n', (1793, 1795), False, 'from imutils.video import VideoStream, FPS\n'), ((1925, 1952), 'PIL.Image.fromarray', 'Image.fromarray', (['screenshot'], {}), '(screenshot)\n', (1940, 1952), False, 'from PIL import Image, ImageDraw\n'), ((2145, 2199), 'pycoral.adapters.detect.get_objects', 'detect.get_objects', (['interpreter', 'args.threshold', 'scale'], {}), '(interpreter, 
args.threshold, scale)\n', (2163, 2199), False, 'from pycoral.adapters import detect\n'), ((2263, 2277), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (2274, 2277), False, 'import cv2\n')] |
#!/usr/bin/python3
__version__ = '0.0.12' # Time-stamp: <2021-09-25T04:50:45Z>
## Language: Japanese/UTF-8
"""Simulation Buddhism Prototype No.2 - Domination
支配関連
"""
##
## Author:
##
## JRF ( http://jrf.cocolog-nifty.com/statuses/ (in Japanese))
##
## License:
##
## The author is a Japanese.
##
## I intended this program to be public-domain, but you can treat
## this program under the (new) BSD-License or under the Artistic
## License, if it is convenient for you.
##
## Within three months after the release of this program, I
## especially admit responsibility of efforts for rational requests
## of correction to this program.
##
## I often have bouts of schizophrenia, but I believe that my
## intention is legitimately fulfilled.
##
from collections import OrderedDict
import math
import random
import numpy as np
import simbdp2.base as base
from simbdp2.base import ARGS, Person0, Economy0, \
Serializable, SerializableExEconomy
from simbdp2.random import negative_binominal_rand, half_normal_rand
from simbdp2.common import np_clip, Child, Marriage, Rape
class PersonDM (Person0):
    """Person mixin with domination-related helpers."""
    def get_dominator (self):
        """Return this person's Dominator object, or None when the
        person holds no position.

        Raises ValueError when dominator_position names a position the
        nation's records do not confirm (inconsistent state)."""
        p = self
        economy = self.economy
        nation = economy.nation
        pos = p.dominator_position
        if pos is None:
            return None
        elif pos == 'king':
            return nation.king
        elif pos == 'governor':
            return nation.districts[p.district].governor
        elif pos == 'vassal':
            for d in nation.vassals:
                if d.id == p.id:
                    return d
        elif pos == 'cavalier':
            for d in nation.districts[p.district].cavaliers:
                if d.id == p.id:
                    return d
        # Reached when the recorded position is vassal/cavalier but the
        # person is not in the corresponding list.
        raise ValueError('Person.dominator_position is inconsistent.')
    def highest_position_of_family (self):
        """Return the highest dominator position held by a living member
        of this person's support family (the supporter and everyone they
        support), or None when that member is dead."""
        p = self
        economy = self.economy
        sid = p.supported
        if sid is None or sid == '':
            sid = p.id
        # Pick the family member whose position ranks highest; dead
        # members rank 0.
        qid = max([sid] + economy.people[sid].supporting_non_nil(),
                  key=(lambda x: 0 if economy.people[x].death is not None
                       else economy.position_rank(economy.people[x]
                                                  .dominator_position)))
        if economy.people[qid].death is not None:
            return None
        return economy.people[qid].dominator_position
class EconomyDM (Economy0):
    """Economy mixin implementing domination mechanics: appointing and
    removing dominators, evaluating their work, and applying population
    effects (political hating, injury, rape)."""
    def new_dominator (self, position, person, adder=0):
        """Install person into position, drawing (or reusing cached)
        Dominator ability parameters.

        adder shifts each freshly drawn ability by |adder| random steps
        of up to ARGS.dominator_adder (its sign gives the direction); it
        is ignored when cached parameters already exist for the person."""
        economy = self
        p = person
        if p.id in economy.dominator_parameters:
            d = economy.dominator_parameters[p.id]
            adder = 0
        else:
            d = Dominator()
            economy.dominator_parameters[p.id] = d
            d.id = p.id
            d.people_trust = random.random()
            d.faith_realization = random.random()
            d.disaster_prophecy = random.random()
            d.disaster_strategy = random.random()
            d.disaster_tactics = random.random()
            d.combat_prophecy = random.random()
            #d.combat_strategy = random.random()
            d.combat_tactics = random.random()
        while adder != 0:
            sgn = 0
            if adder > 0:
                adder -= 1
                sgn = +1
            elif adder < 0:
                adder += 1
                sgn = -1
            for n in ['people_trust',
                      'faith_realization',
                      'disaster_prophecy',
                      'disaster_strategy',
                      'disaster_tactics',
                      'combat_prophecy',
                      # 'combat_strategy',
                      'combat_tactics']:
                u = sgn * random.random() * ARGS.dominator_adder
                setattr(d, n, np_clip(getattr(d, n) + u, 0, 1))
        d.economy = economy
        d.district = p.district
        d.position = position
        p.dominator_position = position
        if position == 'king':
            economy.nation.king = d
        elif position == 'governor':
            economy.nation.districts[p.district].governor = d
        elif position == 'vassal':
            economy.nation.vassals.append(d)
        elif position == 'cavalier':
            economy.nation.districts[p.district].cavaliers.append(d)
        return d
    def delete_dominator (self, person):
        """Remove person from their dominator position, if any."""
        economy = self
        p = person
        position = p.dominator_position
        if position is None:
            return
        if position == 'king':
            assert economy.nation.king.id == p.id
            economy.nation.king = None
        elif position == 'governor':
            assert economy.nation.districts[p.district].governor.id == p.id
            economy.nation.districts[p.district].governor = None
        elif position == 'vassal':
            pl = len(economy.nation.vassals)
            economy.nation.vassals = [d for d in economy.nation.vassals
                                      if d.id != p.id]
            assert pl == len(economy.nation.vassals) + 1
        elif position == 'cavalier':
            pl = len(economy.nation.districts[p.district].cavaliers)
            economy.nation.districts[p.district].cavaliers \
                = [d for d in economy.nation.districts[p.district].cavaliers
                   if d.id != p.id]
            assert pl == len(economy.nation.districts[p.district].cavaliers) + 1
        p.dominator_position = None
    def calc_dominator_work (self, dominator1, work_func):
        """Evaluate dominator1's work via work_func, blended with the
        king's, vassals' and governor's contributions.  Each contributor
        is discounted by how un-soothed their grudges are, and the total
        is scaled by the district's power."""
        economy = self
        d = dominator1
        nation = economy.nation
        dist = nation.districts[d.district]
        f = work_func
        a_king = f(nation.king)
        vab = [f(d) for d in nation.vassals]
        vht = np.mean([d.soothed_hating_to_king() for d in nation.vassals])
        a_vassals = (0.5 + 0.5 * (1 - vht)) \
            * ((1/3) * max(vab) + (2/3) * np.mean(vab))
        a_governor = (0.75 + 0.25 * (1 - dist.governor.soothed_hating_to_king())) \
            * f(dist.governor)
        a_cavalier = f(d)
        # Mixing weights: the cavalier's own work dominates, then the
        # vassals'; the king/governor weights shrink with the grudge.
        r_king = 0.5 + 0.5 * (1 - d.soothed_hating_to_king())
        r_vassals = 3
        r_governor = 0.5 + 0.5 * (1 - d.soothed_hating_to_governor())
        r_cavalier = 5
        p = (r_king * a_king + r_vassals * a_vassals \
             + r_governor * a_governor + r_cavalier * a_cavalier) \
            / (r_king + r_vassals + r_governor + r_cavalier)
        p *= 0.75 + 0.25 \
            * (1 - max([d.soothed_hating_to_king(), d.soothed_hating_to_governor()]))
        p *= dist.tmp_power
        return p
    def add_family_political_hating (self, people, max_adder):
        """Add a uniform-random amount (up to max_adder) of political
        hating to every member of each family represented in people."""
        economy = self
        fa = set()
        for p in people:
            if p.supported is not None and p.supported != '':
                fa.add(p.supported)
            else:
                fa.add(p.id)
        for pid in fa:
            p = economy.people[pid]
            for qid in [p.id] + p.supporting_non_nil():
                q = economy.people[qid]
                a = random.uniform(0, max_adder)
                q.political_hating = np_clip(q.political_hating + a, 0, 1)
    def add_political_hating (self, people, max_adder):
        """Add a uniform-random amount (up to max_adder) of political
        hating to each person in people."""
        economy = self
        fa = set()  # NOTE(review): unused; likely copied from
                    # add_family_political_hating.
        for p in people:
            a = random.uniform(0, max_adder)
            p.political_hating = np_clip(p.political_hating + a, 0, 1)
    def injure (self, people, max_permanent=0.5, max_temporal=0.5,
                permanent_injury_rate=None):
        """Injure each person in people: always a random temporary
        injury (up to max_temporal) and, with probability
        permanent_injury_rate (default ARGS.permanent_injury_rate), a
        random permanent injury (up to max_permanent)."""
        economy = self
        if permanent_injury_rate is None:
            permanent_injury_rate = ARGS.permanent_injury_rate
        fa = set()  # NOTE(review): unused.
        for p in people:
            b = random.uniform(0, max_temporal)
            p.tmp_injured = np_clip(p.tmp_injured + b, 0, 1)
            if random.random() < permanent_injury_rate:
                a = random.uniform(0, max_permanent)
                p.injured = np_clip(p.injured + a, 0, 1)
    def rape (self, people):
        """Record a Rape event against each person in people, possibly
        causing pregnancy depending on fertility.  Returns the number of
        resulting pregnancies."""
        economy = self
        n_p = 0
        for f in people:
            af = Rape()
            af.spouse = ''
            af.init_favor = 0
            af.begin = economy.term
            af.end = economy.term
            f.trash.append(af)
            if f.fertility != 0 and f.pregnancy is None:
                ft = f.fertility
                if random.random() < ARGS.rape_pregnant_rate \
                   * (ft ** ARGS.rape_pregnant_mag):
                    f.get_pregnant(af)
                    n_p += 1
        return n_p
    # Ordering of dominator positions, lowest (no position) to highest.
    position_rank_table = {
        None: 0,
        'cavalier': 1,
        'vassal': 2,
        'governor': 3,
        'king': 4
    }
    def position_rank (self, pos):
        """Return the numeric rank of a position (None=0 ... king=4)."""
        return type(self).position_rank_table[pos]
class Dominator (SerializableExEconomy):
    """Abilities and grudge state of one dominator (king, vassal,
    governor or cavalier)."""
    def __init__ (self):
        self.id = None
        self.economy = None
        self.district = None        # district number
        self.position = None        # office held
        self.people_trust = 0       # trust of the people
        self.faith_realization = 0  # realization of faith
        self.disaster_prophecy = 0  # disaster foresight
        self.disaster_strategy = 0  # disaster strategy
        self.disaster_tactics = 0   # disaster tactics
        self.combat_prophecy = 0    # combat foresight
        # self.combat_strategy = 0    # combat strategy
        self.combat_tactics = 0     # combat tactics
        self.adder = 0              # overall ability adjustment level
        self.hating_to_king = 0     # familial grudge against the king
        self.hating_to_governor = 0 # familial grudge against the governor
        self.soothing_by_king = 0   # soothing by the king (may be negative)
        self.soothing_by_governor = 0 # soothing by the governor (may be negative)
    def calc_combat_strategy (self, delta=None):
        """Combat strategy derived from disaster strategy and combat
        tactics.  (The delta argument is unused here.)"""
        return (2 * self.disaster_strategy
                + self.combat_tactics) / 3
    def update_hating (self):
        # Walk the family tree to set familial grudges: for each of the
        # king and the governor, a relative's hatred toward them counts
        # only when that relative is closer in family distance than the
        # king/governor themselves.
        d0 = self
        economy = self.economy
        p = economy.get_person(d0.id)
        s = set([p.id])
        checked = set([p.id])
        distance = 1
        r = OrderedDict()
        r[p.id] = 0
        # Breadth-first search over parents, children and spouses up to
        # ARGS.max_family_distance; r maps person id -> family distance.
        while distance <= ARGS.max_family_distance and s:
            s2 = set([])
            for qid in s:
                q = economy.get_person(qid)
                if q is None:
                    continue
                for x in [q.father, q.mother]:
                    if x == '' or x in checked:
                        continue
                    s2.add(x)
                    r[x] = distance
                for ch in q.children + q.trash:
                    if isinstance(ch, Child):
                        x = ch.id
                        if x == '' or x in checked:
                            continue
                        s2.add(x)
                        r[x] = distance
                for m in [q.marriage] + q.trash:
                    if m is not None and isinstance(m, Marriage):
                        x = m.spouse
                        if x == '' or x in checked:
                            continue
                        s2.add(x)
                        r[x] = distance
            checked.update(s2)
            distance += 1
            s = s2
        k_id = None
        if economy.nation.king is None:
            k_id = None
        else:
            k_id = economy.nation.king.id
        k_distance = ARGS.max_family_distance + 1
        if k_id is not None and k_id in r:
            k_distance = r[k_id]
        if economy.nation.districts[p.district].governor is None:
            g_id = None
        else:
            g_id = economy.nation.districts[p.district].governor.id
        g_distance = ARGS.max_family_distance + 1
        if g_id is not None and g_id in r:
            g_distance = r[g_id]
        hk = 0
        hg = 0
        # Take the maximum hatred among relatives strictly closer than
        # the king/governor.
        for q_id, d in r.items():
            if k_id is not None and d < k_distance:
                q = economy.get_person(q_id)
                if q is not None and k_id in q.hating and q.hating[k_id] > hk:
                    hk = q.hating[k_id]
            if g_id is not None and d < g_distance:
                q = economy.get_person(q_id)
                if q is not None and g_id in q.hating and q.hating[g_id] > hg:
                    hg = q.hating[g_id]
        d0.hating_to_king = hk
        d0.hating_to_governor = hg
    def resign (self):
        """Resign the current position, recording a nomination for the
        vacated seat so a successor can be chosen later."""
        d = self
        economy = self.economy
        nation = economy.nation
        p = economy.people[d.id]
        assert p.dominator_position is not None
        nation.nomination.append((p.dominator_position, p.district, p.dominator_position, p.id))
        economy.delete_dominator(p)
    def soothe_district (self):
        """Halve the political hating of a random fraction of adults in
        this dominator's district; the fraction grows with soothing
        ability (between 1/4 and 1/2)."""
        d = self
        economy = self.economy
        p = economy.calc_dominator_work(d, lambda x: x.soothe_ability())
        q = ((1/2) - (1/4)) * p + (1/4)
        for p in economy.people.values():
            if p.death is None and p.age >= 18 \
               and p.district == d.district \
               and random.random() < q:
                p.political_hating *= 0.5
    def construct (self, p_or_t, calamity_name, idx, challenging=False):
        """Improve one protection/training unit for a calamity and
        return its new level.

        p_or_t is 'protection' or 'training'; idx indexes the unit list
        of this dominator's district.  When challenging is True a
        higher-variance beta factor is applied and the dominator's
        related abilities grow a little (weights in scoeff)."""
        d = self
        economy = self.economy
        nation = economy.nation
        dist = nation.districts[d.district]
        cn = calamity_name
        cinfo = base.calamity_info[cn]
        if p_or_t == 'protection' and cn == 'invasion':
            f = lambda x: x.invasion_protection_ability()
            ccoeff = cinfo.protection_construct_coeff
            cmax = cinfo.protection_max - 0.5
            setting = dist.protection_units[cn]
            scoeff = {'disaster_strategy': (2/3) * 0.75,
                      'combat_tactics': (1/3) * 0.75,
                      'people_trust': 0.25}
        elif p_or_t == 'training' and cn == 'invasion':
            f = lambda x: x.invasion_training_ability()
            ccoeff = cinfo.training_construct_coeff
            cmax = cinfo.training_max - 0.5
            setting = dist.training_units[cn]
            scoeff = {'combat_tactics': 0.70,
                      'people_trust': 0.15,
                      'faith_realization': 0.15}
        elif p_or_t == 'protection':
            f = lambda x: x.disaster_protection_ability()
            ccoeff = cinfo.protection_construct_coeff
            cmax = cinfo.protection_max - 0.5
            setting = dist.protection_units[cn]
            scoeff = {'disaster_strategy': 0.75,
                      'people_trust': 0.25}
        elif p_or_t == 'training':
            f = lambda x: x.disaster_training_ability()
            ccoeff = cinfo.training_construct_coeff
            cmax = cinfo.training_max - 0.5
            setting = dist.training_units[cn]
            scoeff = {'disaster_tactics': 0.75,
                      'people_trust': 0.25}
        p = economy.calc_dominator_work(d, f)
        beta = ARGS.challenging_beta if challenging \
            else ARGS.not_challenging_beta
        p *= np.random.beta(beta, beta)
        # Invert the quadratic construction-cost model to get the new
        # level, capped at cmax.
        x = np_clip(math.sqrt((p + ccoeff * (setting[idx] ** 2)) / ccoeff),
                    0, cmax)
        setting[idx] = x
        if not challenging:
            return x
        k = sum(list(scoeff.values()))
        for n, v in scoeff.items():
            setattr(d, n,
                    np_clip(
                        getattr(d, n) + (ARGS.challenging_growth * v / k),
                        0, 1))
        return x
    def set_adder (self, new_adder=None):
        """Set the overall ability adjustment.  When new_adder is None
        it is derived from the person's pregnancy, marriage and injury
        state; abilities are then shifted one random step at a time
        until self.adder equals new_adder."""
        d = self
        economy = self.economy
        p = economy.people[d.id]
        if new_adder is None:
            new_adder = 0
            if p.pregnancy is not None:
                if p.marriage is not None:
                    new_adder = -1
                else:
                    new_adder = -2
            else:
                if p.marriage is not None:
                    new_adder = 1
                else:
                    new_adder = 0
            if p.injured >= 0.6:
                new_adder -= 3
            elif p.injured >= 0.3:
                new_adder -= 2
            elif p.injured >= 0.1:
                new_adder -= 1
        while new_adder != d.adder:
            sgn = 0
            if new_adder > d.adder:
                d.adder += 1
                sgn = +1
            elif new_adder < d.adder:
                d.adder -= 1
                sgn = -1
            for n in ['people_trust',
                      'faith_realization',
                      'disaster_prophecy',
                      'disaster_strategy',
                      'disaster_tactics',
                      'combat_prophecy',
                      # 'combat_strategy',
                      'combat_tactics']:
                u = sgn * random.random() * ARGS.dominator_adder
                setattr(d, n, np_clip(getattr(d, n) + u, 0, 1))
    def soothed_hating_to_king (self):
        """Grudge against the king after soothing, clipped to [0, 1]."""
        d = self
        return np_clip(d.hating_to_king - d.soothing_by_king, 0, 1)
    def soothed_hating_to_governor (self):
        """Grudge against the governor after soothing, clipped to [0, 1]."""
        d = self
        return np_clip(d.hating_to_governor - d.soothing_by_governor, 0, 1)
    def general_ability (self):
        """Mean of the seven ability parameters."""
        d = self
        return np.mean([d.people_trust,
                        d.faith_realization,
                        d.disaster_prophecy,
                        d.disaster_strategy,
                        d.disaster_tactics,
                        d.combat_prophecy,
                        # d.combat_strategy,
                        d.combat_tactics])
    def soothe_ability (self):
        """Ability used when soothing the district's population."""
        d = self
        return 0.5 * d.people_trust + 0.5 * d.faith_realization
    def disaster_prophecy_ability (self):
        """Ability to foresee natural disasters."""
        d = self
        return 0.70 * d.disaster_prophecy + 0.30 * d.faith_realization
    def disaster_protection_ability (self):
        """Ability to build protection against natural disasters."""
        d = self
        return 0.75 * d.disaster_strategy + 0.25 * d.people_trust
    def disaster_training_ability (self):
        """Ability to train the population against natural disasters."""
        d = self
        return 0.75 * d.disaster_tactics + 0.25 * d.people_trust
    def invasion_prophecy_ability (self):
        """Ability to foresee invasions."""
        d = self
        return 0.60 * d.combat_prophecy + 0.40 * d.faith_realization
    def invasion_protection_ability (self):
        """Ability to build protection against invasions."""
        d = self
        return 0.75 * d.calc_combat_strategy() + 0.25 * d.people_trust
    def invasion_training_ability (self):
        """Ability to train against invasions; the faith contribution
        saturates at ARGS.faith_realization_power_threshold."""
        d = self
        fr = d.faith_realization
        if fr > ARGS.faith_realization_power_threshold:
            fr = ARGS.faith_realization_power_threshold
        if fr < 0.5:
            p = 0.8 * (fr / 0.5)
        else:
            p = 0.8 + 0.2 * ((fr - 0.5) / 0.5)
        return 0.70 * d.combat_tactics + 0.15 * d.people_trust \
            + 0.15 * p
class District (Serializable):
    """Per-district state: its governor and cavaliers, per-calamity
    protection/training unit levels, and assorted bookkeeping."""
    def __init__ (self):
        self.governor = None
        self.cavaliers = []
        self.tmp_hated = 0          # reflects the populace's political hatred
        calamities = ['invasion', 'flood', 'bigfire', 'earthquake',
                      'famine', 'cropfailure', 'plague']
        # One list of unit levels per calamity name.
        self.protection_units = {n: [] for n in calamities}
        self.training_units = {n: [] for n in calamities}
        self.tmp_disaster_brain = None   # adviser for natural disasters
        self.tmp_invasion_brain = None   # adviser for war
        self.tmp_education = 1.0    # mean education of adults (18+)
        self.tmp_fidelity = 1.0     # 1 - mean political_hating of adults (18+)
        self.tmp_population = 0     # population
        self.tmp_budget = 0         # budget (total donations / 12)
        self.prev_budget = []       # budget averages over the past 10 years
        self.tmp_power = 1.0        # district power
class Nation (Serializable):
    """Top-level national state: the king, the vassals, the districts
    and pending successor nominations."""
    def __init__ (self):
        self.districts = []
        self.king = None
        self.vassals = []
        self.tmp_population = 0   # population
        self.tmp_budget = 0       # budget (total donations / 12)
        self.prev_budget = []     # budget averages over the past 10 years
        self.nomination = []      # pending successor nominations
    def dominators (self):
        """Return every active dominator: the king, the vassals, and
        each district's governor and cavaliers, in that order."""
        result = []
        if self.king is not None:
            result.append(self.king)
        result.extend(self.vassals)
        for dist in self.districts:
            if dist.governor is not None:
                result.append(dist.governor)
            result.extend(dist.cavaliers)
        return result
def initialize_nation (economy):
    """Create the Nation and its Districts, appoint initial dominators
    (king, vassals, governors, cavaliers) from adults aged 18-50, and
    initialize budgets and per-calamity unit levels."""
    economy.nation = Nation()
    nation = economy.nation
    for d_num in range(len(ARGS.population)):
        district = District()
        nation.districts.append(district)
    dpeople = [[] for dnum in range(len(ARGS.population))]
    for p in economy.people.values():
        if p.death is not None:
            continue
        if p.age >= 18 and p.age <= 50:
            dpeople[p.district].append(p)
    for dnum, dist in enumerate(nation.districts):
        # One governor plus one cavalier per ~1000 inhabitants; district
        # 0 additionally supplies the king and ten vassals.
        n = math.ceil(ARGS.population[dnum] / 1000) + 1
        if dnum == 0:
            n += 11
        l = random.sample(dpeople[dnum], n)
        p = l.pop(0)
        d = economy.new_dominator('governor', p)
        for i in range(math.ceil(ARGS.population[dnum] / 1000)):
            p = l.pop(0)
            d = economy.new_dominator('cavalier', p)
        if dnum == 0:
            p = l.pop(0)
            d = economy.new_dominator('king', p)
            for i in range(10):
                p = l.pop(0)
                d = economy.new_dominator('vassal', p)
    for d in nation.dominators():
        d.set_adder()
        d.update_hating()
    nation.tmp_budget = ARGS.initial_budget_per_person \
        * sum(ARGS.population)
    for dnum in range(len(ARGS.population)):
        nation.districts[dnum].tmp_budget = ARGS.initial_budget_per_person \
            * ARGS.population[dnum]
    # Start every calamity's protection/training units one level below
    # their construction maxima.
    for cn, ci in base.calamity_info.items():
        for dnum in range(len(ARGS.population)):
            dist = nation.districts[dnum]
            units = math.ceil(ci.protection_units_base
                              * (ARGS.population[dnum] / 1000))
            dist.protection_units[cn] = [ci.protection_construct_max - 1] \
                * units
            units = math.ceil(ci.training_units_base
                              * (ARGS.population[dnum] / 1000))
            dist.training_units[cn] = [ci.training_construct_max - 1] \
                * units
def _random_scored_sort (paired_list):
l = paired_list
r = []
while l:
s = sum([x[0] for x in l])
q = s * random.random()
y = 0
for i, x in enumerate(l):
y += x[0]
if q < y:
r.append(x[1])
l = l[0:i] + l[i+1:]
return r
def _successor_check (economy, person, position, dnum):
p = person
pos = position
if p.death is not None:
return False
if not (p.age >= 18 and p.age <= 50):
return False
pr0 = economy.position_rank(position)
if pr0 <= economy.position_rank(p.dominator_position):
return False
if p.district != dnum:
if pr0 <= economy.position_rank(p.highest_position_of_family()):
return False
return True
def _nominate_successor (economy, person, position, dnum):
    """Find a successor for person, preferring relatives of relation
    'M', then 'M' or 'A', then any relation.  Returns the first match
    found by _nominate_successor_1, or None."""
    preferences = [
        lambda x: x.relation == 'M',
        lambda x: x.relation == 'M' or x.relation == 'A',
        lambda x: True,
    ]
    for check in preferences:
        candidate = _nominate_successor_1(economy, person, position,
                                          dnum, check)
        if candidate is not None:
            return candidate
    return None
def _nominate_successor_1 (economy, person, position, dnum, check_func):
    """Find a successor for person among relatives passing check_func.

    First searches the person's children in birth order and, for each
    ineligible child, that child's children; then repeats the same
    search over the person's siblings (children of father and mother).
    Returns the first eligible candidate found, or None."""
    p = person
    pos = position
    checked = set([p.id])
    l = [x for x in p.children + p.trash
         if isinstance(x, Child) and check_func(x)]
    l.sort(key=lambda x: x.birth_term)
    ex = None
    for ch in l:
        if ch.id is None or ch.id == '':
            continue
        checked.add(ch.id)
        q = economy.get_person(ch.id)
        if q is None:
            continue
        if _successor_check(economy, q, pos, dnum):
            ex = q
            break
        l2 = [x for x in q.children + q.trash
              if isinstance(x, Child) and check_func(x)]
        l2.sort(key=lambda x: x.birth_term)
        for ch2 in l2:
            if ch2.id is None or ch2.id == '' or ch2.id not in economy.people:
                continue
            q2 = economy.people[ch2.id]
            if _successor_check(economy, q2, pos, dnum):
                ex = q2
                # NOTE(review): no break here, so the LAST eligible
                # grandchild wins rather than the eldest — confirm
                # whether that is intended.
        if ex is not None:
            break
    if ex is not None:
        return ex
    # No descendant found: collect the person's siblings via both
    # parents and run the same child/grandchild search over them.
    l = []
    q = economy.get_person(p.father)
    if q is not None:
        l2 = [x for x in q.children + q.trash
              if isinstance(x, Child) and check_func(x)]
        l2.sort(key=lambda x: x.birth_term)
        l = l + l2
    q = economy.get_person(p.mother)
    if q is not None:
        l2 = [x for x in q.children + q.trash
              if isinstance(x, Child) and check_func(x)]
        l2.sort(key=lambda x: x.birth_term)
        l = l + l2
    for ch in l:
        if ch.id is None or ch.id == '':
            continue
        if ch.id in checked:
            continue
        checked.add(ch.id)
        q = economy.get_person(ch.id)
        if q is None:
            continue
        if _successor_check(economy, q, pos, dnum):
            ex = q
            break
        l2 = [x for x in q.children + q.trash
              if isinstance(x, Child) and check_func(x)]
        l2.sort(key=lambda x: x.birth_term)
        for ch2 in l2:
            if ch2.id is None or ch2.id == '' or ch2.id not in economy.people:
                continue
            q2 = economy.people[ch2.id]
            if _successor_check(economy, q2, pos, dnum):
                ex = q2
        if ex is not None:
            break
    return ex
def nominate_successors (economy):
    """Fill every vacant dominator post, repeating until none remains.

    For each vacancy, candidate-selection methods are tried in a partly
    random, nomination-weighted order: honoring a recorded nomination,
    letting the king/vassals/governor nominate, promoting from lower
    ranks, or drafting from the population.  The appointee's family
    moves to the post's district.  Afterwards, passed-over nominees (or
    their would-be successors) may come to hate the appointee."""
    nation = economy.nation
    new_nomination = []
    while True:
        # Find one vacancy: ex is the position name, exd the district.
        ex = None
        exd = None
        if nation.king is None:
            ex = 'king'
            exd = 0
        if ex is None:
            if len(nation.vassals) < 10:
                ex = 'vassal'
                exd = 0
        for dnum, dist in enumerate(nation.districts):
            if ex is not None:
                continue
            if dist.governor is None:
                ex = 'governor'
                exd = dnum
        for dnum, dist in enumerate(nation.districts):
            if ex is not None:
                continue
            if len(dist.cavaliers) < math.ceil(ARGS.population[dnum] / 1000):
                ex = 'cavalier'
                exd = dnum
        if ex is None:
            break
        print("nominate:", ex, exd)
        # Refresh recorded nominations: drop nominations whose nominee
        # no longer exists, and raise the recorded nominator rank to
        # the nominee's current position when they have since risen.
        nation.nomination = [((pos, dnum, pos2, pid)
                              if pid not in economy.people
                              or economy.position_rank(pos2) \
                              >= economy.position_rank(economy.people[pid]
                                                       .dominator_position)
                              else (pos, dnum,
                                    economy.people[pid].dominator_position,
                                    pid))
                             for pos, dnum, pos2, pid in nation.nomination
                             if economy.get_person(pid) is not None]
        noml = [(pos, dnum, pos2, pid)
                for pos, dnum, pos2, pid in nation.nomination
                if pos == ex and dnum == exd]
        nom = None
        if noml:
            # Among matching nominations, prefer those backed by the
            # highest-ranked nominator; break ties randomly.
            nomm = max(noml, key=lambda x: economy.position_rank(x[2]))
            noml = [x for x in noml if economy.position_rank(x[2])
                    == economy.position_rank(nomm[2])]
            nom = random.choice(noml)
        # Build the ordered list of candidate-selection methods.
        if ex == 'king':
            l = ['from_vassals_or_governors',
                 'from_all_cavaliers',
                 'from_people']
            if nom is not None:
                l = ['nominate'] + l
        elif ex == 'governor' or ex == 'vassal':
            l2 = [(3, 'king_nominate'),
                  (5, 'from_cavaliers'),
                  (1, 'from_people')]
            if nom is not None:
                if nom[2] == 'king':
                    l2.append((10, 'nominate'))
                elif nom[2] == 'vassal' or nom[2] == 'governor':
                    l2.append((8, 'nominate'))
                else:
                    l2.append((5, 'nominate'))
            l = _random_scored_sort(l2)
        elif ex == 'cavalier':
            l2 = [(3, 'king_nominate'),
                  (2, 'vassal_nominate'),
                  (3, 'governor_nominate'),
                  (2, 'from_people')]
            if nom is not None:
                if nom[2] == 'king':
                    l2.append((10, 'nominate'))
                elif nom[2] == 'vassal' or nom[2] == 'governor':
                    l2.append((8, 'nominate'))
                else:
                    l2.append((5, 'nominate'))
            l = _random_scored_sort(l2)
        adder = 0
        done = None
        nom2 = None
        # Try each method until a successor (done) is found.  nom2, when
        # set, is the person whose family line supplies the successor.
        for method in l:
            nom2 = None
            if method == 'nominate':
                nom2 = economy.get_person(nom[3])
                if nom2 is None:
                    continue
            elif method == 'king_nominate':
                if nation.king is not None:
                    nom2 = economy.people[nation.king.id]
                else:
                    continue
            elif method == 'vassal_nominate':
                if nation.vassals:
                    nom2 = economy.people[random.choice(nation.vassals).id]
                else:
                    continue
            elif method == 'governor_nominate':
                if nation.districts[exd].governor is not None:
                    nom2 = economy.people[nation.districts[exd].governor.id]
                else:
                    continue
            if nom2 is not None:
                done = _nominate_successor(economy, nom2, ex, exd)
                if done is not None:
                    break
                print("no successor:", nom2.id)
                continue
            elif method == 'from_vassals_or_governors':
                l2 = []
                l2 += nation.vassals
                for d in nation.districts:
                    l2.append(d.governor)
                l2 = [x for x in l2 if x is not None]
                l2.sort(key=lambda x: x.general_ability(), reverse=True)
                for d in l2:
                    p = economy.people[d.id]
                    if economy.position_rank(ex) \
                       > economy.position_rank(p.highest_position_of_family()):
                        done = p
                        break
                if done is not None:
                    break
                continue
            elif method == 'from_all_cavaliers':
                l2 = []
                for d in nation.districts:
                    l2 += d.cavaliers
                l2.sort(key=lambda x: x.general_ability(), reverse=True)
                for d in l2:
                    p = economy.people[d.id]
                    if economy.position_rank(ex) \
                       > economy.position_rank(p.highest_position_of_family()):
                        done = p
                        break
                if done is not None:
                    break
                continue
            elif method == 'from_cavaliers':
                l2 = [x for x in nation.districts[exd].cavaliers]
                l2.sort(key=lambda x: x.general_ability(), reverse=True)
                for d in l2:
                    p = economy.people[d.id]
                    if economy.position_rank(ex) \
                       > economy.position_rank(p.highest_position_of_family()):
                        done = p
                        break
                if done is not None:
                    break
                continue
            elif method == 'from_people':
                # Draft by random sampling; with up to 2*len(l2) draws
                # a hit is assumed (asserted below).
                l2 = list(economy.people.values())
                n = 0
                while n < 2 * len(l2):
                    n += 1
                    p = random.choice(l2)
                    if p.death is None and _successor_check(economy, p, ex, exd):
                        done = p
                        break
                if done is not None:
                    adder = 2
                    break
                assert done is not None
                continue
            else:
                raise ValueError('method ' + method +' is wrong!')
        assert done is not None
        if nom2 is not None:
            # Consume the matching recorded nomination; one made by a
            # cavalier grants the appointee a small ability bonus.
            done2 = False
            l = []
            for pos, dnum, pos2, pid in nation.nomination:
                if (not done2) and pos == ex and dnum == exd and pid == nom2.id:
                    done2 = True
                    if pos2 == 'cavalier':
                        adder = 1
                    print("remove nomination")
                else:
                    l.append((pos, dnum, pos2, pid))
            nation.nomination = l
        p = done
        if ex == 'king':
            # New reign: cavaliers of a governor who became king carry
            # their governor-soothing over as king-soothing; everyone
            # else's king-soothing resets.
            cs = set()
            if p.dominator_position == 'governor' and ex == 'king':
                cs.update([d.id for d
                           in nation.districts[p.district].cavaliers])
            for d in nation.dominators():
                if d.id in cs:
                    d.soothing_by_king = d.soothing_by_governor
                    d.soothing_by_governor = 0
                else:
                    d.soothing_by_king = 0
        elif ex == 'governor':
            for d in nation.dominators():
                if d.district == exd:
                    d.soothing_by_governor = 0
        if p.dominator_position is not None:
            p.get_dominator().resign()
        sid = p.supported
        if sid is None or sid == '':
            sid = p.id
        # Move the appointee's whole support family to the new district.
        for qid in [sid] + economy.people[sid].supporting_non_nil():
            economy.people[qid].change_district(exd)
        economy.new_dominator(ex, p, adder=adder)
        new_nomination.append((ex, exd, p.id))
        d = p.get_dominator()
        d.update_hating()
        # A fresh appointee's grudges are half-soothed on appointment.
        if ex == 'cavalier':
            d.soothing_by_governor += \
                np_clip(d.hating_to_governor - d.soothing_by_governor,
                        0, 1) / 2
            d.soothing_by_king += \
                np_clip(d.hating_to_king - d.soothing_by_king,
                        0, 1) / 2
        else:
            d.soothing_by_king += \
                np_clip(d.hating_to_king - d.soothing_by_king,
                        0, 1) / 2
    # Passed-over nominees (or their would-be successors) gain hatred
    # toward whoever got the post instead.
    for pos, dnum, pos2, pid in nation.nomination:
        p = economy.get_person(pid)
        q = _nominate_successor(economy, p, pos, dnum)
        if not ARGS.no_successor_resentment and q is None:
            continue
        if q is not None:
            p = q
        for pos3, dnum3, pid3 in new_nomination:
            if pos3 == pos and dnum == dnum3 and p.id != pid3:
                print("hate:", p.id, "->", pid3)
                if pid3 not in p.hating:
                    p.hating[pid3] = 0
                p.hating[pid3] = np_clip(p.hating[pid3] + 0.1, 0.0, 1.0)
    nation.nomination = []
def print_dominators_average (economy):
    """Print mean ability/grudge parameters, separately for cavaliers
    and for all other dominators (king, vassals, governors)."""
    props = [
        'people_trust',
        'faith_realization',
        'disaster_prophecy',
        'disaster_strategy',
        'disaster_tactics',
        'combat_prophecy',
        # 'combat_strategy',
        'combat_tactics',
        'hating_to_king',
        'hating_to_governor',
        'soothing_by_king',
        'soothing_by_governor'
    ]
    nation = economy.nation
    cavaliers = []
    for dist in nation.districts:
        cavaliers.extend(dist.cavaliers)
    cavalier_ids = {d.id for d in cavaliers}
    others = [d for d in nation.dominators() if d.id not in cavalier_ids]
    averages = {n: np.mean([getattr(d, n) for d in others]) for n in props}
    print("Non-Cavaliers Average:", averages)
    averages = {n: np.mean([getattr(d, n) for d in cavaliers]) for n in props}
    print("Cavaliers Average:", averages)
def update_dominators (economy):
    """Monthly dominator update: retire those too old or too injured
    (the king only resigns on severe injury), fill the resulting
    vacancies, then refresh ability adjustments and grudges."""
    print("\nNominate Dominators:...", flush=True)
    nation = economy.nation
    for d in nation.dominators():
        p = economy.people[d.id]
        if nation.king is not None and nation.king.id == d.id:
            # The king is exempt from the age/injury rule below and
            # steps down only on severe injury.
            if p.injured >= 0.75:
                d.resign()
            continue
        if p.age > 70 or p.injured >= 0.5:
            d.resign()
    nominate_successors(economy)
    for d in nation.dominators():
        d.set_adder()
        d.update_hating()
    print_dominators_average(economy)
| [
"math.sqrt",
"random.uniform",
"numpy.random.beta",
"random.sample",
"math.ceil",
"random.choice",
"random.random",
"numpy.mean",
"collections.OrderedDict",
"simbdp2.common.Rape",
"simbdp2.common.np_clip",
"simbdp2.base.calamity_info.items"
] | [((22068, 22094), 'simbdp2.base.calamity_info.items', 'base.calamity_info.items', ([], {}), '()\n', (22092, 22094), True, 'import simbdp2.base as base\n'), ((10231, 10244), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10242, 10244), False, 'from collections import OrderedDict\n'), ((15249, 15275), 'numpy.random.beta', 'np.random.beta', (['beta', 'beta'], {}), '(beta, beta)\n', (15263, 15275), True, 'import numpy as np\n'), ((17246, 17298), 'simbdp2.common.np_clip', 'np_clip', (['(d.hating_to_king - d.soothing_by_king)', '(0)', '(1)'], {}), '(d.hating_to_king - d.soothing_by_king, 0, 1)\n', (17253, 17298), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((17379, 17439), 'simbdp2.common.np_clip', 'np_clip', (['(d.hating_to_governor - d.soothing_by_governor)', '(0)', '(1)'], {}), '(d.hating_to_governor - d.soothing_by_governor, 0, 1)\n', (17386, 17439), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((17509, 17664), 'numpy.mean', 'np.mean', (['[d.people_trust, d.faith_realization, d.disaster_prophecy, d.\n disaster_strategy, d.disaster_tactics, d.combat_prophecy, d.combat_tactics]'], {}), '([d.people_trust, d.faith_realization, d.disaster_prophecy, d.\n disaster_strategy, d.disaster_tactics, d.combat_prophecy, d.combat_tactics]\n )\n', (17516, 17664), True, 'import numpy as np\n'), ((21239, 21270), 'random.sample', 'random.sample', (['dpeople[dnum]', 'n'], {}), '(dpeople[dnum], n)\n', (21252, 21270), False, 'import random\n'), ((2906, 2921), 'random.random', 'random.random', ([], {}), '()\n', (2919, 2921), False, 'import random\n'), ((2957, 2972), 'random.random', 'random.random', ([], {}), '()\n', (2970, 2972), False, 'import random\n'), ((3008, 3023), 'random.random', 'random.random', ([], {}), '()\n', (3021, 3023), False, 'import random\n'), ((3059, 3074), 'random.random', 'random.random', ([], {}), '()\n', (3072, 3074), False, 'import random\n'), ((3109, 3124), 'random.random', 'random.random', 
([], {}), '()\n', (3122, 3124), False, 'import random\n'), ((3158, 3173), 'random.random', 'random.random', ([], {}), '()\n', (3171, 3173), False, 'import random\n'), ((3256, 3271), 'random.random', 'random.random', ([], {}), '()\n', (3269, 3271), False, 'import random\n'), ((7495, 7523), 'random.uniform', 'random.uniform', (['(0)', 'max_adder'], {}), '(0, max_adder)\n', (7509, 7523), False, 'import random\n'), ((7558, 7595), 'simbdp2.common.np_clip', 'np_clip', (['(p.political_hating + a)', '(0)', '(1)'], {}), '(p.political_hating + a, 0, 1)\n', (7565, 7595), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((7906, 7937), 'random.uniform', 'random.uniform', (['(0)', 'max_temporal'], {}), '(0, max_temporal)\n', (7920, 7937), False, 'import random\n'), ((7967, 7999), 'simbdp2.common.np_clip', 'np_clip', (['(p.tmp_injured + b)', '(0)', '(1)'], {}), '(p.tmp_injured + b, 0, 1)\n', (7974, 7999), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((8288, 8294), 'simbdp2.common.Rape', 'Rape', ([], {}), '()\n', (8292, 8294), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((15299, 15351), 'math.sqrt', 'math.sqrt', (['((p + ccoeff * setting[idx] ** 2) / ccoeff)'], {}), '((p + ccoeff * setting[idx] ** 2) / ccoeff)\n', (15308, 15351), False, 'import math\n'), ((21138, 21177), 'math.ceil', 'math.ceil', (['(ARGS.population[dnum] / 1000)'], {}), '(ARGS.population[dnum] / 1000)\n', (21147, 21177), False, 'import math\n'), ((21367, 21406), 'math.ceil', 'math.ceil', (['(ARGS.population[dnum] / 1000)'], {}), '(ARGS.population[dnum] / 1000)\n', (21376, 21406), False, 'import math\n'), ((22210, 22278), 'math.ceil', 'math.ceil', (['(ci.protection_units_base * (ARGS.population[dnum] / 1000))'], {}), '(ci.protection_units_base * (ARGS.population[dnum] / 1000))\n', (22219, 22278), False, 'import math\n'), ((22433, 22499), 'math.ceil', 'math.ceil', (['(ci.training_units_base * (ARGS.population[dnum] / 1000))'], {}), 
'(ci.training_units_base * (ARGS.population[dnum] / 1000))\n', (22442, 22499), False, 'import math\n'), ((22773, 22788), 'random.random', 'random.random', ([], {}), '()\n', (22786, 22788), False, 'import random\n'), ((28330, 28349), 'random.choice', 'random.choice', (['noml'], {}), '(noml)\n', (28343, 28349), False, 'import random\n'), ((7244, 7272), 'random.uniform', 'random.uniform', (['(0)', 'max_adder'], {}), '(0, max_adder)\n', (7258, 7272), False, 'import random\n'), ((7311, 7348), 'simbdp2.common.np_clip', 'np_clip', (['(q.political_hating + a)', '(0)', '(1)'], {}), '(q.political_hating + a, 0, 1)\n', (7318, 7348), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((8016, 8031), 'random.random', 'random.random', ([], {}), '()\n', (8029, 8031), False, 'import random\n'), ((8078, 8110), 'random.uniform', 'random.uniform', (['(0)', 'max_permanent'], {}), '(0, max_permanent)\n', (8092, 8110), False, 'import random\n'), ((8140, 8168), 'simbdp2.common.np_clip', 'np_clip', (['(p.injured + a)', '(0)', '(1)'], {}), '(p.injured + a, 0, 1)\n', (8147, 8168), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((27099, 27138), 'math.ceil', 'math.ceil', (['(ARGS.population[dnum] / 1000)'], {}), '(ARGS.population[dnum] / 1000)\n', (27108, 27138), False, 'import math\n'), ((35012, 35072), 'simbdp2.common.np_clip', 'np_clip', (['(d.hating_to_governor - d.soothing_by_governor)', '(0)', '(1)'], {}), '(d.hating_to_governor - d.soothing_by_governor, 0, 1)\n', (35019, 35072), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((35156, 35208), 'simbdp2.common.np_clip', 'np_clip', (['(d.hating_to_king - d.soothing_by_king)', '(0)', '(1)'], {}), '(d.hating_to_king - d.soothing_by_king, 0, 1)\n', (35163, 35208), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((35307, 35359), 'simbdp2.common.np_clip', 'np_clip', (['(d.hating_to_king - d.soothing_by_king)', '(0)', '(1)'], {}), '(d.hating_to_king - 
d.soothing_by_king, 0, 1)\n', (35314, 35359), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((35944, 35983), 'simbdp2.common.np_clip', 'np_clip', (['(p.hating[pid3] + 0.1)', '(0.0)', '(1.0)'], {}), '(p.hating[pid3] + 0.1, 0.0, 1.0)\n', (35951, 35983), False, 'from simbdp2.common import np_clip, Child, Marriage, Rape\n'), ((6090, 6102), 'numpy.mean', 'np.mean', (['vab'], {}), '(vab)\n', (6097, 6102), True, 'import numpy as np\n'), ((8570, 8585), 'random.random', 'random.random', ([], {}), '()\n', (8583, 8585), False, 'import random\n'), ((13248, 13263), 'random.random', 'random.random', ([], {}), '()\n', (13261, 13263), False, 'import random\n'), ((3855, 3870), 'random.random', 'random.random', ([], {}), '()\n', (3868, 3870), False, 'import random\n'), ((17066, 17081), 'random.random', 'random.random', ([], {}), '()\n', (17079, 17081), False, 'import random\n'), ((30226, 30255), 'random.choice', 'random.choice', (['nation.vassals'], {}), '(nation.vassals)\n', (30239, 30255), False, 'import random\n'), ((32860, 32877), 'random.choice', 'random.choice', (['l2'], {}), '(l2)\n', (32873, 32877), False, 'import random\n')] |
# Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrap ``dm_control`` environment with a Gym interface.
Adapted and simplified from https://github.com/denisyarats/dmc2gym
"""
from functools import partial
import gym
from gym import spaces
from gym.envs.registration import register
import numpy as np
from typing import Dict, Optional, Any
try:
import dm_control
from dm_control import suite
import dm_env
except ImportError:
dm_control = None
def _dmc_spec_to_box(spec):
    """Convert a sequence of dm_control specs to a single flat Gym ``Box``.

    Adapted from https://github.com/denisyarats/dmc2gym.

    Args:
        spec: iterable of ``dm_env.specs.Array`` or ``dm_env.specs.BoundedArray``
            specs (e.g. the values of ``env.observation_spec()``).

    Returns:
        ``gym.spaces.Box`` with one dimension per scalar entry of the specs;
        unbounded specs map to ``(-inf, inf)`` bounds.

    Raises:
        TypeError: if a spec is neither ``Array`` nor ``BoundedArray``.
    """
    def extract_min_max(s):
        assert s.dtype == np.float64 or s.dtype == np.float32
        # BUGFIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin ``int`` is the documented replacement.
        dim = int(np.prod(s.shape))
        # Exact type checks are deliberate: BoundedArray subclasses Array, so
        # isinstance() would wrongly treat bounded specs as unbounded.
        if type(s) == dm_env.specs.Array:
            bound = np.inf * np.ones(dim, dtype=np.float32)
            return -bound, bound
        elif type(s) == dm_env.specs.BoundedArray:
            zeros = np.zeros(dim, dtype=np.float32)
            return s.minimum + zeros, s.maximum + zeros
        # Previously fell through and returned None, causing an opaque
        # unpacking error at the call site.
        raise TypeError("unsupported spec type: %r" % type(s))

    mins, maxs = [], []
    for s in spec:
        mn, mx = extract_min_max(s)
        mins.append(mn)
        maxs.append(mx)
    low = np.concatenate(mins, axis=0)
    high = np.concatenate(maxs, axis=0)
    assert low.shape == high.shape
    return spaces.Box(low, high, dtype=np.float32)
def _flatten_obs(obs):
"""Flatten a dict to a vector.
Copied from https://github.com/denisyarats/dmc2gym
"""
obs_pieces = []
for v in obs.values():
flat = np.array([v]) if np.isscalar(v) else v.ravel()
obs_pieces.append(flat)
return np.concatenate(obs_pieces, axis=0)
class DMCGYMWrapper(gym.core.Env):
    """Gym-compatible wrapper around a ``dm_control`` suite environment."""
    # Class-level default; ``__init__`` switches the instance to rgb_array.
    metadata = {'render.modes': ['human']}
    def __init__(self,
                 domain_name: str,
                 task_name: str,
                 visualize_reward: bool = True,
                 from_pixels: bool = False,
                 height: int = 84,
                 width: int = 84,
                 camera_id: int = 0,
                 control_timestep: Optional[float] = None):
        """A Gym env that wraps a ``dm_control`` environment.
        Args:
            domain_name: the domain name corresponds to the physical robot
            task_name: a specific task under a domain, which corresponds to a
                particular MDP structure
            visualize_reward: if True, then the rendered frame will have
                a highlighted color when the agent achieves a reward.
            from_pixels: if True, the observation will be raw pixels; otherwise
                use the interval state vector as the observation.
            height: image observation height
            width: image observation width
            camera_id: which camera to render; a MuJoCo xml file can define
                multiple cameras with different views
            control_timestep: the time duration between two agent actions. If
                this is greater than the agent's primitive physics timestep, then
                multiple physics simulation steps might be performed between two
                actions. If None, the default control timstep defined by DM control
                suite will be used.
        """
        # NOTE(review): ``self.metadata`` is the *class* dict, so this mutates
        # the shared metadata for every instance -- confirm intended.
        self.metadata.update({'render.modes': ["rgb_array"]})
        self._from_pixels = from_pixels
        self._height = height
        self._width = width
        self._camera_id = camera_id
        if control_timestep is not None:
            environment_kwargs = {"control_timestep": control_timestep}
        else:
            environment_kwargs = None
        # create task
        # Factory is kept so ``seed`` can rebuild the env with a new RNG.
        # time_limit=inf: episode termination is left to the caller.
        self._env_fn = partial(
            suite.load,
            domain_name=domain_name,
            task_name=task_name,
            task_kwargs={"time_limit": float('inf')},
            environment_kwargs=environment_kwargs,
            visualize_reward=visualize_reward)
        self._env = self._env_fn()
        self._action_space = _dmc_spec_to_box([self._env.action_spec()])
        # create observation space
        if from_pixels:
            # channels_first uint8 image (3, H, W)
            shape = [3, height, width]
            self._observation_space = spaces.Box(
                low=0, high=255, shape=shape, dtype=np.uint8)
        else:
            self._observation_space = _dmc_spec_to_box(
                self._env.observation_spec().values())
    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped dm_control env.
        return getattr(self._env, name)
    def _get_obs(self, time_step):
        """Build the observation (pixels or flattened state) for a time step."""
        if self._from_pixels:
            # this returns channels_last images
            obs = self.render(
                height=self._height,
                width=self._width,
                camera_id=self._camera_id)
            # convert HWC -> CHW; copy() makes the array contiguous
            obs = obs.transpose(2, 0, 1).copy()
        else:
            obs = _flatten_obs(time_step.observation)
        return obs
    @property
    def observation_space(self):
        return self._observation_space
    @property
    def action_space(self):
        return self._action_space
    def seed(self, seed):
        """Seed the spaces and rebuild the env with the given task RNG seed."""
        self._action_space.seed(seed)
        self._observation_space.seed(seed)
        # Because dm_control seems not to provide an API for
        # seeding after an env is created, here we need to re-create
        # an env again.
        self._env = self._env_fn(task_kwargs={
            'random': seed,
            "time_limit": float('inf')
        })
    def step(self, action):
        """Advance one step; ``done`` is always False (time_limit is inf)."""
        assert self._action_space.contains(action)
        time_step = self._env.step(action)
        # dm_control may return reward=None on the first step(s)
        reward = time_step.reward or 0
        obs = self._get_obs(time_step)
        return obs, reward, False, {}
    def reset(self):
        """Reset the wrapped env and return the initial observation."""
        time_step = self._env.reset()
        obs = self._get_obs(time_step)
        return obs
    def render(self, mode='rgb_array', height=None, width=None, camera_id=0):
        """Render an RGB image.
        Copied from https://github.com/denisyarats/dmc2gym
        """
        assert mode == 'rgb_array', (
            'only support rgb_array mode, given %s' % mode)
        # NOTE(review): falsy values fall through to the defaults, so an
        # explicit height=0 or camera_id=0 is silently replaced -- confirm.
        height = height or self._height
        width = width or self._width
        camera_id = camera_id or self._camera_id
        return self._env.physics.render(
            height=height, width=width, camera_id=camera_id)
| [
"numpy.isscalar",
"numpy.zeros",
"numpy.ones",
"numpy.prod",
"numpy.array",
"gym.spaces.Box",
"numpy.concatenate"
] | [((1739, 1767), 'numpy.concatenate', 'np.concatenate', (['mins'], {'axis': '(0)'}), '(mins, axis=0)\n', (1753, 1767), True, 'import numpy as np\n'), ((1779, 1807), 'numpy.concatenate', 'np.concatenate', (['maxs'], {'axis': '(0)'}), '(maxs, axis=0)\n', (1793, 1807), True, 'import numpy as np\n'), ((1854, 1893), 'gym.spaces.Box', 'spaces.Box', (['low', 'high'], {'dtype': 'np.float32'}), '(low, high, dtype=np.float32)\n', (1864, 1893), False, 'from gym import spaces\n'), ((2169, 2203), 'numpy.concatenate', 'np.concatenate', (['obs_pieces'], {'axis': '(0)'}), '(obs_pieces, axis=0)\n', (2183, 2203), True, 'import numpy as np\n'), ((1289, 1305), 'numpy.prod', 'np.prod', (['s.shape'], {}), '(s.shape)\n', (1296, 1305), True, 'import numpy as np\n'), ((2096, 2110), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (2107, 2110), True, 'import numpy as np\n'), ((2079, 2092), 'numpy.array', 'np.array', (['[v]'], {}), '([v])\n', (2087, 2092), True, 'import numpy as np\n'), ((4651, 4707), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'shape', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=shape, dtype=np.uint8)\n', (4661, 4707), False, 'from gym import spaces\n'), ((1378, 1408), 'numpy.ones', 'np.ones', (['dim'], {'dtype': 'np.float32'}), '(dim, dtype=np.float32)\n', (1385, 1408), True, 'import numpy as np\n'), ((1513, 1544), 'numpy.zeros', 'np.zeros', (['dim'], {'dtype': 'np.float32'}), '(dim, dtype=np.float32)\n', (1521, 1544), True, 'import numpy as np\n')] |
"""
QE by <NAME> and <NAME>.
Illustrates preimages of functions
"""
import matplotlib.pyplot as plt
import numpy as np
def f(x):
    """Test function 0.6*cos(4x) + 1.4; its range is the interval [0.8, 2.0]."""
    oscillation = np.cos(4 * x)
    return 0.6 * oscillation + 1.4
# Sample f on [-1, 1]; the min/max of the samples delimit the range of f.
xmin, xmax = -1, 1
x = np.linspace(xmin, xmax, 160)
y = f(x)
ya, yb = np.min(y), np.max(y)
# Two stacked panels sharing the same curve and shaded range band.
fig, axes = plt.subplots(2, 1, figsize=(8, 8))
for ax in axes:
    # Set the axes through the origin
    for spine in ['left', 'bottom']:
        ax.spines[spine].set_position('zero')
    for spine in ['right', 'top']:
        ax.spines[spine].set_color('none')
    ax.set_ylim(-0.6, 3.2)
    ax.set_xlim(xmin, xmax)
    ax.set_yticks(())
    ax.set_xticks(())
    ax.plot(x, y, 'k-', lw=2, label=r'$f$')
    # Light band plus a thick vertical segment marking the range of f.
    ax.fill_between(x, ya, yb, facecolor='blue', alpha=0.05)
    ax.vlines([0], ya, yb, lw=3, color='blue', label=r'range of $f$')
    ax.text(0.04, -0.3, '$0$', fontsize=16)
# Top panel: a value y inside the range, with two preimage points x_0, x_1.
ax = axes[0]
ax.legend(loc='upper right', frameon=False)
ybar = 1.5
ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5)
ax.text(0.05, 0.8 * ybar, r'$y$', fontsize=16)
for i, z in enumerate((-0.35, 0.35)):
    ax.vlines(z, 0, f(z), linestyle='--', alpha=0.5)
    ax.text(z, -0.2, r'$x_{}$'.format(i), fontsize=16)
# Bottom panel: a value y above the range, hence with an empty preimage.
ax = axes[1]
ybar = 2.6
ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5)
ax.text(0.04, 0.91 * ybar, r'$y$', fontsize=16)
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.min",
"numpy.max",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.subplots"
] | [((192, 220), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(160)'], {}), '(xmin, xmax, 160)\n', (203, 220), True, 'import numpy as np\n'), ((273, 307), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(8, 8)'}), '(2, 1, figsize=(8, 8))\n', (285, 307), True, 'import matplotlib.pyplot as plt\n'), ((1265, 1275), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1273, 1275), True, 'import matplotlib.pyplot as plt\n'), ((239, 248), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (245, 248), True, 'import numpy as np\n'), ((250, 259), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (256, 259), True, 'import numpy as np\n'), ((147, 160), 'numpy.cos', 'np.cos', (['(4 * x)'], {}), '(4 * x)\n', (153, 160), True, 'import numpy as np\n')] |
# Authors: <NAME> <<EMAIL>>
# License: MIT
import mne
from mne.externals.pymatreader import read_mat
import numpy as np
from pathlib import Path
from .utils import get_epochs_to_trials
def get_montage_lemon(subject, root_path, montage_rel_path="EEG_MPILMBB_LEMON/EEG_Localizer_BIDS_ID/",
                      parse_pattern_montage="sub-{subject}/sub-{subject}.mat"):
    """Build the digitized electrode montage for one LEMON subject.

    Reads the subject's localizer ``.mat`` file when it exists and returns a
    DigMontage built from it; otherwise falls back to the standard 10-20
    template montage.
    """
    path_montage = Path(root_path) / montage_rel_path
    file_name = path_montage / parse_pattern_montage.format(subject=subject)
    if file_name.exists():
        montage_mat_file = read_mat(str(file_name))
        # Average all digitizations of each labelled head point (fiducials
        # NA/LPA/RPA are typically recorded several times).
        # NOTE(review): assumes HeadPoints["Loc"] is (3, n_points) -- confirm
        # against the read_mat output for this dataset.
        head_points = {}
        for ch_name in np.unique(montage_mat_file["HeadPoints"]["Label"]):
            inds = np.where(np.array(montage_mat_file["HeadPoints"]["Label"]) == ch_name)[0]
            head_points[ch_name] = montage_mat_file["HeadPoints"]["Loc"][:, inds].mean(1)
        # Channel names look like "<prefix>_<name>"; keep only the last token.
        ch_names = [ch_name.split("_")[-1] for ch_name in montage_mat_file["Channel"]["Name"]]
        # Normalize "FPz"-style capitalization to MNE's "Fpz" convention.
        ch_names = [ch_name if ch_name[:2] != "FP" else "Fp" + ch_name[2:] for ch_name in ch_names]
        ch_pos = dict(zip(ch_names, montage_mat_file["Channel"]["Loc"]))
        return mne.channels.make_dig_montage(ch_pos=ch_pos,
                                             nasion=head_points["NA"],
                                             lpa=head_points["LPA"],
                                             rpa=head_points["RPA"])
    # No localizer file for this subject: use the generic template montage.
    return mne.channels.make_standard_montage('standard_1020')
def get_events_lemon(raw, event_ids=None):
    """Build an MNE-style events array from the raw object's annotations.

    Annotation descriptions '2' and '3' are eyes-open ("EO"), '4' is
    eyes-closed ("EC"); all other descriptions are ignored.  Returns an
    integer array of shape (n_events, 3): sample index, 0, event id.
    """
    if event_ids is None:
        event_ids = {"EO": 1, "EC": 2}
    description_to_condition = {'2': "EO", '3': 'EO', '4': 'EC'}
    sfreq = raw.info["sfreq"]
    samples, ids = [], []
    for annot in raw.annotations:
        condition = description_to_condition.get(annot["description"])
        if condition is not None:
            samples.append(int(annot["onset"] * sfreq))
            ids.append(event_ids[condition])
    return np.array([samples, [0] * len(samples), ids], dtype=int).T
def get_epochs_lemon(subject, root_path, event_id, tmin=0, tmax=2 - 1/250, baseline=None,
                     eeg_rel_path="EEG_MPILMBB_LEMON/EEG_Preprocessed_BIDS_ID/EEG_Preprocessed"):
    """Load one subject/condition EEGLAB file and epoch it around the events.

    ``event_id`` ("EO"/"EC") selects which preprocessed .set file is read.
    The default tmax = 2 - 1/250 yields exactly 2 s (500 samples at 250 Hz)
    per epoch, endpoint excluded.  No baseline correction by default.
    """
    montage = get_montage_lemon(subject, root_path)
    raw = mne.io.read_raw_eeglab(str(Path(root_path) / eeg_rel_path / f"sub-{subject}_{event_id}.set"), verbose=False)
    # on_missing="warn": channels absent from the montage are tolerated.
    raw.set_montage(montage, on_missing="warn", verbose=False)
    events = get_events_lemon(raw)
    # event_repeated="drop": discard events that share the same sample.
    return mne.Epochs(raw, events, tmin=tmin, tmax=tmax, baseline=baseline, event_repeated="drop", verbose=False)
def get_trials_lemon(subject, root_path, event_ids=("EO", "EC"), use_csd=False):
    """Load both eyes-open/eyes-closed epochs for a subject and convert them
    to trials via :func:`get_epochs_to_trials`.

    Sanity-checks that every condition is sampled at 250 Hz with 500 samples
    per epoch before conversion.
    """
    all_epochs = []
    for condition in event_ids:
        epochs = get_epochs_lemon(subject, root_path, condition)
        epochs.load_data()
        epochs = epochs.pick("eeg")
        assert epochs.info["sfreq"] == 250
        assert len(epochs.times) == 500
        all_epochs.append(epochs)
    return get_epochs_to_trials(all_epochs, subject, use_csd=use_csd)
| [
"mne.channels.make_standard_montage",
"mne.channels.make_dig_montage",
"mne.Epochs",
"pathlib.Path",
"numpy.array",
"numpy.unique"
] | [((1418, 1469), 'mne.channels.make_standard_montage', 'mne.channels.make_standard_montage', (['"""standard_1020"""'], {}), "('standard_1020')\n", (1452, 1469), False, 'import mne\n'), ((2454, 2560), 'mne.Epochs', 'mne.Epochs', (['raw', 'events'], {'tmin': 'tmin', 'tmax': 'tmax', 'baseline': 'baseline', 'event_repeated': '"""drop"""', 'verbose': '(False)'}), "(raw, events, tmin=tmin, tmax=tmax, baseline=baseline,\n event_repeated='drop', verbose=False)\n", (2464, 2560), False, 'import mne\n'), ((391, 406), 'pathlib.Path', 'Path', (['root_path'], {}), '(root_path)\n', (395, 406), False, 'from pathlib import Path\n'), ((631, 681), 'numpy.unique', 'np.unique', (["montage_mat_file['HeadPoints']['Label']"], {}), "(montage_mat_file['HeadPoints']['Label'])\n", (640, 681), True, 'import numpy as np\n'), ((1152, 1275), 'mne.channels.make_dig_montage', 'mne.channels.make_dig_montage', ([], {'ch_pos': 'ch_pos', 'nasion': "head_points['NA']", 'lpa': "head_points['LPA']", 'rpa': "head_points['RPA']"}), "(ch_pos=ch_pos, nasion=head_points['NA'], lpa=\n head_points['LPA'], rpa=head_points['RPA'])\n", (1181, 1275), False, 'import mne\n'), ((2262, 2277), 'pathlib.Path', 'Path', (['root_path'], {}), '(root_path)\n', (2266, 2277), False, 'from pathlib import Path\n'), ((711, 760), 'numpy.array', 'np.array', (["montage_mat_file['HeadPoints']['Label']"], {}), "(montage_mat_file['HeadPoints']['Label'])\n", (719, 760), True, 'import numpy as np\n')] |
"""
CSCC11 - Introduction to Machine Learning, Winter 2020, Assignment 3
<NAME>, <NAME>, <NAME>
This file visualizes the document dataset by reducing the dimensionality with PCA
"""
import matplotlib
import _pickle as pickle
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from pca import PCA
def main(dataset):
    """Reduce the document dataset to 3 PCA dimensions and scatter-plot it.

    Args:
        dataset: dict with 'data' (documents x features matrix) and
            'labels' (class label per document).
    """
    # BUGFIX: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` (float64) is the documented replacement.
    documents = dataset['data'].astype(float)
    # NOTE: MATLAB is really fast for this compared to numpy!
    pca = PCA(documents)
    low_dim_data = pca.reduce_dimensionality(documents, 3)
    classes = np.unique(dataset['labels'])
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    # One scatter call per class so each class gets its own color.
    for class_i in classes:
        class_i_data = low_dim_data[dataset['labels'].flatten() == class_i]
        ax.scatter(class_i_data[:, 0],
                   class_i_data[:, 1],
                   class_i_data[:, 2],
                   s=1)
    plt.show()
if __name__ == "__main__":
    # BUGFIX: the original ``pickle.load(open(...))`` never closed the file;
    # a context manager releases the handle even if unpickling fails.
    with open("data/BBC_data.pkl", "rb") as data_file:
        dataset = pickle.load(data_file)
    main(dataset)
| [
"matplotlib.pyplot.figure",
"pca.PCA",
"numpy.unique",
"matplotlib.pyplot.show"
] | [((482, 496), 'pca.PCA', 'PCA', (['documents'], {}), '(documents)\n', (485, 496), False, 'from pca import PCA\n'), ((571, 599), 'numpy.unique', 'np.unique', (["dataset['labels']"], {}), "(dataset['labels'])\n", (580, 599), True, 'import numpy as np\n'), ((611, 623), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (621, 623), True, 'import matplotlib.pyplot as plt\n'), ((921, 931), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (929, 931), True, 'import matplotlib.pyplot as plt\n')] |
import copy
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sparse_ho.utils_cross_entropy import (
cross_entropy, grad_cross_entropy, accuracy)
class LogisticMulticlass():
    """Multiclass logistic loss.

    The multiclass problem is solved as ``n_classes`` independent one-vs-all
    binary sub-problems, one per column of the one-hot encoded targets.

    Parameters
    ----------
    idx_train: ndarray
        indices of the training set
    idx_val: ndarray
        indices of the validation set
    algo: instance of ``sparse_ho.base.AlgoModel``
        A model that follows the sparse_ho API.
    idx_test: ndarray, optional (default=None)
        indices of the test set; if None, test accuracy is skipped.

    Attributes
    ----------
    dict_models: dict
        dict with the models corresponding to each class.
    """

    def __init__(self, idx_train, idx_val, algo, idx_test=None):
        self.idx_train = idx_train
        self.idx_val = idx_val
        # passing test is dirty but we need it for the multiclass logreg
        self.idx_test = idx_test
        # passing algo is dirty but we need it for the multiclass logreg
        self.algo = algo
        self.dict_models = None

    def _initialize(self, model, X, y):
        """Lazily create the per-class models and one-hot encode the targets."""
        # NOTE: scikit-learn >= 1.2 renamed ``sparse`` to ``sparse_output``.
        enc = OneHotEncoder(sparse=False)  # maybe remove the sparse=False
        self.one_hot_code = enc.fit_transform(pd.DataFrame(y))
        self.n_classes = self.one_hot_code.shape[1]
        # dict with all the one vs all models
        self.dict_models = {}
        for k in range(self.n_classes):
            self.dict_models[k] = copy.deepcopy(model)
        self.dict_warm_start = {}
        self.n_samples, self.n_features = X.shape

    def _solve_all_classes(self, X, log_alpha, tol):
        """Solve every one-vs-all inner problem, warm-started across calls.

        Returns
        -------
        all_betas: ndarray, shape (n_features, n_classes)
            Regression coefficients of each binary sub-problem.
        all_jacs: ndarray, shape (n_features, n_classes)
            Jacobians (w.r.t. the hyperparameter) of each sub-problem.
        """
        all_betas = np.zeros((self.n_features, self.n_classes))
        all_jacs = np.zeros((self.n_features, self.n_classes))
        for k in range(self.n_classes):
            mask0, dense0, jac0 = self.dict_warm_start.get(
                k, (None, None, None))
            mask, dense, jac = self.algo.get_beta_jac(
                X[self.idx_train, :], self.one_hot_code[self.idx_train, k],
                log_alpha[k], self.dict_models[k], None, mask0=mask0,
                dense0=dense0,
                quantity_to_warm_start=jac0, tol=tol)
            self.dict_warm_start[k] = (mask, dense, jac)
            all_betas[mask, k] = dense
            all_jacs[mask, k] = jac
        return all_betas, all_jacs

    def get_val_grad(
            self, model, X, y, log_alpha, compute_beta_grad, monitor,
            tol=1e-3):
        """Get value and gradient of criterion.

        Parameters
        ----------
        model: instance of ``sparse_ho.base.BaseModel``
            A model that follows the sparse_ho API.
        X: array-like, shape (n_samples, n_features)
            Design matrix.
        y: ndarray, shape (n_samples,)
            Observation vector.
        log_alpha: float or np.array
            Logarithm of hyperparameter.
        compute_beta_grad: callable
            Returns the regression coefficients beta and the hypergradient.
        monitor: instance of Monitor.
            Monitor.
        tol: float, optional (default=1e-3)
            Tolerance for the inner problem.
        """
        # TODO use sparse matrices
        if self.dict_models is None:
            self._initialize(model, X, y)
        all_betas, all_jacs = self._solve_all_classes(X, log_alpha, tol)
        X_val = X[self.idx_val, :]
        Y_val = self.one_hot_code[self.idx_val, :]
        acc_val = accuracy(all_betas, X_val, Y_val)
        val = cross_entropy(all_betas, X_val, Y_val)
        grad = self.grad_total_loss(all_betas, all_jacs, X_val, Y_val)
        if self.idx_test is not None:
            acc_test = accuracy(
                all_betas, X[self.idx_test, :], self.one_hot_code[
                    self.idx_test, :])
            print(
                "Value outer %f || Acc. validation %f || Acc. test %f" %
                (val, acc_val, acc_test))
        else:
            acc_test = None
            print("Value outer %f || Acc. validation %f" %
                  (val, acc_val))
        monitor(
            val, alpha=np.exp(log_alpha), grad=grad.copy(), acc_val=acc_val,
            acc_test=acc_test)
        self.all_betas = all_betas
        return val, grad

    def get_val(
            self, model, X, y, log_alpha, compute_beta_grad, monitor,
            tol=1e-3):
        """Get value of criterion.

        Parameters
        ----------
        model: instance of ``sparse_ho.base.BaseModel``
            A model that follows the sparse_ho API.
        X: array-like, shape (n_samples, n_features)
            Design matrix.
        y: ndarray, shape (n_samples,)
            Observation vector.
        log_alpha: float or np.array
            Logarithm of hyperparameter.
        compute_beta_grad: callable
            Returns the regression coefficients beta and the hypergradient.
        monitor: instance of Monitor.
            Monitor.
        tol: float, optional (default=1e-3)
            Tolerance for the inner problem.
        """
        if self.dict_models is None:
            self._initialize(model, X, y)
        # The Jacobians are still computed for warm starting; only the
        # stacked copy is discarded here.
        all_betas, _ = self._solve_all_classes(X, log_alpha, tol)
        acc_val = accuracy(
            all_betas, X[self.idx_val, :], self.one_hot_code[self.idx_val, :])
        val = cross_entropy(
            all_betas, X[self.idx_val, :], self.one_hot_code[self.idx_val, :])
        if self.idx_test is not None:
            acc_test = accuracy(
                all_betas, X[self.idx_test, :],
                self.one_hot_code[self.idx_test, :])
            print("Value outer %f || Accuracy validation %f || Accuracy test %f" %
                  (val, acc_val, acc_test))
        else:
            # BUGFIX: the original crashed when idx_test was None; mirror the
            # guard used in get_val_grad and skip the test accuracy.
            acc_test = None
            print("Value outer %f || Accuracy validation %f" % (val, acc_val))
        monitor(
            val, alpha=np.exp(log_alpha), grad=None, acc_val=acc_val,
            acc_test=acc_test)
        self.all_betas = all_betas
        return val

    def proj_hyperparam(self, model, X, y, log_alpha):
        """Project hyperparameter on admissible range of values.

        ``log_alpha`` is clipped (in place, and returned) to the interval
        ``[log_alpha_max - 7, log_alpha_max - log(0.9)]``.

        Parameters
        ----------
        model: instance of ``sparse_ho.base.BaseModel``
            A model that follows the sparse_ho API.
        X: array-like, shape (n_samples, n_features)
            Design matrix.
        y: ndarray, shape (n_samples,)
            Observation vector.
        log_alpha: float or np.array
            Logarithm of hyperparameter.
        """
        log_alpha_max = model.compute_alpha_max(X, y)
        log_alpha[log_alpha < log_alpha_max - 7] = log_alpha_max - 7
        log_alpha[log_alpha > log_alpha_max - np.log(0.9)] = (
            log_alpha_max - np.log(0.9))
        return log_alpha

    def grad_total_loss(self, all_betas, all_jacs, X, Y):
        """Compute the gradient of the multiclass logistic loss.

        Parameters
        ----------
        all_betas: array-like, shape (n_features, n_classes)
            Solutions of the optimization problems corresponding to each class.
        all_jacs: array-like, shape (n_features, n_classes)
            Jacobians of the optimization problems corresponding to each class.
        X: array-like, shape (n_samples, n_features)
            Design matrix.
        Y: ndarray, shape (n_samples, n_classes)
            One hot encoding representation of the observation y.
        """
        grad_ce = grad_cross_entropy(all_betas, X, Y)
        grad_total = (grad_ce * all_jacs).sum(axis=0)
        return grad_total
| [
"pandas.DataFrame",
"copy.deepcopy",
"numpy.log",
"numpy.zeros",
"sklearn.preprocessing.OneHotEncoder",
"numpy.exp",
"sparse_ho.utils_cross_entropy.accuracy",
"sparse_ho.utils_cross_entropy.cross_entropy",
"sparse_ho.utils_cross_entropy.grad_cross_entropy"
] | [((1099, 1126), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (1112, 1126), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((2532, 2575), 'numpy.zeros', 'np.zeros', (['(self.n_features, self.n_classes)'], {}), '((self.n_features, self.n_classes))\n', (2540, 2575), True, 'import numpy as np\n'), ((2595, 2638), 'numpy.zeros', 'np.zeros', (['(self.n_features, self.n_classes)'], {}), '((self.n_features, self.n_classes))\n', (2603, 2638), True, 'import numpy as np\n'), ((3254, 3329), 'sparse_ho.utils_cross_entropy.accuracy', 'accuracy', (['all_betas', 'X[self.idx_val, :]', 'self.one_hot_code[self.idx_val, :]'], {}), '(all_betas, X[self.idx_val, :], self.one_hot_code[self.idx_val, :])\n', (3262, 3329), False, 'from sparse_ho.utils_cross_entropy import cross_entropy, grad_cross_entropy, accuracy\n'), ((3357, 3442), 'sparse_ho.utils_cross_entropy.cross_entropy', 'cross_entropy', (['all_betas', 'X[self.idx_val, :]', 'self.one_hot_code[self.idx_val, :]'], {}), '(all_betas, X[self.idx_val, :], self.one_hot_code[self.idx_val, :]\n )\n', (3370, 3442), False, 'from sparse_ho.utils_cross_entropy import cross_entropy, grad_cross_entropy, accuracy\n'), ((5165, 5208), 'numpy.zeros', 'np.zeros', (['(self.n_features, self.n_classes)'], {}), '((self.n_features, self.n_classes))\n', (5173, 5208), True, 'import numpy as np\n'), ((5768, 5843), 'sparse_ho.utils_cross_entropy.accuracy', 'accuracy', (['all_betas', 'X[self.idx_val, :]', 'self.one_hot_code[self.idx_val, :]'], {}), '(all_betas, X[self.idx_val, :], self.one_hot_code[self.idx_val, :])\n', (5776, 5843), False, 'from sparse_ho.utils_cross_entropy import cross_entropy, grad_cross_entropy, accuracy\n'), ((5876, 5953), 'sparse_ho.utils_cross_entropy.accuracy', 'accuracy', (['all_betas', 'X[self.idx_test, :]', 'self.one_hot_code[self.idx_test, :]'], {}), '(all_betas, X[self.idx_test, :], self.one_hot_code[self.idx_test, :])\n', (5884, 5953), False, 'from 
sparse_ho.utils_cross_entropy import cross_entropy, grad_cross_entropy, accuracy\n'), ((5993, 6078), 'sparse_ho.utils_cross_entropy.cross_entropy', 'cross_entropy', (['all_betas', 'X[self.idx_val, :]', 'self.one_hot_code[self.idx_val, :]'], {}), '(all_betas, X[self.idx_val, :], self.one_hot_code[self.idx_val, :]\n )\n', (6006, 6078), False, 'from sparse_ho.utils_cross_entropy import cross_entropy, grad_cross_entropy, accuracy\n'), ((7895, 7930), 'sparse_ho.utils_cross_entropy.grad_cross_entropy', 'grad_cross_entropy', (['all_betas', 'X', 'Y'], {}), '(all_betas, X, Y)\n', (7913, 7930), False, 'from sparse_ho.utils_cross_entropy import cross_entropy, grad_cross_entropy, accuracy\n'), ((1260, 1275), 'pandas.DataFrame', 'pd.DataFrame', (['y'], {}), '(y)\n', (1272, 1275), True, 'import pandas as pd\n'), ((1481, 1501), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (1494, 1501), False, 'import copy\n'), ((3651, 3728), 'sparse_ho.utils_cross_entropy.accuracy', 'accuracy', (['all_betas', 'X[self.idx_test, :]', 'self.one_hot_code[self.idx_test, :]'], {}), '(all_betas, X[self.idx_test, :], self.one_hot_code[self.idx_test, :])\n', (3659, 3728), False, 'from sparse_ho.utils_cross_entropy import cross_entropy, grad_cross_entropy, accuracy\n'), ((7188, 7199), 'numpy.log', 'np.log', (['(0.9)'], {}), '(0.9)\n', (7194, 7199), True, 'import numpy as np\n'), ((4077, 4094), 'numpy.exp', 'np.exp', (['log_alpha'], {}), '(log_alpha)\n', (4083, 4094), True, 'import numpy as np\n'), ((6127, 6144), 'numpy.exp', 'np.exp', (['log_alpha'], {}), '(log_alpha)\n', (6133, 6144), True, 'import numpy as np\n'), ((7143, 7154), 'numpy.log', 'np.log', (['(0.9)'], {}), '(0.9)\n', (7149, 7154), True, 'import numpy as np\n')] |
from mmdet.core import anchor, build_anchor_generator,build_assigner
import mmdet
import mmcv
import numpy as np
import time
import cv2 as cv
import torch
def show_anchor(input_shape_hw, stride, anchor_generator_cfg, random_n, select_n):
    """Visualize which anchors the ATSS assigner marks positive for two
    hard-coded ground-truth boxes, on a blank (H, W, C) canvas.

    Press 'q' in the OpenCV window to exit the program.
    """
    img = np.zeros(input_shape_hw, np.uint8)
    # Feature-map sizes for each pyramid level, derived from the strides.
    feature_map = []
    for s in stride:
        feature_map.append([input_shape_hw[0] // s, input_shape_hw[1] // s])
    anchor_generator = build_anchor_generator(anchor_generator_cfg)
    anchors = anchor_generator.grid_anchors(feature_map)  # anchors at original-image scale, xyxy format, top-left origin
    base_anchors = anchor_generator.base_anchors
    assigner=dict(type='ATSSAssigner', topk=9)
    assigner = build_assigner(assigner)
    #print(anchors[0].shape,anchors[1].shape)
    # ATSS needs the anchor count of each level to pick topk per level.
    nums_per_level = [len(each) for each in anchors]
    #for each in anchors:
    #    nums_per_level.append(len(each))
    anchors = torch.cat([each for each in anchors],dim=0)
    gt_bboxes = torch.tensor([[100,100,300,300],[400,400,600,600]]).to(anchors.device)
    gt_labels = torch.tensor([1,2]).to(anchors.device)
    #print(anchors.device,gt_bboxes.device)
    #print(nums_per_level)
    assign_result = assigner.assign(anchors, nums_per_level, gt_bboxes, None, gt_labels)
    print((assign_result.gt_inds!=0).nonzero().shape)
    # Keep only the anchors assigned to some ground truth (gt_inds != 0).
    anchors = anchors[(assign_result.gt_inds!=0).nonzero().squeeze(1)]
    print(anchors)
    # Drop anchors with any negative coordinate before drawing.
    values,indices = anchors.min(-1)
    anchors = anchors[(values>0).nonzero().squeeze(1)].cpu().numpy()
    print(anchors)
    # Positive anchors in the default color, ground truths in red.
    img_ = mmcv.imshow_bboxes(img, anchors, thickness=1, show=False)
    img_ = mmcv.imshow_bboxes(img_,gt_bboxes.cpu().numpy() , thickness=1, colors='red', show=False)
    cv.imshow('img',img_)
    if cv.waitKey(0) & 0xFF== ord('q'):
        exit(0)
    '''
    for i,each in enumerate(base_anchors):
        each[:,0:4:2] += input_shape_hw[0]//2
        each[:,1:4:2] += input_shape_hw[1]//2
    for _ in range(random_n):
        disp_img = []
        for i,anchor in enumerate(anchors):
            img = np.zeros(input_shape_hw, np.uint8)
            anchor = anchor.cpu().numpy()
            print(anchor.shape)
            index = (anchor[:, 0] > 0) & (anchor[:, 1] > 0) & (anchor[:, 2] < input_shape_hw[1]) & \
                    (anchor[:, 3] < input_shape_hw[0])
            anchor = anchor[index]
            anchor = np.random.permutation(anchor)
            img_ = mmcv.imshow_bboxes(img, anchor[:select_n], thickness=1, show=False)
            img_ = mmcv.imshow_bboxes(img_, base_anchors[i].cpu().numpy(), thickness=1, colors='red', show=False)
            #disp_img.append(img_)
            #time.sleep(0.3)
    '''
def demo_atss(input_shape_hw):
    """Run the ATSS positive-anchor visualization on a canvas of the given
    (H, W, C) shape, using a single square anchor per pyramid level."""
    strides = [8, 16, 32, 64, 128]
    generator_cfg = dict(
        type='AnchorGenerator',
        octave_base_scale=8,  # base anchor scale per level; enlarging it scales every anchor up
        scales_per_octave=1,  # a single scale per level (no 2**(k/3) octaves)
        ratios=[1.0],         # square anchors only, one aspect ratio
        strides=strides)      # output stride of each feature-map level
    show_anchor(input_shape_hw, strides, generator_cfg, 10, 100)
if __name__ == '__main__':
    # (H, W, C) size of the blank canvas / implied input image.
    input_shape_hw = (640, 640, 3)
    demo_atss(input_shape_hw)
#demo_yolov3(input_shape_hw) | [
"mmcv.imshow_bboxes",
"mmdet.core.build_anchor_generator",
"cv2.waitKey",
"mmdet.core.build_assigner",
"numpy.zeros",
"torch.cat",
"cv2.imshow",
"torch.tensor"
] | [((249, 283), 'numpy.zeros', 'np.zeros', (['input_shape_hw', 'np.uint8'], {}), '(input_shape_hw, np.uint8)\n', (257, 283), True, 'import numpy as np\n'), ((426, 470), 'mmdet.core.build_anchor_generator', 'build_anchor_generator', (['anchor_generator_cfg'], {}), '(anchor_generator_cfg)\n', (448, 470), False, 'from mmdet.core import anchor, build_anchor_generator, build_assigner\n'), ((671, 695), 'mmdet.core.build_assigner', 'build_assigner', (['assigner'], {}), '(assigner)\n', (685, 695), False, 'from mmdet.core import anchor, build_anchor_generator, build_assigner\n'), ((882, 926), 'torch.cat', 'torch.cat', (['[each for each in anchors]'], {'dim': '(0)'}), '([each for each in anchors], dim=0)\n', (891, 926), False, 'import torch\n'), ((1508, 1565), 'mmcv.imshow_bboxes', 'mmcv.imshow_bboxes', (['img', 'anchors'], {'thickness': '(1)', 'show': '(False)'}), '(img, anchors, thickness=1, show=False)\n', (1526, 1565), False, 'import mmcv\n'), ((1670, 1692), 'cv2.imshow', 'cv.imshow', (['"""img"""', 'img_'], {}), "('img', img_)\n", (1679, 1692), True, 'import cv2 as cv\n'), ((942, 1000), 'torch.tensor', 'torch.tensor', (['[[100, 100, 300, 300], [400, 400, 600, 600]]'], {}), '([[100, 100, 300, 300], [400, 400, 600, 600]])\n', (954, 1000), False, 'import torch\n'), ((1029, 1049), 'torch.tensor', 'torch.tensor', (['[1, 2]'], {}), '([1, 2])\n', (1041, 1049), False, 'import torch\n'), ((1699, 1712), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1709, 1712), True, 'import cv2 as cv\n')] |
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import os
import pybullet as p
import pybullet_data
import math
import numpy as np
import random
from pybullet_object_models import ycb_objects
import time
from Load_Object_URDF import LoadObjectURDF
MAX_EPISODE_LEN = 20*100
class PandaEnv(gym.Env):
metadata = {'render.modes': ['human']}
    def __init__(self):
        """Connect to the PyBullet GUI, set the camera, and declare the spaces."""
        self.step_counter = 0
        p.connect(p.GUI)
        # Hide the default debug side panels, keep only the 3D view.
        p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
        p.resetDebugVisualizerCamera(cameraDistance=0.5, cameraYaw=90, cameraPitch=-14, cameraTargetPosition=[0.60, 0.0, 0.45])
        # NOTE(review): step() actually consumes (mode, tip_pos, tip_orn,
        # finger_target), so these Box bounds look like placeholders -- confirm.
        self.action_space = spaces.Box(np.array([-1]*4), np.array([1]*4))
        # Observation: gripper-tip xyz plus the two finger joint positions.
        self.observation_space = spaces.Box(np.array([-1]*5), np.array([1]*5))
        self.maxFingerForce = 20.0
        self.object=None
        # Data folder <parent-of-cwd>/3d_object_reconstruction/Data.
        self.storage_folder=os.path.join(os.path.abspath(os.path.dirname(os.getcwd())), "3d_object_reconstruction", "Data")
        self.record_end = False
    def step(self, action):
        """Apply one control step.

        ``action`` is (mode, tip_position, tip_orientation, finger_target);
        when mode == 'rotate' the loop is slowed to real time (1/240 s).
        NOTE(review): returns a 3-tuple (obs, reward, done) instead of gym's
        usual 4-tuple with an info dict -- confirm callers expect this.
        """
        p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING)
        # IK for link 11 (the gripper tip); keep only the 7 arm joint targets.
        jointPoses = p.calculateInverseKinematics(self.pandaUid, 11, action[1], action[2], maxNumIterations=200,
                                                  residualThreshold=1e-5)[0:7]
        p.setJointMotorControlArray(self.pandaUid, self.joints, p.POSITION_CONTROL,
                                    list(jointPoses))
        # Both finger joints (9 and 10) track the same target position.
        p.setJointMotorControlArray(self.pandaUid, [9, 10], p.POSITION_CONTROL, action[3])
        p.stepSimulation()
        state_robot = p.getLinkState(self.pandaUid, 11)[0:2]
        state_fingers = (p.getJointState(self.pandaUid,9)[0], p.getJointState(self.pandaUid, 10)[0])
        if action[0] == 'rotate':
            time.sleep(1 / 240.)
        self.step_counter += 1
        # Reward is always 0 here; only the step budget ends an episode.
        if self.step_counter > MAX_EPISODE_LEN:
            reward = 0
            done = True
        else:
            reward = 0
            done = False
        self.observation = state_robot[0] + state_fingers
        return np.array(self.observation).astype(np.float32), reward, done
    def stick_simulation(self):
        """Keep the object rigidly "stuck" to the gripper while simulating.

        Re-applies the tip-to-object transform captured at grasp time so the
        object follows the hand.  Assumes ``init_tip_pose/ori`` and
        ``init_obj_pose/ori`` were stored elsewhere (not visible in this
        chunk) -- TODO confirm where they are set.
        """
        state_robot = p.getLinkState(self.pandaUid, 11)[0:2]
        # Hold the arm at its current pose while the object is re-anchored.
        jointPoses = p.calculateInverseKinematics(self.pandaUid, 11, state_robot[0], state_robot[1], maxNumIterations=200,
                                                  residualThreshold=1e-5)[0:7]
        # new_obj = current_tip * inverse(init_tip) * init_obj
        inverse_tip_pos, inverse_tip_ori = p.invertTransform(self.init_tip_pose, self.init_tip_ori)
        transform = p.multiplyTransforms(state_robot[0], state_robot[1],
                                         inverse_tip_pos, inverse_tip_ori)
        new = p.multiplyTransforms(transform[0], transform[1], self.init_obj_pose, self.init_obj_ori)
        newPos = new[0]
        newOri = new[1]
        # Teleport + simulate a few substeps so the pose settles.
        for i in range(5):
            p.resetBasePositionAndOrientation(self.objectUid, newPos, newOri)
            p.setJointMotorControlArray(self.pandaUid, self.joints, p.POSITION_CONTROL,
                                        list(jointPoses))
            p.stepSimulation()
    def activate(self):
        """Close the gripper until it firmly contacts the object.

        Commands both fingers to 0 and steps the simulation until at least
        10 contact points with the object are detected, or the episode step
        budget runs out.  Returns the final (finger9, finger10) positions.
        NOTE(review): this consumes the same ``step_counter`` budget used by
        step() for episode termination -- confirm intended.
        """
        while self.step_counter < MAX_EPISODE_LEN:
            p.setJointMotorControlArray(self.pandaUid, [9, 10], p.POSITION_CONTROL, [0, 0])
            p.stepSimulation()
            contacts = p.getContactPoints(self.pandaUid, self.objectUid)
            if len(contacts)>=10:
                break
            self.step_counter += 1
        state_fingers = (p.getJointState(self.pandaUid,9)[0], p.getJointState(self.pandaUid, 10)[0])
        return state_fingers
def reset(self):
self.step_counter = 0
p.resetSimulation()
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,0) # we will enable rendering after we loaded everything
urdfRootPath=pybullet_data.getDataPath()
p.setGravity(0,0,-10)
p.setTimeStep(1/240.)
planeUid = p.loadURDF(os.path.join(urdfRootPath,"plane.urdf"), basePosition=[0,0,-0.65])
rest_poses = [0,-0.215,0,-2.57,0,2.356,2.356,0.08,0.08]
self.pandaUid = p.loadURDF(os.path.join(urdfRootPath, "franka_panda/panda.urdf"),useFixedBase=True)
num_joints = p.getNumJoints(self.pandaUid)
joints = [p.getJointInfo(self.pandaUid, i) for i in range(num_joints)]
self.joints = [j[0] for j in joints if j[2] == p.JOINT_REVOLUTE]
for i in range(7):
p.resetJointState(self.pandaUid,i, rest_poses[i])
p.resetJointState(self.pandaUid, 9, 0.08)
p.resetJointState(self.pandaUid,10, 0.08)
currentPosition = p.getLinkState(self.pandaUid, 11)[0]
jointPoses = p.calculateInverseKinematics(self.pandaUid, 11,
[currentPosition[0],currentPosition[1]+0.02,currentPosition[2]],
p.getLinkState(self.pandaUid, 11)[1])[0:7]
p.setJointMotorControlArray(self.pandaUid, list(range(7)) + [9, 10], p.POSITION_CONTROL,
list(jointPoses) + 2 * [0.08])
tableUid = p.loadURDF(os.path.join(urdfRootPath, "table/table.urdf"),basePosition=[0.5,0,-0.65])
trayUid = p.loadURDF(os.path.join(urdfRootPath, "tray/traybox.urdf"),basePosition=[0.5,0,0])
self.objectUid = LoadObjectURDF(self.object)
p.stepSimulation()
state_fingers = (p.getJointState(self.pandaUid,9)[0], p.getJointState(self.pandaUid, 10)[0])
state_robot = p.getLinkState(self.pandaUid, 11)[0]
self.observation = state_robot + state_fingers
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,1)
return np.array(self.observation).astype(np.float32)
def is_static(self):
v = np.linalg.norm(p.getBaseVelocity(self.objectUid)[0])
return v<1e-4
def render(self, mode='human'):
view_matrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[0.5, 0, 0.15],
distance=.7,
yaw=0,
pitch=-20,
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(fov=60,
aspect=float(960) /720,
nearVal=0.1,
farVal=100.0)
(_, _, px, pd, _) = p.getCameraImage(width=960,
height=720,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (720,960, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def storage(self):
# result=p.getDebugVisualizerCamera(self)
width, height, viewMat, projMat, cameraUp, camForward, horizon, vertical, _, _, dist, camTarget =p.getDebugVisualizerCamera(self)
view_matrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[0.60, 0.0, 0.45],
distance=0.5,
yaw=90,
pitch=-14,
roll=0,
upAxisIndex=2)
far = 100.0
near = 0.1
proj_matrix = p.computeProjectionMatrixFOV(fov=60,
aspect=float(640) /480,
nearVal=near,
farVal=far)
(_, _, px, pd, pseg) = p.getCameraImage(width=640,
height=480,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (480,640, 4))
rgb_array = rgb_array[:, :, :3]
cad = rgb_array.copy()
cad[pd >= 1.0] = np.array([0, 0, 0], dtype=np.uint8)
pd[pd >= 1.0] = 0
m= pseg.max()
pmask = pseg.copy()
pseg[pseg == m] = 255
pseg[pseg != 255] = 0
pmask[pseg == 255] = 1
pmask[pseg != 255] = 0
pseg = np.array(pseg, dtype=np.uint8)
pmask = np.array(pmask, dtype=np.uint8)
m , mm = pd.max(), pd.min()
pd = pd * 65535
pd = pd.astype(np.uint16)
return rgb_array,cad,pd,pseg,pmask
def _get_state(self):
return self.observation
def close(self):
p.disconnect()
| [
"pybullet.resetSimulation",
"pybullet.calculateInverseKinematics",
"pybullet.computeViewMatrixFromYawPitchRoll",
"pybullet.resetDebugVisualizerCamera",
"pybullet.getBaseVelocity",
"pybullet.connect",
"os.path.join",
"pybullet.getLinkState",
"pybullet.getContactPoints",
"pybullet.setJointMotorContr... | [((436, 452), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (445, 452), True, 'import pybullet as p\n'), ((461, 508), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_GUI', '(0)'], {}), '(p.COV_ENABLE_GUI, 0)\n', (487, 508), True, 'import pybullet as p\n'), ((517, 640), 'pybullet.resetDebugVisualizerCamera', 'p.resetDebugVisualizerCamera', ([], {'cameraDistance': '(0.5)', 'cameraYaw': '(90)', 'cameraPitch': '(-14)', 'cameraTargetPosition': '[0.6, 0.0, 0.45]'}), '(cameraDistance=0.5, cameraYaw=90, cameraPitch=\n -14, cameraTargetPosition=[0.6, 0.0, 0.45])\n', (545, 640), True, 'import pybullet as p\n'), ((1043, 1105), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_SINGLE_STEP_RENDERING'], {}), '(p.COV_ENABLE_SINGLE_STEP_RENDERING)\n', (1069, 1105), True, 'import pybullet as p\n'), ((1446, 1532), 'pybullet.setJointMotorControlArray', 'p.setJointMotorControlArray', (['self.pandaUid', '[9, 10]', 'p.POSITION_CONTROL', 'action[3]'], {}), '(self.pandaUid, [9, 10], p.POSITION_CONTROL,\n action[3])\n', (1473, 1532), True, 'import pybullet as p\n'), ((1538, 1556), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (1554, 1556), True, 'import pybullet as p\n'), ((2450, 2506), 'pybullet.invertTransform', 'p.invertTransform', (['self.init_tip_pose', 'self.init_tip_ori'], {}), '(self.init_tip_pose, self.init_tip_ori)\n', (2467, 2506), True, 'import pybullet as p\n'), ((2527, 2617), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['state_robot[0]', 'state_robot[1]', 'inverse_tip_pos', 'inverse_tip_ori'], {}), '(state_robot[0], state_robot[1], inverse_tip_pos,\n inverse_tip_ori)\n', (2547, 2617), True, 'import pybullet as p\n'), ((2669, 2761), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['transform[0]', 'transform[1]', 'self.init_obj_pose', 'self.init_obj_ori'], {}), '(transform[0], transform[1], self.init_obj_pose, self.\n 
init_obj_ori)\n', (2689, 2761), True, 'import pybullet as p\n'), ((3641, 3660), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (3658, 3660), True, 'import pybullet as p\n'), ((3669, 3722), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_RENDERING', '(0)'], {}), '(p.COV_ENABLE_RENDERING, 0)\n', (3695, 3722), True, 'import pybullet as p\n'), ((3797, 3824), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (3822, 3824), False, 'import pybullet_data\n'), ((3833, 3856), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (3845, 3856), True, 'import pybullet as p\n'), ((3863, 3887), 'pybullet.setTimeStep', 'p.setTimeStep', (['(1 / 240.0)'], {}), '(1 / 240.0)\n', (3876, 3887), True, 'import pybullet as p\n'), ((4176, 4205), 'pybullet.getNumJoints', 'p.getNumJoints', (['self.pandaUid'], {}), '(self.pandaUid)\n', (4190, 4205), True, 'import pybullet as p\n'), ((4455, 4496), 'pybullet.resetJointState', 'p.resetJointState', (['self.pandaUid', '(9)', '(0.08)'], {}), '(self.pandaUid, 9, 0.08)\n', (4472, 4496), True, 'import pybullet as p\n'), ((4505, 4547), 'pybullet.resetJointState', 'p.resetJointState', (['self.pandaUid', '(10)', '(0.08)'], {}), '(self.pandaUid, 10, 0.08)\n', (4522, 4547), True, 'import pybullet as p\n'), ((5284, 5311), 'Load_Object_URDF.LoadObjectURDF', 'LoadObjectURDF', (['self.object'], {}), '(self.object)\n', (5298, 5311), False, 'from Load_Object_URDF import LoadObjectURDF\n'), ((5321, 5339), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (5337, 5339), True, 'import pybullet as p\n'), ((5565, 5618), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_RENDERING', '(1)'], {}), '(p.COV_ENABLE_RENDERING, 1)\n', (5591, 5618), True, 'import pybullet as p\n'), ((5851, 5982), 'pybullet.computeViewMatrixFromYawPitchRoll', 'p.computeViewMatrixFromYawPitchRoll', ([], {'cameraTargetPosition': '[0.5, 0, 
0.15]', 'distance': '(0.7)', 'yaw': '(0)', 'pitch': '(-20)', 'roll': '(0)', 'upAxisIndex': '(2)'}), '(cameraTargetPosition=[0.5, 0, 0.15],\n distance=0.7, yaw=0, pitch=-20, roll=0, upAxisIndex=2)\n', (5886, 5982), True, 'import pybullet as p\n'), ((6566, 6701), 'pybullet.getCameraImage', 'p.getCameraImage', ([], {'width': '(960)', 'height': '(720)', 'viewMatrix': 'view_matrix', 'projectionMatrix': 'proj_matrix', 'renderer': 'p.ER_BULLET_HARDWARE_OPENGL'}), '(width=960, height=720, viewMatrix=view_matrix,\n projectionMatrix=proj_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL)\n', (6582, 6701), True, 'import pybullet as p\n'), ((6895, 6923), 'numpy.array', 'np.array', (['px'], {'dtype': 'np.uint8'}), '(px, dtype=np.uint8)\n', (6903, 6923), True, 'import numpy as np\n'), ((6944, 6980), 'numpy.reshape', 'np.reshape', (['rgb_array', '(720, 960, 4)'], {}), '(rgb_array, (720, 960, 4))\n', (6954, 6980), True, 'import numpy as np\n'), ((7225, 7257), 'pybullet.getDebugVisualizerCamera', 'p.getDebugVisualizerCamera', (['self'], {}), '(self)\n', (7251, 7257), True, 'import pybullet as p\n'), ((7281, 7415), 'pybullet.computeViewMatrixFromYawPitchRoll', 'p.computeViewMatrixFromYawPitchRoll', ([], {'cameraTargetPosition': '[0.6, 0.0, 0.45]', 'distance': '(0.5)', 'yaw': '(90)', 'pitch': '(-14)', 'roll': '(0)', 'upAxisIndex': '(2)'}), '(cameraTargetPosition=[0.6, 0.0, 0.45],\n distance=0.5, yaw=90, pitch=-14, roll=0, upAxisIndex=2)\n', (7316, 7415), True, 'import pybullet as p\n'), ((8041, 8176), 'pybullet.getCameraImage', 'p.getCameraImage', ([], {'width': '(640)', 'height': '(480)', 'viewMatrix': 'view_matrix', 'projectionMatrix': 'proj_matrix', 'renderer': 'p.ER_BULLET_HARDWARE_OPENGL'}), '(width=640, height=480, viewMatrix=view_matrix,\n projectionMatrix=proj_matrix, renderer=p.ER_BULLET_HARDWARE_OPENGL)\n', (8057, 8176), True, 'import pybullet as p\n'), ((8371, 8399), 'numpy.array', 'np.array', (['px'], {'dtype': 'np.uint8'}), '(px, dtype=np.uint8)\n', (8379, 8399), True, 
'import numpy as np\n'), ((8420, 8456), 'numpy.reshape', 'np.reshape', (['rgb_array', '(480, 640, 4)'], {}), '(rgb_array, (480, 640, 4))\n', (8430, 8456), True, 'import numpy as np\n'), ((8552, 8587), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.uint8'}), '([0, 0, 0], dtype=np.uint8)\n', (8560, 8587), True, 'import numpy as np\n'), ((8802, 8832), 'numpy.array', 'np.array', (['pseg'], {'dtype': 'np.uint8'}), '(pseg, dtype=np.uint8)\n', (8810, 8832), True, 'import numpy as np\n'), ((8849, 8880), 'numpy.array', 'np.array', (['pmask'], {'dtype': 'np.uint8'}), '(pmask, dtype=np.uint8)\n', (8857, 8880), True, 'import numpy as np\n'), ((9109, 9123), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (9121, 9123), True, 'import pybullet as p\n'), ((676, 694), 'numpy.array', 'np.array', (['([-1] * 4)'], {}), '([-1] * 4)\n', (684, 694), True, 'import numpy as np\n'), ((694, 711), 'numpy.array', 'np.array', (['([1] * 4)'], {}), '([1] * 4)\n', (702, 711), True, 'import numpy as np\n'), ((755, 773), 'numpy.array', 'np.array', (['([-1] * 5)'], {}), '([-1] * 5)\n', (763, 773), True, 'import numpy as np\n'), ((773, 790), 'numpy.array', 'np.array', (['([1] * 5)'], {}), '([1] * 5)\n', (781, 790), True, 'import numpy as np\n'), ((1128, 1248), 'pybullet.calculateInverseKinematics', 'p.calculateInverseKinematics', (['self.pandaUid', '(11)', 'action[1]', 'action[2]'], {'maxNumIterations': '(200)', 'residualThreshold': '(1e-05)'}), '(self.pandaUid, 11, action[1], action[2],\n maxNumIterations=200, residualThreshold=1e-05)\n', (1156, 1248), True, 'import pybullet as p\n'), ((1579, 1612), 'pybullet.getLinkState', 'p.getLinkState', (['self.pandaUid', '(11)'], {}), '(self.pandaUid, 11)\n', (1593, 1612), True, 'import pybullet as p\n'), ((1765, 1786), 'time.sleep', 'time.sleep', (['(1 / 240.0)'], {}), '(1 / 240.0)\n', (1775, 1786), False, 'import time\n'), ((2165, 2198), 'pybullet.getLinkState', 'p.getLinkState', (['self.pandaUid', '(11)'], {}), '(self.pandaUid, 11)\n', 
(2179, 2198), True, 'import pybullet as p\n'), ((2225, 2356), 'pybullet.calculateInverseKinematics', 'p.calculateInverseKinematics', (['self.pandaUid', '(11)', 'state_robot[0]', 'state_robot[1]'], {'maxNumIterations': '(200)', 'residualThreshold': '(1e-05)'}), '(self.pandaUid, 11, state_robot[0], state_robot\n [1], maxNumIterations=200, residualThreshold=1e-05)\n', (2253, 2356), True, 'import pybullet as p\n'), ((2845, 2910), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['self.objectUid', 'newPos', 'newOri'], {}), '(self.objectUid, newPos, newOri)\n', (2878, 2910), True, 'import pybullet as p\n'), ((3069, 3087), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (3085, 3087), True, 'import pybullet as p\n'), ((3176, 3255), 'pybullet.setJointMotorControlArray', 'p.setJointMotorControlArray', (['self.pandaUid', '[9, 10]', 'p.POSITION_CONTROL', '[0, 0]'], {}), '(self.pandaUid, [9, 10], p.POSITION_CONTROL, [0, 0])\n', (3203, 3255), True, 'import pybullet as p\n'), ((3268, 3286), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (3284, 3286), True, 'import pybullet as p\n'), ((3310, 3359), 'pybullet.getContactPoints', 'p.getContactPoints', (['self.pandaUid', 'self.objectUid'], {}), '(self.pandaUid, self.objectUid)\n', (3328, 3359), True, 'import pybullet as p\n'), ((3915, 3955), 'os.path.join', 'os.path.join', (['urdfRootPath', '"""plane.urdf"""'], {}), "(urdfRootPath, 'plane.urdf')\n", (3927, 3955), False, 'import os\n'), ((4082, 4135), 'os.path.join', 'os.path.join', (['urdfRootPath', '"""franka_panda/panda.urdf"""'], {}), "(urdfRootPath, 'franka_panda/panda.urdf')\n", (4094, 4135), False, 'import os\n'), ((4224, 4256), 'pybullet.getJointInfo', 'p.getJointInfo', (['self.pandaUid', 'i'], {}), '(self.pandaUid, i)\n', (4238, 4256), True, 'import pybullet as p\n'), ((4397, 4447), 'pybullet.resetJointState', 'p.resetJointState', (['self.pandaUid', 'i', 'rest_poses[i]'], {}), '(self.pandaUid, i, 
rest_poses[i])\n', (4414, 4447), True, 'import pybullet as p\n'), ((4573, 4606), 'pybullet.getLinkState', 'p.getLinkState', (['self.pandaUid', '(11)'], {}), '(self.pandaUid, 11)\n', (4587, 4606), True, 'import pybullet as p\n'), ((5083, 5129), 'os.path.join', 'os.path.join', (['urdfRootPath', '"""table/table.urdf"""'], {}), "(urdfRootPath, 'table/table.urdf')\n", (5095, 5129), False, 'import os\n'), ((5187, 5234), 'os.path.join', 'os.path.join', (['urdfRootPath', '"""tray/traybox.urdf"""'], {}), "(urdfRootPath, 'tray/traybox.urdf')\n", (5199, 5234), False, 'import os\n'), ((5464, 5497), 'pybullet.getLinkState', 'p.getLinkState', (['self.pandaUid', '(11)'], {}), '(self.pandaUid, 11)\n', (5478, 5497), True, 'import pybullet as p\n'), ((1643, 1676), 'pybullet.getJointState', 'p.getJointState', (['self.pandaUid', '(9)'], {}), '(self.pandaUid, 9)\n', (1658, 1676), True, 'import pybullet as p\n'), ((1680, 1714), 'pybullet.getJointState', 'p.getJointState', (['self.pandaUid', '(10)'], {}), '(self.pandaUid, 10)\n', (1695, 1714), True, 'import pybullet as p\n'), ((3476, 3509), 'pybullet.getJointState', 'p.getJointState', (['self.pandaUid', '(9)'], {}), '(self.pandaUid, 9)\n', (3491, 3509), True, 'import pybullet as p\n'), ((3513, 3547), 'pybullet.getJointState', 'p.getJointState', (['self.pandaUid', '(10)'], {}), '(self.pandaUid, 10)\n', (3528, 3547), True, 'import pybullet as p\n'), ((5366, 5399), 'pybullet.getJointState', 'p.getJointState', (['self.pandaUid', '(9)'], {}), '(self.pandaUid, 9)\n', (5381, 5399), True, 'import pybullet as p\n'), ((5403, 5437), 'pybullet.getJointState', 'p.getJointState', (['self.pandaUid', '(10)'], {}), '(self.pandaUid, 10)\n', (5418, 5437), True, 'import pybullet as p\n'), ((5633, 5659), 'numpy.array', 'np.array', (['self.observation'], {}), '(self.observation)\n', (5641, 5659), True, 'import numpy as np\n'), ((5732, 5765), 'pybullet.getBaseVelocity', 'p.getBaseVelocity', (['self.objectUid'], {}), '(self.objectUid)\n', (5749, 5765), True, 
'import pybullet as p\n'), ((923, 934), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (932, 934), False, 'import os\n'), ((2050, 2076), 'numpy.array', 'np.array', (['self.observation'], {}), '(self.observation)\n', (2058, 2076), True, 'import numpy as np\n'), ((4844, 4877), 'pybullet.getLinkState', 'p.getLinkState', (['self.pandaUid', '(11)'], {}), '(self.pandaUid, 11)\n', (4858, 4877), True, 'import pybullet as p\n')] |
# http://francescopochetti.com/fast-neural-style-transfer-sagemaker-deployment/
import os, sys
import json
import numpy as np
import tarfile
import random
import torch
# import inspect
# currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# parentdir = os.path.dirname(currentdir)
# sys.path.insert(0,parentdir)
import data_feeder as df
from har_model import load_model
JSON_CONTENT_TYPE = 'application/json'
NPY_CONTENT_TYPE = 'application/x-npy'
SAVED_MODEL = 'har_model' ## leave off *.pth extension (DON'T name it model.pth !!!!!) # har_model_smooth
ENTRY_POINT = 'har_model.py'
# role = 'arn:aws:iam::821179856091:role/dsv_sage_exec'# revibe AWS acct
role = 'arn:aws:iam::257759225263:role/service-role/AmazonSageMaker-ExecutionRole-20180525T100991'# new AWS acct
def get_data(gpu_preproc=True):
data_path = '/home/david/data/revibe/boris/npy/har1'
data_path = '/home/david/data/revibe/boris/npy/test2'
npy_files = [os.path.join(data_path, f) for f in os.listdir(data_path) if f.endswith('.npy')]
# random.shuffle(npy_files)
seed = 1234
cfg = df.Config(seed=seed)
cfg.window_size = 32
cfg.test_step = 8
cfg.batch_size = 50
cfg.nz = 1
cfg.gpu_preproc = gpu_preproc
cfg.feats_raw = True
cfg.feats_fft = True
cfg.add_mag = True
cfg.labels = ['', '.fidget.', '.walk.', '.run.']
cfg.label_fxn = lambda s: np.nonzero([int(l in s) for l in cfg.labels])[0][-1]
sb = df.SigBatcher(npy_files, cfg, train=False)
for b in sb.batch_stream():
x = np.transpose(b.X, (0,2,1)).astype(np.float32)
y = b.y
break
return x,y,sb
def make_payload(x):
bs = x.shape[0]
data = (
x.flatten().tolist()
)
payload = {"bs": bs, "data": data}
return payload
def dump_payload(x, fn='har_payload.json'):
payload = make_payload(x)
with open(fn, 'w') as file:
json.dump(payload, file)
def to_numpy(tensor):
return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
if __name__ == "__main__":
gpu_preproc = False
## get sample data...
x,_,sb = get_data(gpu_preproc=gpu_preproc)
dump_payload(x)
saved_model_file = '{}.pth'.format(SAVED_MODEL)
onnx_file = '{}.onnx'.format(SAVED_MODEL)
## load saved model...
model = load_model('.', saved_model_file, gpu_preproc=gpu_preproc)
device = torch.device('cpu')
model.to(device)
model.eval()
x_t = torch.from_numpy(x).float().to(device)
torch_out = model(x_t)
out = to_numpy(torch_out)
y = np.argmax(out, 1)
print(y)
# Export the model...
torch.onnx.export(model, # model being run
x_t, # model input (or a tuple for multiple inputs)
onnx_file, # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
do_constant_folding=True, # whether to execute constant folding for optimization
input_names = ['input'], # the model's input names
output_names = ['output'], # the model's output names
dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes
'output' : {0 : 'batch_size'}},
verbose=True,
# opset_version=10, # the ONNX version to export the model to
)
## load model and check...
import onnx
onnx_model = onnx.load(onnx_file)
# onnx.checker.check_model(onnx_model)
## run model and compare outputs...
import onnxruntime
ort_session = onnxruntime.InferenceSession(onnx_file)
ort_inputs = {ort_session.get_inputs()[0].name: x}
ort_outs = ort_session.run(None, ort_inputs)
ort_outs = np.array(ort_outs)
print(ort_outs.shape)
np.testing.assert_allclose(to_numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
print('passed!')
| [
"os.listdir",
"json.dump",
"os.path.join",
"numpy.argmax",
"numpy.transpose",
"onnxruntime.InferenceSession",
"numpy.array",
"data_feeder.SigBatcher",
"torch.device",
"onnx.load",
"torch.onnx.export",
"data_feeder.Config",
"har_model.load_model",
"torch.from_numpy"
] | [((1118, 1138), 'data_feeder.Config', 'df.Config', ([], {'seed': 'seed'}), '(seed=seed)\n', (1127, 1138), True, 'import data_feeder as df\n'), ((1492, 1534), 'data_feeder.SigBatcher', 'df.SigBatcher', (['npy_files', 'cfg'], {'train': '(False)'}), '(npy_files, cfg, train=False)\n', (1505, 1534), True, 'import data_feeder as df\n'), ((2395, 2453), 'har_model.load_model', 'load_model', (['"""."""', 'saved_model_file'], {'gpu_preproc': 'gpu_preproc'}), "('.', saved_model_file, gpu_preproc=gpu_preproc)\n", (2405, 2453), False, 'from har_model import load_model\n'), ((2467, 2486), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2479, 2486), False, 'import torch\n'), ((2649, 2666), 'numpy.argmax', 'np.argmax', (['out', '(1)'], {}), '(out, 1)\n', (2658, 2666), True, 'import numpy as np\n'), ((2715, 2952), 'torch.onnx.export', 'torch.onnx.export', (['model', 'x_t', 'onnx_file'], {'export_params': '(True)', 'do_constant_folding': '(True)', 'input_names': "['input']", 'output_names': "['output']", 'dynamic_axes': "{'input': {(0): 'batch_size'}, 'output': {(0): 'batch_size'}}", 'verbose': '(True)'}), "(model, x_t, onnx_file, export_params=True,\n do_constant_folding=True, input_names=['input'], output_names=['output'\n ], dynamic_axes={'input': {(0): 'batch_size'}, 'output': {(0):\n 'batch_size'}}, verbose=True)\n", (2732, 2952), False, 'import torch\n'), ((3733, 3753), 'onnx.load', 'onnx.load', (['onnx_file'], {}), '(onnx_file)\n', (3742, 3753), False, 'import onnx\n'), ((3883, 3922), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['onnx_file'], {}), '(onnx_file)\n', (3911, 3922), False, 'import onnxruntime\n'), ((4042, 4060), 'numpy.array', 'np.array', (['ort_outs'], {}), '(ort_outs)\n', (4050, 4060), True, 'import numpy as np\n'), ((974, 1000), 'os.path.join', 'os.path.join', (['data_path', 'f'], {}), '(data_path, f)\n', (986, 1000), False, 'import os, sys\n'), ((1959, 1983), 'json.dump', 'json.dump', (['payload', 'file'], {}), 
'(payload, file)\n', (1968, 1983), False, 'import json\n'), ((1010, 1031), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (1020, 1031), False, 'import os, sys\n'), ((1584, 1612), 'numpy.transpose', 'np.transpose', (['b.X', '(0, 2, 1)'], {}), '(b.X, (0, 2, 1))\n', (1596, 1612), True, 'import numpy as np\n'), ((2540, 2559), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2556, 2559), False, 'import torch\n')] |
import os
import gzip
import pickle
import numpy as np
from .train import onto
SRC_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data/')
def get_embeddings():
fname = os.path.join(SRC_DIR, 'embeddings.npz')
embs = np.load(fname, allow_pickle=True)['embds'].item()
return embs
def get_go():
fname = os.path.join(SRC_DIR, 'go.obo')
go = onto.Ontology(fname, with_rels=True, include_alt_ids=False)
return go
| [
"os.path.realpath",
"os.path.join",
"numpy.load"
] | [((217, 256), 'os.path.join', 'os.path.join', (['SRC_DIR', '"""embeddings.npz"""'], {}), "(SRC_DIR, 'embeddings.npz')\n", (229, 256), False, 'import os\n'), ((362, 393), 'os.path.join', 'os.path.join', (['SRC_DIR', '"""go.obo"""'], {}), "(SRC_DIR, 'go.obo')\n", (374, 393), False, 'import os\n'), ((121, 147), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (137, 147), False, 'import os\n'), ((268, 301), 'numpy.load', 'np.load', (['fname'], {'allow_pickle': '(True)'}), '(fname, allow_pickle=True)\n', (275, 301), True, 'import numpy as np\n')] |
import cv2
import numpy as np
img = np.random.randint(0, 256, size=[5, 5], dtype=np.uint8)
min = 100
max = 200
mask = cv2.inRange(img, min, max)
print("img=\n", img)
print("mask=\n", mask)
| [
"numpy.random.randint",
"cv2.inRange"
] | [((37, 91), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '[5, 5]', 'dtype': 'np.uint8'}), '(0, 256, size=[5, 5], dtype=np.uint8)\n', (54, 91), True, 'import numpy as np\n'), ((119, 145), 'cv2.inRange', 'cv2.inRange', (['img', 'min', 'max'], {}), '(img, min, max)\n', (130, 145), False, 'import cv2\n')] |
#2次元Poisson方程式を、有限要素法で解く
#偏微分方程式: ∇・[p(x,y)∇u(x,y)] = f(x,y) (in Ω)
#境界条件: u(x,y)=alpha (on Γ1), du(x,y)/dx=beta (on Γ2)
import time #時刻を扱うライブラリ
import numpy as np #数値計算用
import scipy.spatial #ドロネー分割
import scipy.linalg #SciPyの線形計算ソルバー
import scipy.sparse #圧縮行列の処理
import scipy.sparse.linalg #圧縮行列用ソルバー
import matplotlib.pyplot as plt #グラフ作成
from mpl_toolkits.mplot3d import Axes3D #3Dグラフ
from matplotlib import cm #カラーマップ
#節点データを生成
def generate_nodes(node_type):
#格子点配置
if (node_type[0]=='lattice'):
lattice_num = node_type[1] #格子分割におけるx・y方向の節点数
x = np.linspace(x_min, x_max, lattice_num)
y = np.linspace(y_min, y_max, lattice_num)
nodes = np.empty((lattice_num*lattice_num,2), np.float64)
for j in range(lattice_num):
for i in range(lattice_num):
nodes[i +lattice_num*j, 0] = x[i]
nodes[i +lattice_num*j, 1] = y[j]
#ランダム配置
elif (node_type[0]=='random'):
random_num = node_type[1] #ランダム分割における節点数
nodes = np.random.rand(random_num,2) #[0~1,0~1]の点をrandom_num個生成
nodes[:,0] = x_min +(x_max-x_min)*nodes[:,0]
nodes[:,1] = y_min +(y_max-y_min)*nodes[:,1]
#隅に点を移動
if (4<=random_num):
nodes[0,0],nodes[0,1] = (x_min, (y_max+y_min)/2)
nodes[1,0],nodes[1,1] = (x_max, (y_max+y_min)/2)
nodes[2,0],nodes[2,1] = ((x_max+x_min)/2, y_min)
nodes[3,0],nodes[3,1] = ((x_max+x_min)/2, y_max)
'''nodes[4,0],nodes[4,1] = (x_min, y_min)
nodes[5,0],nodes[5,1] = (x_min, y_max)
nodes[6,0],nodes[6,1] = (x_max, y_min)
nodes[7,0],nodes[7,1] = (x_max, y_max) #'''
#節点をドロネー分割
delaunay_data = scipy.spatial.Delaunay(nodes)
nod_total = delaunay_data.points.shape[0]
tri_ele_total = delaunay_data.simplices.shape[0]
seg_ele_total = delaunay_data.convex_hull.shape[0]
print('節点数、三角形要素数、境界線分要素数')
print(nod_total, tri_ele_total, seg_ele_total)
nod_pos_glo = delaunay_data.points #[nod_total,2]
print('Global節点のx,y座標\n', nod_pos_glo)
nod_num_tri = delaunay_data.simplices #[tri_ele_total,3]
print('三角形要素を構成する節点番号\n', nod_num_tri)
nod_num_seg = delaunay_data.convex_hull #[seg_ele_total,2]
print('境界線分要素を構成する節点番号\n', nod_num_seg)
return nod_pos_glo, nod_num_tri, nod_num_seg
def make_mesh_data():
    """Gather the per-element local node coordinates from the global node table.

    Replaces the original per-scalar Python loops with a single NumPy
    advanced-indexing gather per element type (same values, same float64
    dtype, one vectorized operation instead of O(elements*nodes) iterations).
    Reads module-level nod_pos_glo, nod_num_tri, nod_num_seg.
    Returns (nod_pos_tri (E,3,2), nod_pos_seg (S,2,2)).
    """
    print('三角形要素を構成するLocal節点座標')
    # For every triangle, pick the (x, y) of its three nodes in one gather.
    nod_pos_tri = nod_pos_glo[nod_num_tri]
    print('nod_pos_tri(x0, y0),(x1, y1),(x2, y2) =\n', nod_pos_tri)
    print('境界線分要素を構成するLocal節点座標')
    # Same gather for the two endpoints of every boundary segment.
    nod_pos_seg = nod_pos_glo[nod_num_seg]
    print('nod_pos_seg(x0, y0),(x1, y1) =\n', nod_pos_seg)
    return nod_pos_tri, nod_pos_seg
#要素行列の構築 (vectorized)
def assemble_element_matrix(nod_num_tri, nod_pos_tri, source_f=None):
    """Assemble the element stiffness matrices and load vectors.

    Vectorized over all triangles at once (broadcast outer products) instead of
    the original triple-nested Python loops; produces identical values.

    nod_num_tri : (E, 3) triangle connectivity (kept for interface compatibility).
    nod_pos_tri : (E, 3, 2) local node coordinates of every triangle.
    source_f    : constant source term f; defaults to the module-level ``func_f``
                  (backward-compatible generalization).
    Returns (mat_A_ele (E,3,3), vec_b_ele (E,3), area_tri (E,)).
    """
    f_val = func_f if source_f is None else source_f
    x = nod_pos_tri[:, :, 0]   # (E, 3) x-coordinates per triangle
    y = nod_pos_tri[:, :, 1]   # (E, 3) y-coordinates per triangle
    #各要素の面積
    print('Element area_tri')
    area_tri = (x[:, 1]-x[:, 0])*(y[:, 2]-y[:, 0]) - (x[:, 2]-x[:, 0])*(y[:, 1]-y[:, 0])
    area_tri = np.absolute(area_tri)/2.0
    print(area_tri)
    #各要素の形状関数の係数
    print('Shape function a,b,c')
    nxt = [1, 2, 0]   # cyclic successor of each local node index
    prv = [2, 0, 1]   # cyclic predecessor of each local node index
    shape_a = x[:, nxt]*y[:, prv] - x[:, prv]*y[:, nxt]
    shape_b = y[:, nxt] - y[:, prv]
    shape_c = x[:, prv] - x[:, nxt]
    for e in range(min(len(nod_pos_tri), 10)):  #形状関数の係数を10番目の三角形要素まで確認
        print(shape_a[e, :], shape_b[e, :], shape_c[e, :])
    print("Local matrix")
    # Element stiffness: A_ij = (b_i*b_j + c_i*c_j) / (4*area), as (E,3,3)
    # broadcasted outer products. Symmetric in (i, j) by construction.
    mat_A_ele = (shape_b[:, :, None]*shape_b[:, None, :]
                 + shape_c[:, :, None]*shape_c[:, None, :]) / (4.0*area_tri)[:, None, None]
    # Constant source f contributes -f*area/3 to each of the 3 local nodes.
    vec_b_ele = np.repeat((-f_val*area_tri/3.0)[:, None], 3, axis=1)
    return mat_A_ele, vec_b_ele, area_tri
#全体行列の構築
def assemble_global_matrix(matrix_type):
    """Scatter the element matrices/vectors into the global linear system.

    matrix_type : 'basic' builds a dense ndarray, 'sparse' a scipy lil_matrix
                  (lil is efficient for incremental element-wise fill).
    Reads module-level nod_pos_glo, nod_pos_tri, nod_num_tri, mat_A_ele,
    vec_b_ele. Prints assembly progress in percent.
    Returns (mat_A_glo, vec_b_glo).
    """
    n_nod = len(nod_pos_glo)
    #全体行列を用意
    if matrix_type == 'basic':
        mat_A_glo = np.zeros((n_nod, n_nod), np.float64)     # dense global matrix
    elif matrix_type == 'sparse':
        mat_A_glo = scipy.sparse.lil_matrix((n_nod, n_nod))  # lil-format sparse matrix
    vec_b_glo = np.zeros(n_nod, np.float64)                  # global load vector
    #全体行列を組み立てる
    print('Assemble matrix')
    n_ele = len(nod_pos_tri)
    pct_done = 1
    for e in range(n_ele):
        glo = nod_num_tri[e]   # the 3 global node numbers of element e
        for i in range(3):
            for j in range(3):
                mat_A_glo[glo[i], glo[j]] += mat_A_ele[e, i, j]
            vec_b_glo[glo[i]] += vec_b_ele[e, i]
        #処理の経過を%表示
        if pct_done <= 100*e/n_ele:
            print("{:7.2f}%".format(100*e/n_ele), end='')
            pct_done += 1
    print(" 100.00%")
    print('Pre global matrix')
    for i in range(min(n_nod, 10)):   #全体行列を10行10列まで確認
        for j in range(min(n_nod, 10)):
            print("{:7.2f}".format(mat_A_glo[i, j]), end='')
        print(";{:7.2f}".format(vec_b_glo[i]))
    #print(np.concatenate((mat_A_glo, np.reshape(vec_b_glo, (-1,1))), axis=1))
    return mat_A_glo, vec_b_glo
#境界要素の情報を設定
def make_boundary_info(nod_pos_seg):
    """Measure every boundary segment and assign it a BC type and value.

    A segment is attributed to one side of the rectangle only when BOTH of its
    endpoints lie within half the mean segment length of that side; anything
    left unclassified falls back to a zero-flux Neumann condition.
    Reads module-level x_min/x_max/y_min/y_max and BC_left/BC_right/BC_bottom/BC_top.
    Returns (BC_type, BC_value, leng_seg).
    """
    # Segment lengths |p0 - p1|.
    leng_seg = np.sqrt((nod_pos_seg[:,0,0]-nod_pos_seg[:,1,0])**2.0
                       +(nod_pos_seg[:,0,1]-nod_pos_seg[:,1,1])**2.0)
    print('leng_seg =\n', leng_seg)
    # Classify every boundary segment by the side it sits on.
    n_seg = len(nod_pos_seg)
    BC_type = [""]*n_seg
    BC_value = [""]*n_seg
    tol = 0.5*np.mean(leng_seg)   # snap tolerance: half the mean edge length
    for e in range(n_seg):
        seg_x = nod_pos_seg[e, :, 0]   # x of both endpoints
        seg_y = nod_pos_seg[e, :, 1]   # y of both endpoints
        if (seg_x < x_min + tol).all():        # left side
            BC_type[e] = BC_left[0]
            BC_value[e] = BC_left[1]
        elif (x_max - tol < seg_x).all():      # right side
            BC_type[e] = BC_right[0]
            BC_value[e] = BC_right[1]
        elif (seg_y < y_min + tol).all():      # bottom side
            BC_type[e] = BC_bottom[0]
            BC_value[e] = BC_bottom[1]
        elif (y_max - tol < seg_y).all():      # top side
            BC_type[e] = BC_top[0]
            BC_value[e] = BC_top[1]
        else:                                  # default: do-nothing Neumann
            BC_type[e] = 'Neumann'
            BC_value[e] = 0.0
    print('BC_type =\n', BC_type)
    print('BC_value =\n', BC_value)
    return BC_type, BC_value, leng_seg
#境界条件を実装
def set_boundary_condition(mat_A_glo, vec_b_glo, BC_type, BC_value, leng_seg):
    """Apply Dirichlet/Neumann boundary conditions to the global system in place.

    For each boundary segment and each of its two nodes:
    - Dirichlet: move the known value's contribution to the RHS, pin the RHS
      entry, zero the node's row and column, and set the diagonal to 1.
      Subtracting value*row (instead of value*column) relies on A being
      symmetric, which holds for this stiffness assembly. Corner nodes shared
      by two segments are processed twice; the second pass subtracts an
      already-zeroed row (no-op) and re-pins the same value, so the result
      is unchanged.
    - Neumann: add value*segment_length/2 to the node's RHS entry.
    Returns the modified (mat_A_glo, vec_b_glo).
    """
    # Process the global node corresponding to each node of each boundary element.
    print('Boundary conditions')
    CountPercent = 1
    for e in range(len(nod_pos_seg)):
        for n in range(2):
            if(BC_type[e]=='Dirichlet'):
                # NOTE(review): when mat_A_glo is a lil_matrix, the row slice is a
                # sparse operand — confirm this in-place subtraction behaves as intended.
                vec_b_glo[:] -= BC_value[e]*mat_A_glo[nod_num_seg[e,n],:] #move known terms to RHS
                vec_b_glo[nod_num_seg[e,n]] = BC_value[e] #pin the unknown to the prescribed value
                mat_A_glo[nod_num_seg[e,n],:] = 0.0 #zero the whole row
                mat_A_glo[:,nod_num_seg[e,n]] = 0.0 #zero the whole column
                mat_A_glo[nod_num_seg[e,n],nod_num_seg[e,n]] = 1.0 #set the diagonal entry to 1
            if (BC_type[e]=='Neumann'): #Neumann boundary condition
                vec_b_glo[nod_num_seg[e,n]] += BC_value[e]*leng_seg[e]/2.0 #prescribe the flux on the node
        #print progress in percent
        if(CountPercent <= 100*e/len(nod_pos_seg)):
            print("{:7.2f}%".format(100*e/len(nod_pos_seg)), end='')
            CountPercent += 1
    print(" 100.00%")
    print("Post global matrix")
    for i in range(min(len(nod_pos_glo),10)): #inspect the global matrix up to 10 rows/cols
        for j in range(min(len(nod_pos_glo),10)):
            print("{:7.2f}".format(mat_A_glo[i,j]), end='')
        print(";{:7.2f}".format(vec_b_glo[i]))
    #print(np.concatenate((mat_A_glo, np.reshape(vec_b_glo, (-1,1))), axis=1))
    return mat_A_glo, vec_b_glo
#連立方程式を解く
def solve_simultaneous_equations(mat_A_glo, vec_b_glo):
    """Solve the assembled linear system A u = b for the nodal values u.

    Selects the dense direct solver (scipy.linalg.solve) or the sparse one
    (scipy.sparse.linalg.spsolve) based on the module-level ``matrix_type``
    flag; prints mesh statistics and the solution vector.
    Fix: the printed label was misspelled 'Unkown' — corrected to 'Unknown'.
    Returns the solution vector u.
    """
    print('節点数、三角形要素数、境界線分要素数')
    print(len(nod_pos_glo), len(nod_pos_tri), len(nod_pos_seg))
    #print("detA = ", scipy.linalg.det(mat_A_glo)) #determinant of A (debug)
    #print("Rank A = ", np.linalg.matrix_rank(mat_A_glo)) #rank of A (debug)
    #print("Inverse A = ", scipy.linalg.inv(mat_A_glo)) #inverse of A (debug)
    print('Solve linear equations')
    if(matrix_type=='basic'):
        unknown_vec_u = scipy.linalg.solve(mat_A_glo,vec_b_glo) #dense direct solve of Au=b
    elif(matrix_type=='sparse'):
        unknown_vec_u = scipy.sparse.linalg.spsolve(mat_A_glo,vec_b_glo) #converts lil to csr internally
    print('Unknown vector U = ') #solution vector (typo fixed)
    print(unknown_vec_u)
    print('Max U = ', max(unknown_vec_u), ', Min U = ',min(unknown_vec_u)) #extrema of u
    return unknown_vec_u
#メッシュを表示
def visualize_mesh(nod_pos_glo, show_text, out_type):
    """Plot the triangular mesh; optionally annotate node/element numbers.

    ``out_type`` is 'show' to open a window or 'save' to write
    fem2d_mesh.png.  Uses the module-level ``nod_num_tri``, ``nod_pos_tri``
    and ``nod_pos_seg`` arrays.
    """
    #plt.rcParams['font.family'] = 'Times New Roman'  # global font setting
    fig = plt.figure(figsize=(8, 6), dpi=100, facecolor='#ffffff')
    #plt.title("2D mesh for FEM")
    plt.xlabel('$x$')
    plt.ylabel('$y$')
    # Triangle edges plus the nodes themselves.
    plt.triplot(nod_pos_glo[:, 0], nod_pos_glo[:, 1], nod_num_tri, color='#0000ff')
    plt.scatter(nod_pos_glo[:, 0], nod_pos_glo[:, 1], color='#0000ff')
    if show_text == True:
        # Node numbers next to each node.
        for idx in range(len(nod_pos_glo)):
            plt.text(nod_pos_glo[idx, 0], nod_pos_glo[idx, 1], idx, ha='right')
        # Triangle element numbers at each element centroid.
        for e, tri in enumerate(nod_pos_tri):
            cx = (tri[0, 0] + tri[1, 0] + tri[2, 0]) / 3.0
            cy = (tri[0, 1] + tri[1, 1] + tri[2, 1]) / 3.0
            plt.text(cx, cy, '#%d' % e, ha='center')
        # Boundary segment numbers at each segment midpoint.
        for s, seg in enumerate(nod_pos_seg):
            mx = (seg[0, 0] + seg[1, 0]) / 2.0
            my = (seg[0, 1] + seg[1, 1]) / 2.0
            plt.text(mx, my, '*%d' % s, ha='center')
    # Emit the figure.
    if out_type == 'show':
        plt.show()
    elif out_type == 'save':
        plt.savefig("fem2d_mesh.png")
        plt.close()  # close the created figure window
#計算結果を表示
def visualize_result(nod_pos_glo, unknown_vec_u, show_text, out_type):
    """Plot the FEM solution u(x, y) as a 3-D triangulated surface.

    ``out_type`` is 'show' or 'save' (fem2d_poisson.png).  Uses the
    module-level ``nod_pos_tri``/``nod_num_tri`` arrays and matplotlib's
    ``cm`` colormap module.
    """
    #plt.rcParams['font.family'] = 'Times New Roman'  # global font setting
    fig = plt.figure(figsize=(8, 6), dpi=100, facecolor='#ffffff')
    # BUG FIX: fig.gca(projection=..., azim=..., elev=...) was deprecated in
    # Matplotlib 3.4 and removed in 3.6; add_subplot forwards the extra
    # keywords to the Axes3D constructor.
    ax = fig.add_subplot(projection='3d', azim=-120, elev=20)
    #plt.title("FEA of 2D Poisson's equation")
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')
    ax.set_zlabel('$u(x,y)$')
    # Numerical solution as a coloured surface over the triangulation.
    surf = ax.plot_trisurf(nod_pos_glo[:, 0], nod_pos_glo[:, 1], unknown_vec_u, cmap=cm.jet, linewidth=0)
    plt.colorbar(surf, shrink=0.8, aspect=10)
    # NOTE(review): no artists carry labels, so this legend call only emits a
    # warning — consider removing it.
    plt.legend(loc='best')
    if show_text == True:
        for n in range(len(nod_pos_glo)):  # node numbers
            ax.text(nod_pos_glo[n, 0], nod_pos_glo[n, 1], unknown_vec_u[n], 'n%d' % n, ha='center', va='bottom', color='#000000')
        for e in range(len(nod_pos_tri)):  # triangle element numbers
            meanX = (nod_pos_tri[e, 0, 0] + nod_pos_tri[e, 1, 0] + nod_pos_tri[e, 2, 0]) / 3.0
            meanY = (nod_pos_tri[e, 0, 1] + nod_pos_tri[e, 1, 1] + nod_pos_tri[e, 2, 1]) / 3.0
            meanU = (unknown_vec_u[nod_num_tri[e, 0]] + unknown_vec_u[nod_num_tri[e, 1]] + unknown_vec_u[nod_num_tri[e, 2]]) / 3.0
            ax.text(meanX, meanY, meanU, 'e%d' % e, ha='center', color='#000000')
    # Emit the figure.
    if out_type == 'show':
        plt.show()
    elif out_type == 'save':
        plt.savefig("fem2d_poisson.png")
        plt.close()  # close the created figure window
#メイン実行部
if __name__ == '__main__':
    ##### Pre-processing #####
    x_min = -1.0  # minimum x of the computational domain
    x_max = 1.0   # maximum x of the computational domain
    y_min = -1.0  # minimum y of the computational domain
    y_max = 1.0   # maximum y of the computational domain
    func_f = 1.0  # constant source function f
    # Boundary type and value for the left (x=x_min), right (x=x_max),
    # bottom (y=y_min) and top (y=y_max) edges.
    # Boundary types are: Dirichlet, Neumann
    BC_left = ['Dirichlet', 0.0]
    BC_right = ['Neumann', 1.0]
    BC_bottom = ['Neumann', 0.0]
    BC_top = ['Neumann', 0.0]
    # Node generation method: lattice or random
    node_type = ['lattice', 10]  # number of nodes per x/y direction for lattice splitting
    #node_type = ['random', 50]  # number of nodes for random splitting
    matrix_type = 'sparse'  # global matrix format: basic or sparse
    # Generate node data: global node coords, triangle node numbers, boundary segment node numbers
    nod_pos_glo, nod_num_tri, nod_num_seg = generate_nodes(node_type)
    # Local node coordinates of the triangle elements and boundary segment elements
    nod_pos_tri, nod_pos_seg = make_mesh_data()
    # Show the mesh (post-processing): show_text toggles labels, out_type is show/save
    visualize_mesh(nod_pos_glo, show_text=True, out_type='show')
    ##### Main process #####
    # Record the calculation start time
    print ("Calculation start: ", time.ctime())  # display the start time
    compute_time = time.time()  # start-of-calculation timestamp
    # Build the element matrices
    mat_A_ele, vec_b_ele, area_tri = assemble_element_matrix(nod_num_tri, nod_pos_tri)
    # Build the global matrix
    mat_A_glo, vec_b_glo = assemble_global_matrix(matrix_type)
    # Gather the boundary element information
    BC_type, BC_value, leng_seg = make_boundary_info(nod_pos_seg)
    # Apply the boundary conditions
    mat_A_glo, vec_b_glo = set_boundary_condition(mat_A_glo, vec_b_glo, BC_type, BC_value, leng_seg)
    # Solve the linear system
    unknown_vec_u = solve_simultaneous_equations(mat_A_glo, vec_b_glo)
    # Display the elapsed calculation time
    compute_time = time.time() - compute_time
    print ("Calculation time: {:0.5f}[sec]".format(compute_time))
    # Show the result (post-processing): show_text toggles labels, out_type is show/save
    visualize_result(nod_pos_glo, unknown_vec_u, show_text=False, out_type='show')
| [
"numpy.absolute",
"numpy.empty",
"time.ctime",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.triplot",
"matplotlib.pyplot.text",
"matplotlib.pyplot.... | [((6561, 6578), 'numpy.sqrt', 'np.sqrt', (['leng_seg'], {}), '(leng_seg)\n', (6568, 6578), True, 'import numpy as np\n'), ((10079, 10135), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)', 'dpi': '(100)', 'facecolor': '"""#ffffff"""'}), "(figsize=(8, 6), dpi=100, facecolor='#ffffff')\n", (10089, 10135), True, 'import matplotlib.pyplot as plt\n'), ((10191, 10208), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (10201, 10208), True, 'import matplotlib.pyplot as plt\n'), ((10220, 10237), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (10230, 10237), True, 'import matplotlib.pyplot as plt\n'), ((10265, 10344), 'matplotlib.pyplot.triplot', 'plt.triplot', (['nod_pos_glo[:, 0]', 'nod_pos_glo[:, 1]', 'nod_num_tri'], {'color': '"""#0000ff"""'}), "(nod_pos_glo[:, 0], nod_pos_glo[:, 1], nod_num_tri, color='#0000ff')\n", (10276, 10344), True, 'import matplotlib.pyplot as plt\n'), ((10354, 10420), 'matplotlib.pyplot.scatter', 'plt.scatter', (['nod_pos_glo[:, 0]', 'nod_pos_glo[:, 1]'], {'color': '"""#0000ff"""'}), "(nod_pos_glo[:, 0], nod_pos_glo[:, 1], color='#0000ff')\n", (10365, 10420), True, 'import matplotlib.pyplot as plt\n'), ((11217, 11228), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11226, 11228), True, 'import matplotlib.pyplot as plt\n'), ((11404, 11460), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)', 'dpi': '(100)', 'facecolor': '"""#ffffff"""'}), "(figsize=(8, 6), dpi=100, facecolor='#ffffff')\n", (11414, 11460), True, 'import matplotlib.pyplot as plt\n'), ((11816, 11857), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['surf'], {'shrink': '(0.8)', 'aspect': '(10)'}), '(surf, shrink=0.8, aspect=10)\n', (11828, 11857), True, 'import matplotlib.pyplot as plt\n'), ((11885, 11907), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (11895, 11907), True, 'import matplotlib.pyplot as plt\n'), 
((12681, 12692), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12690, 12692), True, 'import matplotlib.pyplot as plt\n'), ((13785, 13796), 'time.time', 'time.time', ([], {}), '()\n', (13794, 13796), False, 'import time\n'), ((591, 629), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'lattice_num'], {}), '(x_min, x_max, lattice_num)\n', (602, 629), True, 'import numpy as np\n'), ((642, 680), 'numpy.linspace', 'np.linspace', (['y_min', 'y_max', 'lattice_num'], {}), '(y_min, y_max, lattice_num)\n', (653, 680), True, 'import numpy as np\n'), ((698, 750), 'numpy.empty', 'np.empty', (['(lattice_num * lattice_num, 2)', 'np.float64'], {}), '((lattice_num * lattice_num, 2), np.float64)\n', (706, 750), True, 'import numpy as np\n'), ((3507, 3528), 'numpy.absolute', 'np.absolute', (['area_tri'], {}), '(area_tri)\n', (3518, 3528), True, 'import numpy as np\n'), ((6728, 6745), 'numpy.mean', 'np.mean', (['leng_seg'], {}), '(leng_seg)\n', (6735, 6745), True, 'import numpy as np\n'), ((11136, 11146), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11144, 11146), True, 'import matplotlib.pyplot as plt\n'), ((12597, 12607), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12605, 12607), True, 'import matplotlib.pyplot as plt\n'), ((13740, 13752), 'time.ctime', 'time.ctime', ([], {}), '()\n', (13750, 13752), False, 'import time\n'), ((14302, 14313), 'time.time', 'time.time', ([], {}), '()\n', (14311, 14313), False, 'import time\n'), ((1040, 1069), 'numpy.random.rand', 'np.random.rand', (['random_num', '(2)'], {}), '(random_num, 2)\n', (1054, 1069), True, 'import numpy as np\n'), ((10510, 10571), 'matplotlib.pyplot.text', 'plt.text', (['nod_pos_glo[n, 0]', 'nod_pos_glo[n, 1]', 'n'], {'ha': '"""right"""'}), "(nod_pos_glo[n, 0], nod_pos_glo[n, 1], n, ha='right')\n", (10518, 10571), True, 'import matplotlib.pyplot as plt\n'), ((10804, 10850), 'matplotlib.pyplot.text', 'plt.text', (['meanX', 'meanY', "('#%d' % e)"], {'ha': '"""center"""'}), "(meanX, 
meanY, '#%d' % e, ha='center')\n", (10812, 10850), True, 'import matplotlib.pyplot as plt\n'), ((11043, 11089), 'matplotlib.pyplot.text', 'plt.text', (['meanX', 'meanY', "('*%d' % e)"], {'ha': '"""center"""'}), "(meanX, meanY, '*%d' % e, ha='center')\n", (11051, 11089), True, 'import matplotlib.pyplot as plt\n'), ((11183, 11212), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fem2d_mesh.png"""'], {}), "('fem2d_mesh.png')\n", (11194, 11212), True, 'import matplotlib.pyplot as plt\n'), ((12644, 12676), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fem2d_poisson.png"""'], {}), "('fem2d_poisson.png')\n", (12655, 12676), True, 'import matplotlib.pyplot as plt\n')] |
import logging
from itertools import zip_longest
from typing import Iterator
import cv2
import numpy as np
import opencv_wrapper as orig_cvw
from more_itertools.recipes import grouper
from tqdm import tqdm
from skelshop.config import conf as config
from skelshop.skelgraphs.openpose import MODE_SKELS
from skelshop.skelgraphs.posetrack import POSETRACK18_SKEL
from skelshop.utils.bbox import points_bbox_x1y1x2y2
from skelshop.utils.geom import rnd, rot
from skelshop.utils.vidreadwrapper import VidReadWrapper as cvw
logger = logging.getLogger(__name__)
def scale_video(vid_read, dim) -> Iterator[np.ndarray]:
    """Lazily yield every frame of ``vid_read`` resized to ``dim``."""
    yield from (cv2.resize(raw_frame, dim) for raw_frame in vid_read)
class ScaledVideo:
    """Wrap a video reader so that reported dimensions are multiplied by
    ``scale`` and iterated frames are resized accordingly."""

    def __init__(self, vid_read, vid_path: str, scale: float):
        self.vid_read = vid_read
        self.vid_path = vid_path
        self.scale = scale
        self.fps = vid_read.fps
        self.width = int(vid_read.width) * scale
        self.height = int(vid_read.height) * scale

    def reset(self):
        """Rewind the underlying reader to the first frame."""
        # XXX: In general CAP_PROP_POS_FRAMES will cause problems with
        # keyframes but okay in this case?
        self.vid_read.set(cv2.CAP_PROP_POS_FRAMES, 0)

    def __iter__(self) -> Iterator[np.ndarray]:
        raw_frames = iter(self.vid_read)
        if self.scale != 1:
            return scale_video(raw_frames, (self.width, self.height))
        return raw_frames
def limb_invisible(confidence, subskel):
    """Truthy when a limb should not be drawn: zero confidence, or a
    confidence below the configured threshold for this sub-skeleton."""
    # TODO when interpolating limbs, have special confidence-values reserved for that
    if confidence == 0:
        return True
    threshold = config.THRESHOLDS[subskel]
    return threshold and confidence < threshold
class SkelDraw:
    """Draws skeleton limbs and person-id annotations onto video frames."""

    def __init__(
        self, skel, conv_to_posetrack=False, ann_ids=True, scale=1,
    ):
        # skel: skeleton graph exposing ``iter_limbs`` and ``names``
        # conv_to_posetrack: convert each person to the PoseTrack keypoint layout
        # ann_ids: whether to draw the numeric person id near a shoulder
        # scale: multiplier applied to keypoint coordinates before drawing
        self.skel = skel
        self.conv_to_posetrack = conv_to_posetrack
        self.ann_ids = ann_ids
        self.scale = scale

    def draw_skel(self, frame, numarr):
        """Draw every sufficiently-confident limb of one skeleton on ``frame``."""
        for (x1, y1, c1), (x2, y2, c2), subskel in self.skel.iter_limbs(numarr):
            # Confidence values above 1 mark interpolated keypoints; subtract
            # 1 to recover the underlying confidence.
            interpolated = False
            if c1 > 1:
                interpolated = True
                c1 -= 1
            if c2 > 1:
                interpolated = True
                c2 -= 1
            c = min(c1, c2)  # a limb is only as confident as its weaker endpoint
            if limb_invisible(c, subskel):
                continue
            # Colour fades with decreasing confidence; interpolated limbs get
            # a distinct hue (channel order presumably BGR, as usual in OpenCV).
            if interpolated:
                color = (rnd(128 * (1 - c)), rnd(128 * (1 - c)), 255)
            else:
                color = (255, rnd(255 * (1 - c)), rnd(255 * (1 - c)))
            cv2.line(
                frame, (rnd(x1), rnd(y1)), (rnd(x2), rnd(y2)), color, 1,
            )

    def draw_ann(self, frame, pers_id, numarr):
        """Draw ``pers_id`` next to the rightmost visible shoulder."""
        if not self.ann_ids:
            return
        left_idx = self.skel.names.index("left shoulder")
        right_idx = self.skel.names.index("right shoulder")
        # NOTE(review): if ``names`` is a list, .index raises ValueError rather
        # than returning -1, so this guard can never fire — confirm intent.
        if left_idx == -1 or right_idx == -1:
            return
        if numarr[right_idx][0] > numarr[left_idx][0]:
            # Right shoulder (further right in the image)
            anchor = numarr[right_idx]
        else:
            # Left shoulder
            anchor = numarr[left_idx]
        x, y, c = anchor
        if c == 0:
            # Shoulder undetected: fall back to the lowest-index keypoint with
            # some confidence; give up if none exceeds 0.2.
            for x, y, c in numarr:
                if c > 0.2:
                    break
            else:
                return
        cvw.put_text(
            frame, str(pers_id), (rnd(x + 2), rnd(y + 2)), (0, 0, 255), scale=0.5
        )

    def draw_bundle(self, frame, bundle, iter=None):
        """Draw all (person_id, person) pairs of ``bundle`` onto ``frame``.

        NOTE(review): the ``iter`` parameter is unused and shadows the builtin.
        """
        numarrs = []
        for pers_id, person in bundle:
            if self.conv_to_posetrack:
                flat = person.as_posetrack()
            else:
                flat = person.flat()
            numarr = []
            # The flat keypoint list is (x, y, confidence) triples.
            for point in grouper(flat, 3):
                numarr.append([point[0] * self.scale, point[1] * self.scale, point[2]])
            numarrs.append(numarr)  # TODO why is numarr 138 long and the skeleton 137?
        # Draw all skeletons first, then the id labels on top.
        for numarr in numarrs:
            self.draw_skel(frame, numarr)
        for (pers_id, person), numarr in zip(bundle, numarrs):
            self.draw_ann(frame, pers_id, numarr)

    def get_hover(self, mouse_pos, bundle):
        """Skeletons have no hover preview; always returns None."""
        return None
def rot_bbox(bbox, angle):
    """Rotate the axis-aligned box ``bbox`` = (x1, y1, x2, y2) by ``angle``
    about its centre and return the four rotated corner points."""
    pts = bbox.reshape((2, 2))
    center = pts.sum(axis=0) / 2
    (x1, y1), (x2, y2) = pts
    # Corners in the same winding order as the original implementation.
    corners = np.array([[x1, y1], [x1, y2], [x2, y2], [x2, y1]])
    return (rot(angle) @ (corners - center).T).T + center
class FaceDraw:
    """Draws face-detector and face-chip bounding boxes and provides a hover
    preview of the extracted face chip."""

    def draw_bbox(self, frame, bbox, angle=0, color=(0, 0, 255)):
        """Draw ``bbox`` = (x1, y1, x2, y2) on ``frame``; a non-zero ``angle``
        draws the box rotated about its centre."""
        if angle != 0:
            points = rot_bbox(bbox, angle)
            points += np.array([0.5, 0.5])  # round-to-nearest before the int cast
            points = points.astype("int32")
            cv2.polylines(
                frame, [points], isClosed=True, color=color, thickness=1,
            )
        else:
            cv2.rectangle(
                frame,
                pt1=(bbox[0], bbox[1]),
                pt2=(bbox[2], bbox[3]),
                color=color,
                thickness=1,
            )

    def is_point_in_chip_bbox(self, point, chip_bbox):
        """True when ``point`` lies inside the axis-aligned hull of the rotated
        chip box; ``chip_bbox`` is (x1, y1, x2, y2, angle)."""
        return self.is_point_in_bbox(
            point, points_bbox_x1y1x2y2(rot_bbox(chip_bbox[:4], chip_bbox[4]))
        )

    def is_point_in_bbox(self, point, bbox):
        """True when ``point`` lies inside ``bbox`` (pygame Rect semantics:
        the right and bottom edges count as outside)."""
        # Imported lazily so pygame is only needed when hover testing is used.
        import pygame as pg

        rect = pg.Rect(bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1])
        return rect.collidepoint(point)

    def draw_bundle(self, frame, bundle):
        """Draw all detector boxes (0,255,0) and chip boxes (255,255,0) found
        in ``bundle`` (a mapping with optional 'fod_bbox'/'chip_bbox' lists)."""
        for fod_bbox in bundle.get("fod_bbox", ()):
            if fod_bbox is None:
                continue
            self.draw_bbox(frame, fod_bbox, color=(0, 255, 0))
        for chip_bbox in bundle.get("chip_bbox", ()):
            if chip_bbox is None:
                continue
            self.draw_bbox(
                frame, chip_bbox[:4], angle=chip_bbox[4], color=(255, 255, 0)
            )

    def get_hover(self, mouse_pos, bundle):
        """Return the face chip (converted RGB->BGR) whose detector or chip
        box contains ``mouse_pos``, or None when nothing is hovered."""
        for fod_bbox, chip_bbox, chip in zip_longest(
            bundle.get("fod_bbox", ()),
            bundle.get("chip_bbox", ()),
            bundle.get("chip", ()),
        ):
            if chip is not None and (
                (fod_bbox is not None and self.is_point_in_bbox(mouse_pos, fod_bbox))
                or (
                    chip_bbox is not None
                    and self.is_point_in_chip_bbox(mouse_pos, chip_bbox)
                )
            ):
                return cv2.cvtColor(chip, cv2.COLOR_RGB2BGR)
        return None
class VideoSticksWriter:
    """Writes stick-figure overlay frames to an mp4 file via OpenCV."""

    def __init__(
        self,
        out,
        width,
        height,
        fps,
        skel,
        add_cuts=True,
        number_joints=False,
        add_frame_number=False,
        conv_to_posetrack=False,
        ann_ids=True,
        scale=1,
    ):
        self.out = orig_cvw.VideoWriter(out, fps=fps, fourcc="mp4v")
        self.width = width
        self.height = height
        self.fps = fps
        self.skel = skel
        self.add_cuts = add_cuts
        self.number_joints = number_joints
        self.conv_to_posetrack = conv_to_posetrack
        self.ann_ids = ann_ids
        self.scale = scale
        self.skel_draw = SkelDraw(skel, conv_to_posetrack, ann_ids, scale)
        self.cut_img = self.get_cut_img()

    def draw(self, frame, bundle=None):
        """Write one frame; a missing frame becomes a black canvas, and any
        skeleton bundle is rendered onto it first."""
        canvas = frame if frame is not None else self.get_empty_frame()
        if bundle is not None:
            self.skel_draw.draw_bundle(canvas, bundle)
        self.out.write(canvas)

    def add_cut(self):
        """Emit the shot-cut marker frame (no-op when cuts are disabled)."""
        if self.add_cuts:
            self.out.write(self.cut_img)

    def get_empty_frame(self):
        """Return an all-black uint8 frame of the configured size."""
        return np.zeros((int(self.height), int(self.width), 3), np.uint8)

    def get_cut_img(self):
        """Return a black frame carrying the 'Shot cut' text."""
        img = self.get_empty_frame()
        cvw.put_text(img, "Shot cut", (30, int(self.height) // 2), (255, 255, 255))
        return img
def drawsticks_shots(vid_read, stick_read, vid_write):
    """Draw per-shot skeleton bundles over the video frames.

    ``stick_read`` yields shots, each shot yielding one bundle per frame.
    When a shot is exhausted a cut marker is written before the next shot's
    bundles are consumed.  Returns immediately when there are no shots.
    """
    shots = iter(stick_read)
    current_shot = next(shots, None)
    if current_shot is None:
        return
    bundles = iter(current_shot)
    bundle = None
    for frame in vid_read:
        if current_shot is not None:
            bundle = next(bundles, None)
            if bundle is None:
                # Shot exhausted: advance to the next shot (if any).
                current_shot = next(shots, None)
                if current_shot is not None:
                    vid_write.add_cut()
                    bundles = iter(current_shot)
                    bundle = next(bundles, None)
        vid_write.draw(frame, bundle)
def drawsticks_unseg(vid_read, stick_read, vid_write):
    """Draw unsegmented skeleton bundles over the frames with a progress bar."""
    paired = zip_longest(vid_read, stick_read)
    for pair in tqdm(paired, total=stick_read.total_frames):
        vid_write.draw(*pair)
def get_skel(h5f, posetrack):
    """Pick the skeleton graph matching the file's pose mode, or the
    PoseTrack-18 skeleton when ``posetrack`` is requested."""
    mode = h5f.attrs["mode"]
    return POSETRACK18_SKEL if posetrack else MODE_SKELS[mode]
| [
"cv2.resize",
"skelshop.utils.vidreadwrapper.VidReadWrapper.put_text",
"cv2.polylines",
"more_itertools.recipes.grouper",
"skelshop.utils.geom.rnd",
"cv2.cvtColor",
"pygame.Rect",
"numpy.zeros",
"itertools.zip_longest",
"skelshop.utils.geom.rot",
"numpy.array",
"cv2.rectangle",
"opencv_wrapp... | [((530, 557), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (547, 557), False, 'import logging\n'), ((4306, 4421), 'numpy.array', 'np.array', (['[bbox_2pts[0], [bbox_2pts[0, 0], bbox_2pts[1, 1]], bbox_2pts[1], [bbox_2pts\n [1, 0], bbox_2pts[0, 1]]]'], {}), '([bbox_2pts[0], [bbox_2pts[0, 0], bbox_2pts[1, 1]], bbox_2pts[1], [\n bbox_2pts[1, 0], bbox_2pts[0, 1]]])\n', (4314, 4421), True, 'import numpy as np\n'), ((5391, 5454), 'pygame.Rect', 'pg.Rect', (['bbox[0]', 'bbox[1]', '(bbox[2] - bbox[0])', '(bbox[3] - bbox[1])'], {}), '(bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1])\n', (5398, 5454), True, 'import pygame as pg\n'), ((6857, 6906), 'opencv_wrapper.VideoWriter', 'orig_cvw.VideoWriter', (['out'], {'fps': 'fps', 'fourcc': '"""mp4v"""'}), "(out, fps=fps, fourcc='mp4v')\n", (6877, 6906), True, 'import opencv_wrapper as orig_cvw\n'), ((7760, 7798), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (7768, 7798), True, 'import numpy as np\n'), ((7925, 7990), 'skelshop.utils.vidreadwrapper.VidReadWrapper.put_text', 'cvw.put_text', (['img', '"""Shot cut"""', '(30, height // 2)', '(255, 255, 255)'], {}), "(img, 'Shot cut', (30, height // 2), (255, 255, 255))\n", (7937, 7990), True, 'from skelshop.utils.vidreadwrapper import VidReadWrapper as cvw\n'), ((8683, 8716), 'itertools.zip_longest', 'zip_longest', (['vid_read', 'stick_read'], {}), '(vid_read, stick_read)\n', (8694, 8716), False, 'from itertools import zip_longest\n'), ((657, 679), 'cv2.resize', 'cv2.resize', (['frame', 'dim'], {}), '(frame, dim)\n', (667, 679), False, 'import cv2\n'), ((3740, 3756), 'more_itertools.recipes.grouper', 'grouper', (['flat', '(3)'], {}), '(flat, 3)\n', (3747, 3756), False, 'from more_itertools.recipes import grouper\n'), ((4722, 4742), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (4730, 4742), True, 'import numpy as np\n'), ((4799, 4870), 
'cv2.polylines', 'cv2.polylines', (['frame', '[points]'], {'isClosed': '(True)', 'color': 'color', 'thickness': '(1)'}), '(frame, [points], isClosed=True, color=color, thickness=1)\n', (4812, 4870), False, 'import cv2\n'), ((4928, 5027), 'cv2.rectangle', 'cv2.rectangle', (['frame'], {'pt1': '(bbox[0], bbox[1])', 'pt2': '(bbox[2], bbox[3])', 'color': 'color', 'thickness': '(1)'}), '(frame, pt1=(bbox[0], bbox[1]), pt2=(bbox[2], bbox[3]), color=\n color, thickness=1)\n', (4941, 5027), False, 'import cv2\n'), ((3380, 3390), 'skelshop.utils.geom.rnd', 'rnd', (['(x + 2)'], {}), '(x + 2)\n', (3383, 3390), False, 'from skelshop.utils.geom import rnd, rot\n'), ((3392, 3402), 'skelshop.utils.geom.rnd', 'rnd', (['(y + 2)'], {}), '(y + 2)\n', (3395, 3402), False, 'from skelshop.utils.geom import rnd, rot\n'), ((4502, 4512), 'skelshop.utils.geom.rot', 'rot', (['angle'], {}), '(angle)\n', (4505, 4512), False, 'from skelshop.utils.geom import rnd, rot\n'), ((6487, 6524), 'cv2.cvtColor', 'cv2.cvtColor', (['chip', 'cv2.COLOR_RGB2BGR'], {}), '(chip, cv2.COLOR_RGB2BGR)\n', (6499, 6524), False, 'import cv2\n'), ((2373, 2391), 'skelshop.utils.geom.rnd', 'rnd', (['(128 * (1 - c))'], {}), '(128 * (1 - c))\n', (2376, 2391), False, 'from skelshop.utils.geom import rnd, rot\n'), ((2393, 2411), 'skelshop.utils.geom.rnd', 'rnd', (['(128 * (1 - c))'], {}), '(128 * (1 - c))\n', (2396, 2411), False, 'from skelshop.utils.geom import rnd, rot\n'), ((2466, 2484), 'skelshop.utils.geom.rnd', 'rnd', (['(255 * (1 - c))'], {}), '(255 * (1 - c))\n', (2469, 2484), False, 'from skelshop.utils.geom import rnd, rot\n'), ((2486, 2504), 'skelshop.utils.geom.rnd', 'rnd', (['(255 * (1 - c))'], {}), '(255 * (1 - c))\n', (2489, 2504), False, 'from skelshop.utils.geom import rnd, rot\n'), ((2552, 2559), 'skelshop.utils.geom.rnd', 'rnd', (['x1'], {}), '(x1)\n', (2555, 2559), False, 'from skelshop.utils.geom import rnd, rot\n'), ((2561, 2568), 'skelshop.utils.geom.rnd', 'rnd', (['y1'], {}), '(y1)\n', (2564, 2568), 
False, 'from skelshop.utils.geom import rnd, rot\n'), ((2572, 2579), 'skelshop.utils.geom.rnd', 'rnd', (['x2'], {}), '(x2)\n', (2575, 2579), False, 'from skelshop.utils.geom import rnd, rot\n'), ((2581, 2588), 'skelshop.utils.geom.rnd', 'rnd', (['y2'], {}), '(y2)\n', (2584, 2588), False, 'from skelshop.utils.geom import rnd, rot\n')] |
#! /usr/bin/env python
import rospy
import numpy as np
# Controller
from lenny_control.trajectory import TrajectoryController
if __name__ == '__main__':
    # Trajectory-controller demo: move the active joints to 3 random goals.
    np.set_printoptions(precision=4, suppress=True)
    rospy.init_node('example_trajectory_controller')
    controller = TrajectoryController()
    # Set a random goal for the arms
    joint_names = controller.get_joint_names()
    # Keep only the joints whose name does not contain 'b2'
    active_joints = [name for name in joint_names if 'b2' not in name]
    controller.set_active_joints(active_joints)
    rospy.loginfo('Moving all the robot joints 3 times...')
    for i in range(3):
        controller.clear_points()
        qstart = controller.get_active_joint_positions()
        # Random goal configuration, one value in [-0.25, 0.25] per joint
        qgoal = 0.25*(2*np.random.rand(len(active_joints)) - 1)
        controller.add_point(qstart, 0.0)
        controller.add_point(qgoal, 3.0)
        controller.start()
        controller.wait()
        # Report how far the joints ended up from the requested goal
        error = qgoal - controller.get_active_joint_positions()
        rospy.loginfo('Trajectory {0} error: {1}'.format(i+1, error))
| [
"rospy.loginfo",
"numpy.set_printoptions",
"rospy.init_node",
"lenny_control.trajectory.TrajectoryController"
] | [((157, 204), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'suppress': '(True)'}), '(precision=4, suppress=True)\n', (176, 204), True, 'import numpy as np\n'), ((207, 255), 'rospy.init_node', 'rospy.init_node', (['"""example_trajectory_controller"""'], {}), "('example_trajectory_controller')\n", (222, 255), False, 'import rospy\n'), ((271, 293), 'lenny_control.trajectory.TrajectoryController', 'TrajectoryController', ([], {}), '()\n', (291, 293), False, 'from lenny_control.trajectory import TrajectoryController\n'), ((491, 546), 'rospy.loginfo', 'rospy.loginfo', (['"""Moving all the robot joints 3 times..."""'], {}), "('Moving all the robot joints 3 times...')\n", (504, 546), False, 'import rospy\n')] |
import numpy as np
import matplotlib.pyplot as plt
import time
r,n = 0.3,50
p=1
u=1
t=0.02
def grid(r, n, rng=(0.0, 1.0)):
    """Return n+1 node coordinates spanning ``rng`` with geometrically
    stretched cell widths.

    Cell i has width x0 * r**i, where x0 is chosen so that the n cells
    exactly fill the interval.  r == 1 gives a uniform grid; r < 1 clusters
    nodes toward the right end, r > 1 toward the left.

    Improvements over the original: the accumulator no longer shadows the
    builtin ``sum``, the two O(n) Python loops are vectorized, and the
    hard-coded (0, 1) range is now a backward-compatible ``rng`` parameter.

    Parameters
    ----------
    r : float
        Geometric growth ratio between consecutive cell widths.
    n : int
        Number of cells (the grid has n + 1 nodes).
    rng : tuple of float, optional
        (start, end) of the interval; defaults to (0.0, 1.0).
    """
    ratios = np.power(r, np.arange(n))          # relative cell widths r**0 .. r**(n-1)
    x0 = (rng[1] - rng[0]) / ratios.sum()         # first cell width (geometric series sum)
    x = np.empty(n + 1)
    x[0] = rng[0]
    x[1:] = rng[0] + np.cumsum(x0 * ratios)       # running sum of cell widths
    return x
# Solve the 1-D steady convection-diffusion equation on the stretched grid
# with a tridiagonal (Thomas) solver and plot the result.
x = grid(r, n)
# Tridiagonal system coefficients: a = sub-, b = main, c = super-diagonal, d = RHS
a = np.zeros(n+1)
b = np.zeros(n+1)
c = np.zeros(n+1)
d = np.zeros(n+1)
phi = np.zeros(n+1)
# Dirichlet boundary conditions phi(x=0) = 0, phi(x=1) = 1
bc = (0, 1)
phi[0] = bc[0]
phi[n] = bc[1]
# Assemble the finite-difference coefficients on the non-uniform grid.
for i in range(1, n):
    gamma = ((-p*u)/(x[i+1]-x[i-1])) - ((2*t)/((x[i]-x[i-1])*(x[i+1]-x[i-1])))
    alpha = ((2*t)/((x[i+1]-x[i-1])*(x[i+1]-x[i]))) + ((2*t)/((x[i+1]-x[i-1])*(x[i]-x[i-1])))
    beta = ((p*u)/(x[i+1]-x[i-1])) - ((2*t)/((x[i+1]-x[i])*(x[i+1]-x[i-1])))
    # BUG FIX: the original compared with "i is 1" / "i is n-1".  Identity
    # comparison of ints is a CPython implementation detail (and a
    # SyntaxWarning since Python 3.8); use equality instead.
    if i == 1:
        # First interior node: fold the left boundary value into the RHS.
        b[i] = alpha
        c[i] = beta
        d[i] = 0 - (gamma*bc[0])
    elif i == n-1:
        # Last interior node: fold the right boundary value into the RHS.
        b[i] = alpha
        a[i] = gamma
        d[i] = 0 - (beta*bc[1])
    else:
        a[i] = gamma
        b[i] = alpha
        c[i] = beta
# Thomas algorithm: forward elimination ...
for i in range(2, n):
    a[i] = a[i]/b[i-1]
    b[i] = b[i] - a[i]*c[i-1]
    d[i] = d[i] - a[i]*d[i-1]
# ... followed by back substitution.
phi[n-1] = d[n-1] / b[n-1]
for i in range(n-2, 0, -1):
    phi[i] = (d[i] - c[i]*phi[i+1])/b[i]
print(phi)
print(x)
plt.plot(x, phi, 'r+')
plt.title("phi vs. x")
plt.ylabel('phi')
plt.xlabel('x')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((402, 417), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (410, 417), True, 'import numpy as np\n'), ((421, 436), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (429, 436), True, 'import numpy as np\n'), ((440, 455), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (448, 455), True, 'import numpy as np\n'), ((459, 474), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (467, 474), True, 'import numpy as np\n'), ((480, 495), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (488, 495), True, 'import numpy as np\n'), ((1396, 1418), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'phi', '"""r+"""'], {}), "(x, phi, 'r+')\n", (1404, 1418), True, 'import matplotlib.pyplot as plt\n'), ((1419, 1441), 'matplotlib.pyplot.title', 'plt.title', (['"""phi vs. x"""'], {}), "('phi vs. x')\n", (1428, 1441), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1460), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""phi"""'], {}), "('phi')\n", (1453, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1462, 1477), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (1472, 1477), True, 'import matplotlib.pyplot as plt\n'), ((1479, 1489), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1487, 1489), True, 'import matplotlib.pyplot as plt\n'), ((128, 143), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (136, 143), True, 'import numpy as np\n')] |
from generic_neural_network import Dataset, Learner
import numpy as np
# Example with a train set from Google and a test set from the filesystem
IMAGE_DIMENSIONS = 400
# ====== Create Train Set =======
train_set = Dataset()
# Label 0 is 'happy face', label 1 is 'sad face' (the original "cat/dog"
# comment did not match the categories below).
train_set.add_category([0, 4000, 'happy face'])
train_set.add_category([1, 4000, 'sad face'])
train_set.IMAGE_DIMENSIONS = IMAGE_DIMENSIONS
train_set.run()
train_set.export_compressed(train_set.relative_to_absolute('../../dataset/'))
train_labels, train_data = train_set.get_shuffled_label_data_separated()
# BUG FIX: "'type ' + type(train_data)" raised TypeError (str + type);
# convert the type object to str before concatenating.
print('type ' + str(type(train_data)))
# ====== Create Test Set =======
test_set = Dataset()
# Label 0 loads the happy test images, label 1 the sad ones (count 0 means
# the category is read from the given filesystem path).
test_set.add_category([0, 0, train_set.relative_to_absolute('../../dataset/test/happy/')])
test_set.add_category([1, 0, train_set.relative_to_absolute('../../dataset/test/sad/')])
test_set.IMAGE_DIMENSIONS = IMAGE_DIMENSIONS
test_set.run()
test_labels, test_data = test_set.get_shuffled_label_data_separated()
# ======= Train a network ======
genie = Learner(train_labels, train_data, len(train_set.get_categories()))
genie.EPOCHS = 20
genie.train()
# ======= Test the network =======
genie.predict(np.asarray(test_data), labels=test_labels)
| [
"generic_neural_network.Dataset",
"numpy.asarray"
] | [((215, 224), 'generic_neural_network.Dataset', 'Dataset', ([], {}), '()\n', (222, 224), False, 'from generic_neural_network import Dataset, Learner\n'), ((631, 640), 'generic_neural_network.Dataset', 'Dataset', ([], {}), '()\n', (638, 640), False, 'from generic_neural_network import Dataset, Learner\n'), ((1164, 1185), 'numpy.asarray', 'np.asarray', (['test_data'], {}), '(test_data)\n', (1174, 1185), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""deep_dream.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1BXwGWfLaUvZYWHyYdire26VsLMRgXRKX
"""
import tensorflow as tf
import matplotlib.pyplot as plt
import PIL.Image
import numpy as np
from scipy.ndimage.filters import gaussian_filter
import inception5h
# Download the Inception 5h model files if they are not already cached.
inception5h.maybe_download()
model=inception5h.Inception5h()
# number of layers we will use for deep dream
len(model.layer_tensors)  # notebook artifact: the value is displayed, not stored
#loading image
def load_image(filename):
    """Load an image file and return it as a float32 numpy array."""
    return np.float32(PIL.Image.open(filename))
#saving image
def save_image(image, filename):
    """Clip ``image`` to [0, 255], convert to uint8 and write it as a jpeg."""
    pixels = np.clip(image, 0.0, 255.0).astype(np.uint8)
    with open(filename, 'wb') as file:
        PIL.Image.fromarray(pixels).save(file, 'jpeg')
def plot_image(image, use_matplotlib=False):
    """Display an image, either inline via PIL (default) or with matplotlib.

    The original code hard-wired ``if False:`` around the matplotlib branch,
    leaving it unreachable; it is now selectable through the
    backward-compatible ``use_matplotlib`` parameter (the default preserves
    the old behaviour).
    """
    if use_matplotlib:
        # Scale pixel values to [0, 1] for imshow.
        scaled = np.clip(image / 255.0, 0.0, 1.0)
        plt.imshow(scaled, interpolation='lanczos')
        plt.show()
    else:
        # Clip to the valid 8-bit range and show via IPython's display().
        clipped = np.clip(image, 0.0, 255.0).astype(np.uint8)
        display(PIL.Image.fromarray(clipped))
#normalizing image used for plotting gradient
def normalize_image(x):
    """Linearly rescale the array ``x`` to the range [0, 1].

    A constant input made the original divide by zero (yielding NaN/inf
    and breaking the subsequent plot); such inputs now map to zeros.
    """
    # Extreme pixel values of the input.
    x_min = x.min()
    x_max = x.max()
    span = x_max - x_min
    if span == 0:
        # Constant image: avoid 0/0 and return zeros of matching shape.
        return np.zeros_like(x, dtype=np.float64)
    return (x - x_min) / span
def plot_gradient(gradient):
    """Show the gradient as an image after rescaling it into [0, 1]."""
    plt.imshow(normalize_image(gradient), interpolation='bilinear')
    plt.show()
def resize_image(image, size=None, factor=None):
    """Resize ``image`` either by a scaling ``factor`` or to an explicit
    ``size`` (height, width, ...); returns a float32 array."""
    if factor is not None:
        # Scale height and width, truncating to whole pixels.
        size = (np.array(image.shape[0:2]) * factor).astype(int)
    else:
        size = size[0:2]
    # PIL expects (width, height) — the reverse of numpy's (height, width).
    pil_size = tuple(reversed(size))
    clipped = np.clip(image, 0.0, 255.0).astype(np.uint8)
    resized = PIL.Image.fromarray(clipped).resize(pil_size, PIL.Image.LANCZOS)
    # Convert the 8-bit pixels back to floating point.
    return np.float32(resized)
return img_resized
import math
import random
def get_tile_size(num_pixels, tile_size=400):
    """Return a tile size that splits ``num_pixels`` into a whole number of
    roughly ``tile_size``-pixel tiles (never fewer than one tile)."""
    num_tiles = max(1, int(round(num_pixels / tile_size)))
    return math.ceil(num_pixels / num_tiles)
def tiled_gradient(gradient, image, tile_size=400):
    """Evaluate ``gradient`` w.r.t. ``image`` tile by tile and assemble the
    pieces into a full-image gradient array.

    Tile boundaries are randomly shifted on each call so tile seams do not
    accumulate across iterations; each tile's gradient is normalised by its
    standard deviation.  Relies on the module-level ``model`` and
    ``session`` objects (presumably a TensorFlow session created elsewhere
    — confirm).
    """
    grad = np.zeros_like(image)
    x_max, y_max, _ = image.shape
    # x-axis tile size
    x_tile_size = get_tile_size(num_pixels=x_max, tile_size=tile_size)
    x_tile_size4 = x_tile_size//4
    # y-axis tile size
    y_tile_size = get_tile_size(num_pixels=y_max, tile_size=tile_size)
    y_tile_size4 = y_tile_size//4
    # random start-position for the tiles on the x-axis b/w -3/4 and -1/4 of tile size
    x_start = random.randint(-3*x_tile_size4, -x_tile_size4)
    while(x_start < x_max):
        x_end = x_start + x_tile_size
        # Clamp the tile to the image bounds.
        x_start_lim = max(x_start, 0)
        x_end_lim = min(x_end, x_max)
        y_start = random.randint(-3*y_tile_size4, -y_tile_size4)
        while(y_start < y_max):
            y_end = y_start + y_tile_size
            y_start_lim = max(y_start, 0)
            y_end_lim = min(y_end, y_max)
            img_tile = image[x_start_lim:x_end_lim, y_start_lim:y_end_lim, :]
            feed_dict = model.create_feed_dict(image=img_tile)
            # calculate gradient value
            g = session.run(gradient, feed_dict=feed_dict)
            # normalize gradient for tile
            g /= (np.std(g) + 1e-8)
            # storing gradient of tile
            grad[x_start_lim:x_end_lim, y_start_lim:y_end_lim, :] = g
            # next y-start position
            y_start = y_end
        # next x-start position
        x_start = x_end
    return grad
#optimization for deep dream algorithm(i.e,calculating gradient of particular inception model with respect to input image and adding it to input image
#to increase the mean value of the layer tensor , it is repeated many times
#to get the features in the inception model layer)
#Using Gradient Ascent to optimize image
def optimize_image(layer_tensor, image, num_iterations=10, step_size=3.0, tile_size=400, show_gradient=False):
    """Run gradient ascent on ``image`` to amplify the features that
    maximise ``layer_tensor`` (the DeepDream core loop).

    Each iteration computes the tiled gradient, blends three Gaussian-blurred
    copies of it (different sigmas) to reduce artifacts, and adds the result
    to the image with a gradient-normalised step.  Returns the new image.
    """
    img = image.copy()
    # NOTE(review): "iamge" is a typo in the printed text (left unchanged here).
    print("iamge before:")
    plot_image(img)
    print("processing_image", end="")
    gradient = model.get_gradient(layer_tensor)
    for i in range(num_iterations):
        grad = tiled_gradient(gradient=gradient, image=img, tile_size=tile_size)
        # Blur amount grows with the iteration number.
        sigma = (i*4.0)/num_iterations+0.5
        grad_smooth1 = gaussian_filter(grad, sigma=sigma)
        grad_smooth2 = gaussian_filter(grad, sigma=sigma*2)
        grad_smooth3 = gaussian_filter(grad, sigma=sigma*0.5)
        grad = (grad_smooth1+grad_smooth2+grad_smooth3)
        # Scale the step-size according to the gradient-values (the epsilon
        # guards against a zero standard deviation).
        step_size_scaled = step_size/(np.std(grad)+1e-8)
        # Gradient-ascent update: add the (scaled) gradient to the image.
        img = img + grad*step_size_scaled
        if show_gradient:
            msg = "gradient min:{0:>9.6f},max:{1:>9.6f},step_size:{2:>9.6f}".format(grad.min(), grad.max(), step_size_scaled)
            print(msg)
            plot_gradient(grad)
        else:
            print(".", end="")
    print("image_after:")
    plot_image(img)
    return img
def recursive_optimize(layer_tensor, image,
                       num_repeats=4, rescale_factor=0.7, blend=0.2,
                       num_iterations=10, step_size=3.0,
                       tile_size=400):
    """Amplify layer patterns at multiple image scales.

    The image is recursively blurred and downscaled `num_repeats` times;
    each scale is run through optimize_image() and the result is blended
    back into the next larger scale. This makes the DeepDream patterns
    appear at several sizes instead of only one.

    Parameters:
        layer_tensor: Tensor in the Inception graph to be maximised.
        image: Starting image (numpy array).
        num_repeats: Number of downscaling recursions remaining.
        rescale_factor: Downscaling factor applied per recursion.
        blend: Weight of the original image when blending with the
            upscaled processed image.
        num_iterations, step_size, tile_size: Passed to optimize_image().

    Returns:
        The processed image for this recursion level.
    """
    if num_repeats > 0:
        # Blur before downscaling to avoid aliasing artifacts; the colour
        # channel (third axis) is left unblurred so the image stays colourful.
        blurred = gaussian_filter(image, sigma=(0.5, 0.5, 0.0))
        downscaled = resize_image(image=blurred,
                                 factor=rescale_factor)
        # Recurse on the smaller image with one fewer repeat.
        processed = recursive_optimize(layer_tensor=layer_tensor,
                                       image=downscaled,
                                       num_repeats=num_repeats - 1,
                                       rescale_factor=rescale_factor,
                                       blend=blend,
                                       num_iterations=num_iterations,
                                       step_size=step_size,
                                       tile_size=tile_size)
        # Bring the processed image back to this level's size and mix it
        # with the original to retain some of the source detail.
        upscaled = resize_image(image=processed, size=image.shape)
        image = blend * image + (1.0 - blend) * upscaled
    print("Recursive level:", num_repeats)
    # Amplify patterns at the current scale.
    return optimize_image(layer_tensor=layer_tensor,
                          image=image,
                          num_iterations=num_iterations,
                          step_size=step_size,
                          tile_size=tile_size)
def recursive_optimize(layer_tensor, image, num_repeats=4, rescale_factor=0.7, blend=0.2, num_iterations=10, step_size=3, tile_size=400):
    """Multi-scale DeepDream: downscale recursively, amplify, blend back up.

    NOTE(review): this redefines the recursive_optimize above with identical
    behaviour; only this later definition is the one actually called.

    Parameters mirror optimize_image() plus:
        num_repeats: Remaining downscaling recursions (base case at 0).
        rescale_factor: Per-level downscaling factor.
        blend: Weight of the original image in the final blend.

    Returns:
        The processed image for this recursion level.
    """
    if num_repeats > 0:
        # Blur (colour channel excluded) to prevent downsampling artifacts.
        blurred = gaussian_filter(image, sigma=(0.5, 0.5, 0.0))
        downscaled = resize_image(image=blurred, factor=rescale_factor)
        # Recursive call on the smaller image.
        smaller_result = recursive_optimize(layer_tensor=layer_tensor,
                                            image=downscaled,
                                            num_repeats=num_repeats - 1,
                                            rescale_factor=rescale_factor,
                                            blend=blend,
                                            num_iterations=num_iterations,
                                            step_size=step_size,
                                            tile_size=tile_size)
        # Upscale back and blend with the original image at this level.
        upscaled = resize_image(image=smaller_result, size=image.shape)
        image = blend * image + (1.0 - blend) * upscaled
    print("recursive calls:", num_repeats)
    # Run the DeepDream amplification at the current scale.
    return optimize_image(layer_tensor=layer_tensor,
                          image=image,
                          num_iterations=num_iterations,
                          step_size=step_size,
                          tile_size=tile_size)
# --- Demo driver: run DeepDream on a sample photo at several Inception layers. ---
# TF1-style interactive session bound to the Inception model's graph.
session=tf.InteractiveSession(graph=model.graph)
# Load the input photo from disk and show it.
image=load_image(filename='WhatsApp Image 2019-03-18 at 3.07.12 PM.jpeg')
plot_image(image)
# Pick an early layer to amplify (lower layers -> simpler patterns).
layer_tensor=model.layer_tensors[4]
# Bare expression: notebook-style inspection of the tensor; has no effect as a script.
layer_tensor
# Single-scale optimisation with gradient diagnostics enabled.
img_result = optimize_image(layer_tensor, image,num_iterations=10, step_size=6.0, tile_size=400,show_gradient=True)
# Deeper layer, multi-scale (recursive) optimisation.
layer_tensor = model.layer_tensors[6]
img_result = recursive_optimize(layer_tensor=layer_tensor, image=image,num_iterations=10, step_size=3.0, rescale_factor=0.7,num_repeats=4, blend=0.2)
# Restrict to the first three feature channels of layer 7.
layer_tensor = model.layer_tensors[7][:,:,:,0:3]
img_result = recursive_optimize(layer_tensor=layer_tensor, image=image,
num_iterations=10, step_size=3.0, rescale_factor=0.7,
num_repeats=4, blend=0.2)
# Single feature channel of a late layer (higher layers -> complex patterns).
layer_tensor = model.layer_tensors[11][:,:,:,0]
img_result = recursive_optimize(layer_tensor=layer_tensor, image=image,
num_iterations=10, step_size=3.0, rescale_factor=0.7,
num_repeats=4, blend=0.2)
| [
"scipy.ndimage.filters.gaussian_filter",
"numpy.zeros_like",
"inception5h.maybe_download",
"matplotlib.pyplot.show",
"random.randint",
"math.ceil",
"numpy.std",
"matplotlib.pyplot.imshow",
"numpy.float32",
"numpy.clip",
"inception5h.Inception5h",
"numpy.array",
"tensorflow.InteractiveSession... | [((361, 389), 'inception5h.maybe_download', 'inception5h.maybe_download', ([], {}), '()\n', (387, 389), False, 'import inception5h\n'), ((397, 422), 'inception5h.Inception5h', 'inception5h.Inception5h', ([], {}), '()\n', (420, 422), False, 'import inception5h\n'), ((9264, 9304), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'graph': 'model.graph'}), '(graph=model.graph)\n', (9285, 9304), True, 'import tensorflow as tf\n'), ((585, 602), 'numpy.float32', 'np.float32', (['image'], {}), '(image)\n', (595, 602), True, 'import numpy as np\n'), ((658, 684), 'numpy.clip', 'np.clip', (['image', '(0.0)', '(255.0)'], {}), '(image, 0.0, 255.0)\n', (665, 684), True, 'import numpy as np\n'), ((1558, 1615), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gradient_normalized'], {'interpolation': '"""bilinear"""'}), "(gradient_normalized, interpolation='bilinear')\n", (1568, 1615), True, 'import matplotlib.pyplot as plt\n'), ((1617, 1627), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1625, 1627), True, 'import matplotlib.pyplot as plt\n'), ((1979, 2005), 'numpy.clip', 'np.clip', (['image', '(0.0)', '(255.0)'], {}), '(image, 0.0, 255.0)\n', (1986, 2005), True, 'import numpy as np\n'), ((2203, 2226), 'numpy.float32', 'np.float32', (['img_resized'], {}), '(img_resized)\n', (2213, 2226), True, 'import numpy as np\n'), ((2427, 2460), 'math.ceil', 'math.ceil', (['(num_pixels / num_tiles)'], {}), '(num_pixels / num_tiles)\n', (2436, 2460), False, 'import math\n'), ((2550, 2570), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (2563, 2570), True, 'import numpy as np\n'), ((2943, 2991), 'random.randint', 'random.randint', (['(-3 * x_tile_size4)', '(-x_tile_size4)'], {}), '(-3 * x_tile_size4, -x_tile_size4)\n', (2957, 2991), False, 'import random\n'), ((914, 946), 'numpy.clip', 'np.clip', (['(image / 255.0)', '(0.0)', '(1.0)'], {}), '(image / 255.0, 0.0, 1.0)\n', (921, 946), True, 'import numpy as np\n'), 
((987, 1029), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'interpolation': '"""lanczos"""'}), "(image, interpolation='lanczos')\n", (997, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1034, 1044), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1042, 1044), True, 'import matplotlib.pyplot as plt\n'), ((1088, 1114), 'numpy.clip', 'np.clip', (['image', '(0.0)', '(255.0)'], {}), '(image, 0.0, 255.0)\n', (1095, 1114), True, 'import numpy as np\n'), ((3134, 3182), 'random.randint', 'random.randint', (['(-3 * y_tile_size4)', '(-y_tile_size4)'], {}), '(-3 * y_tile_size4, -y_tile_size4)\n', (3148, 3182), False, 'import random\n'), ((4612, 4646), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['grad'], {'sigma': 'sigma'}), '(grad, sigma=sigma)\n', (4627, 4646), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((4663, 4701), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['grad'], {'sigma': '(sigma * 2)'}), '(grad, sigma=sigma * 2)\n', (4678, 4701), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((4716, 4756), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['grad'], {'sigma': '(sigma * 0.5)'}), '(grad, sigma=sigma * 0.5)\n', (4731, 4756), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((6607, 6656), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['image'], {'sigma': '(sigma, sigma, 0.0)'}), '(image, sigma=(sigma, sigma, 0.0))\n', (6622, 6656), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((8463, 8512), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['image'], {'sigma': '(sigma, sigma, 0.0)'}), '(image, sigma=(sigma, sigma, 0.0))\n', (8478, 8512), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((1738, 1764), 'numpy.array', 'np.array', (['image.shape[0:2]'], {}), '(image.shape[0:2])\n', (1746, 1764), True, 'import numpy as np\n'), ((3608, 3617), 'numpy.std', 'np.std', (['g'], {}), '(g)\n', (3614, 3617), 
True, 'import numpy as np\n'), ((4965, 4977), 'numpy.std', 'np.std', (['grad'], {}), '(grad)\n', (4971, 4977), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn.init as weight_init
from torch import nn
from torch.nn import Parameter
from src.models.samplers.arch_sampler import ArchSampler
class StaticArchGenerator(ArchSampler):
    """Architecture sampler backed by a single static Bernoulli distribution.

    Holds one learnable logit per distribution dimension; forward() returns
    the corresponding probabilities (sigmoid of the logits).
    """

    def __init__(self, initial_p, *args, **kwargs):
        """Initialise every logit so that sigmoid(logit) == initial_p.

        An initial_p of 1 (or more) maps to an infinite logit, i.e. a
        probability pinned at exactly 1.
        """
        super().__init__(*args, **kwargs)
        self.params = Parameter(torch.Tensor(1, self.distrib_dim))
        if initial_p < 1:
            logit = np.log(initial_p / (1 - initial_p))
        else:
            logit = float('inf')
        weight_init.constant_(self.params, logit)

    def forward(self, z=None):
        """Return the per-dimension probabilities; `z` is ignored."""
        if self.frozen and self.training:
            raise RuntimeError('Trying to sample from a frozen distrib gen in training mode')
        return self.params.sigmoid()

    def entropy(self):
        """Entropy of the Bernoulli distribution defined by the logits."""
        probs = self.params.sigmoid()
        bernoulli = torch.distributions.Bernoulli(probs)
        return bernoulli.entropy()

    def remove_var(self, name):
        """Drop the variable `name` and its logit from the distribution."""
        assert self.var_names
        self.distrib_dim -= 1
        removed = self.var_names.index(name)
        self.var_names.remove(name)
        keep_mask = torch.ones_like(self.params).bool()
        keep_mask[0, removed] = 0
        # Rebuild the parameter without the removed column.
        self.params = nn.Parameter(self.params[keep_mask].unsqueeze(0))

    def is_deterministic(self):
        """True iff every probability is exactly 0 or 1 (p == p**2)."""
        probs = self()
        return torch.equal(probs, probs ** 2)
| [
"torch.ones_like",
"numpy.log",
"torch.equal",
"torch.nn.init.constant_",
"torch.Tensor"
] | [((489, 530), 'torch.nn.init.constant_', 'weight_init.constant_', (['self.params', 'logit'], {}), '(self.params, logit)\n', (510, 530), True, 'import torch.nn.init as weight_init\n'), ((1307, 1341), 'torch.equal', 'torch.equal', (['distrib', '(distrib ** 2)'], {}), '(distrib, distrib ** 2)\n', (1318, 1341), False, 'import torch\n'), ((346, 379), 'torch.Tensor', 'torch.Tensor', (['(1)', 'self.distrib_dim'], {}), '(1, self.distrib_dim)\n', (358, 379), False, 'import torch\n'), ((397, 432), 'numpy.log', 'np.log', (['(initial_p / (1 - initial_p))'], {}), '(initial_p / (1 - initial_p))\n', (403, 432), True, 'import numpy as np\n'), ((1093, 1121), 'torch.ones_like', 'torch.ones_like', (['self.params'], {}), '(self.params)\n', (1108, 1121), False, 'import torch\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.