code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from __future__ import print_function
from __future__ import division
import numpy as np
import tapetool.helpers as tth
import tapetool.analysis as tta
import tapetool.filters as ttf
def main(wavfile, cal):
    """Analyse one tape-test WAV recording and return a dict of metrics.

    Parameters
    ----------
    wavfile : str
        Path to the test recording.
    cal : float
        Calibration offset in dB, subtracted from every measured level.

    Returns
    -------
    dict
        Reference level, noise floor, sensitivities, THD, MOL and SOL
        results plus the raw curves used to derive MOL/SOL.
    """
    result = dict()
    result['cal'] = cal
    result['source'] = dict()
    result['source']['filename'] = wavfile
    fs, data = tth.read_wav(wavfile)
    result['source']['fs'] = fs
    result['source']['samples'] = len(data)
    result['source']['seconds'] = len(data) / fs
    # where to find what parts of audio in the test file
    i_reflevel = tth.cut(fs, 0.5, 1.)
    i_noise = tth.cut(fs, 2.0, 3.)
    i_s01 = tth.cut(fs, 5.5, 1.)
    i_s63 = tth.cut(fs, 6.5, 1.)
    i_s10 = tth.cut(fs, 7.5, 1.)
    i_s16 = tth.cut(fs, 8.5, 1.)
    i_mol = tth.cut(fs, 10.0, 10.)
    i_sol10 = tth.cut(fs, 20.5, 5.)
    i_sol16 = tth.cut(fs, 26.0, 5.)
    # select the right filter for THD measurements
    filter_thd = ttf.thd_for_1k
    # reference level
    result['reflevel'] = tth.db(tth.rms(data[i_reflevel])) - cal
    # noise
    result['noise'] = tth.db(tth.rms(data[i_noise])) - cal
    # sensitivity
    result['s01'] = tth.db(tth.rms(data[i_s01])) - cal
    result['s63'] = tth.db(tth.rms(data[i_s63])) - cal
    result['s10'] = tth.db(tth.rms(data[i_s10])) - cal
    result['s16'] = tth.db(tth.rms(data[i_s16])) - cal
    # THD (bug fix: this was computed twice, once in the reference-level
    # section and again here with the identical expression; compute it once)
    result['thd'] = tta.db(.01 * tta.thd(fs, data[i_reflevel], filter_thd))
    # MOL
    t, lvl, thd = tta.mol(fs, data[i_mol], 0.05, filter_thd)
    result['mol'] = tta.find_mol(lvl, thd) - cal
    result['mol_data'] = dict()
    result['mol_data']['t'] = list(t)
    result['mol_data']['lvl'] = list(lvl - cal)
    result['mol_data']['thd'] = list(tta.db(.01 * thd))
    # SOL10
    x, y = tta.sol(fs, data[i_sol10], 0.1)
    i = np.argmax(y)
    result['sol10'] = y[i] - cal
    result['sol10_data'] = dict()
    result['sol10_data']['at'] = x[i]
    result['sol10_data']['t'] = list(x)
    result['sol10_data']['lvl'] = list(y)
    # SOL16
    x, y = tta.sol(fs, data[i_sol16], 0.1)
    i = np.argmax(y)
    result['sol16'] = y[i] - cal
    result['sol16_data'] = dict()
    result['sol16_data']['at'] = x[i]
    result['sol16_data']['t'] = list(x)
    result['sol16_data']['lvl'] = list(y)
    return result
if __name__ == '__main__':
    import json
    out = list()
    cal = -20.51
    # each non-comment line of bias.txt is "<bias> <wavfile>"
    # (bug fix: both files were opened without ever being closed;
    # use context managers so the JSON output is reliably flushed)
    with open('bias.txt') as bias_file:
        for line in bias_file:
            if line.startswith('#'):
                continue
            b, wavfile = line.strip().split()
            out.append((float(b), main(wavfile, cal)))
    with open('test.json', 'w') as json_file:
        json.dump(out, json_file)
| [
"tapetool.helpers.rms",
"tapetool.helpers.read_wav",
"tapetool.analysis.thd",
"tapetool.analysis.mol",
"tapetool.helpers.cut",
"numpy.argmax",
"tapetool.analysis.db",
"tapetool.analysis.find_mol",
"tapetool.analysis.sol"
] | [((343, 364), 'tapetool.helpers.read_wav', 'tth.read_wav', (['wavfile'], {}), '(wavfile)\n', (355, 364), True, 'import tapetool.helpers as tth\n'), ((565, 586), 'tapetool.helpers.cut', 'tth.cut', (['fs', '(0.5)', '(1.0)'], {}), '(fs, 0.5, 1.0)\n', (572, 586), True, 'import tapetool.helpers as tth\n'), ((605, 626), 'tapetool.helpers.cut', 'tth.cut', (['fs', '(2.0)', '(3.0)'], {}), '(fs, 2.0, 3.0)\n', (612, 626), True, 'import tapetool.helpers as tth\n'), ((645, 666), 'tapetool.helpers.cut', 'tth.cut', (['fs', '(5.5)', '(1.0)'], {}), '(fs, 5.5, 1.0)\n', (652, 666), True, 'import tapetool.helpers as tth\n'), ((685, 706), 'tapetool.helpers.cut', 'tth.cut', (['fs', '(6.5)', '(1.0)'], {}), '(fs, 6.5, 1.0)\n', (692, 706), True, 'import tapetool.helpers as tth\n'), ((725, 746), 'tapetool.helpers.cut', 'tth.cut', (['fs', '(7.5)', '(1.0)'], {}), '(fs, 7.5, 1.0)\n', (732, 746), True, 'import tapetool.helpers as tth\n'), ((765, 786), 'tapetool.helpers.cut', 'tth.cut', (['fs', '(8.5)', '(1.0)'], {}), '(fs, 8.5, 1.0)\n', (772, 786), True, 'import tapetool.helpers as tth\n'), ((805, 828), 'tapetool.helpers.cut', 'tth.cut', (['fs', '(10.0)', '(10.0)'], {}), '(fs, 10.0, 10.0)\n', (812, 828), True, 'import tapetool.helpers as tth\n'), ((845, 867), 'tapetool.helpers.cut', 'tth.cut', (['fs', '(20.5)', '(5.0)'], {}), '(fs, 20.5, 5.0)\n', (852, 867), True, 'import tapetool.helpers as tth\n'), ((885, 907), 'tapetool.helpers.cut', 'tth.cut', (['fs', '(26.0)', '(5.0)'], {}), '(fs, 26.0, 5.0)\n', (892, 907), True, 'import tapetool.helpers as tth\n'), ((1583, 1625), 'tapetool.analysis.mol', 'tta.mol', (['fs', 'data[i_mol]', '(0.05)', 'filter_thd'], {}), '(fs, data[i_mol], 0.05, filter_thd)\n', (1590, 1625), True, 'import tapetool.analysis as tta\n'), ((1873, 1904), 'tapetool.analysis.sol', 'tta.sol', (['fs', 'data[i_sol10]', '(0.1)'], {}), '(fs, data[i_sol10], 0.1)\n', (1880, 1904), True, 'import tapetool.analysis as tta\n'), ((1913, 1925), 'numpy.argmax', 'np.argmax', (['y'], {}), 
'(y)\n', (1922, 1925), True, 'import numpy as np\n'), ((2141, 2172), 'tapetool.analysis.sol', 'tta.sol', (['fs', 'data[i_sol16]', '(0.1)'], {}), '(fs, data[i_sol16], 0.1)\n', (2148, 2172), True, 'import tapetool.analysis as tta\n'), ((2181, 2193), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (2190, 2193), True, 'import numpy as np\n'), ((1646, 1668), 'tapetool.analysis.find_mol', 'tta.find_mol', (['lvl', 'thd'], {}), '(lvl, thd)\n', (1658, 1668), True, 'import tapetool.analysis as tta\n'), ((1830, 1848), 'tapetool.analysis.db', 'tta.db', (['(0.01 * thd)'], {}), '(0.01 * thd)\n', (1836, 1848), True, 'import tapetool.analysis as tta\n'), ((1047, 1072), 'tapetool.helpers.rms', 'tth.rms', (['data[i_reflevel]'], {}), '(data[i_reflevel])\n', (1054, 1072), True, 'import tapetool.helpers as tth\n'), ((1113, 1154), 'tapetool.analysis.thd', 'tta.thd', (['fs', 'data[i_reflevel]', 'filter_thd'], {}), '(fs, data[i_reflevel], filter_thd)\n', (1120, 1154), True, 'import tapetool.analysis as tta\n'), ((1198, 1220), 'tapetool.helpers.rms', 'tth.rms', (['data[i_noise]'], {}), '(data[i_noise])\n', (1205, 1220), True, 'import tapetool.helpers as tth\n'), ((1274, 1294), 'tapetool.helpers.rms', 'tth.rms', (['data[i_s01]'], {}), '(data[i_s01])\n', (1281, 1294), True, 'import tapetool.helpers as tth\n'), ((1329, 1349), 'tapetool.helpers.rms', 'tth.rms', (['data[i_s63]'], {}), '(data[i_s63])\n', (1336, 1349), True, 'import tapetool.helpers as tth\n'), ((1384, 1404), 'tapetool.helpers.rms', 'tth.rms', (['data[i_s10]'], {}), '(data[i_s10])\n', (1391, 1404), True, 'import tapetool.helpers as tth\n'), ((1439, 1459), 'tapetool.helpers.rms', 'tth.rms', (['data[i_s16]'], {}), '(data[i_s16])\n', (1446, 1459), True, 'import tapetool.helpers as tth\n'), ((1511, 1552), 'tapetool.analysis.thd', 'tta.thd', (['fs', 'data[i_reflevel]', 'filter_thd'], {}), '(fs, data[i_reflevel], filter_thd)\n', (1518, 1552), True, 'import tapetool.analysis as tta\n')] |
# MIT License
# Copyright (c) 2018 ZiyaoQiao
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import warnings
from os.path import split
import numpy as np
import pandas as pd
from glob import glob
from PIL import Image
from keras.applications.inception_v3 import preprocess_input
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
from subprocess import check_output
import keras
from keras.models import Sequential, load_model
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
train_images = glob(".\\input\\train\\*jpg")
test_images = glob(".\\input\\test\\*jpg")
df = pd.read_csv(".\\input\\train.csv")
df["Image"] = df["Image"].map(lambda x: ".\\input\\train\\" + x)
ImageToLabelDict = dict(zip(df["Image"], df["Id"]))
SIZE = 224
def ImportImage(filename):
    """Load an image, convert it to grayscale+alpha, resize to SIZE x SIZE,
    and return the luminance plane as a 2D array (alpha is dropped)."""
    img = Image.open(filename)
    resized = img.convert("LA").resize((SIZE, SIZE))
    # keep only channel 0 (luminance) of the LA image
    return np.array(resized)[:, :, 0]
class LabelOneHotEncoder():
    """Composes a LabelEncoder (string label -> int) with a OneHotEncoder
    (int -> one-hot vector) so string labels map to one-hot encodings."""
    def __init__(self):
        self.ohe = OneHotEncoder()
        self.le = LabelEncoder()
    def fit_transform(self, x):
        features = self.le.fit_transform(x)
        return self.ohe.fit_transform(features.reshape(-1, 1))
    def transform(self, x):
        # bug fix: this referenced the undefined attribute `self.la` and
        # reshaped before label-encoding; mirror the fit_transform pipeline
        features = self.le.transform(x)
        return self.ohe.transform(features.reshape(-1, 1))
    def inverse_tranform(self, x):
        # bug fix: `self.ohe.inverse_tranform` does not exist (typo);
        # the public method name is kept misspelled for caller compatibility
        return self.le.inverse_transform(self.ohe.inverse_transform(x))
    def inverse_labels(self, x):
        # map integer class indices back to their original string labels
        return self.le.inverse_transform(x)
# map each training image path to its whale-id label and one-hot encode them
y = list(map(ImageToLabelDict.get, train_images))
lohe = LabelOneHotEncoder()
y_cat = lohe.fit_transform(y)
# augmentation pipeline: rescale to [0, 1] plus mild rotation/shift/flip
image_gen = ImageDataGenerator(
    # featurewise_center=True,
    # featurewise_std_normalization=True,
    rescale=1. / 255,
    rotation_range=15,
    width_shift_range=.15,
    height_shift_range=.15,
    horizontal_flip=True)
# load the trained transfer-learning model; load_weights on the same file
# looks redundant with load_model — TODO confirm it is intentional
model = load_model(".\\vgg16-transfer-ver1.model")
model.load_weights(".\\vgg16-transfer-ver1.model")
target_size = (224, 224)
def predict(model, img, target_size):
    """Run model prediction on image

    Args:
        model: keras model
        img: PIL format image
        target_size: (w,h) tuple
    Returns:
        list of predicted labels and their probabilities
    """
    # ensure the image matches the network's expected input size
    if img.size != target_size:
        img = img.resize(target_size)
    batch = np.expand_dims(image.img_to_array(img), axis=0)
    batch = preprocess_input(batch)
    return model.predict(batch)
# Run the model over every test image and write a Kaggle-style submission
# file with the top-5 predicted whale ids per image.
with open("sample_submission.csv", "w") as f:
    with warnings.catch_warnings():
        f.write("Image,Id\n")
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        for images in test_images:
            img = Image.open(images)
            # L then RGB: force grayscale content replicated over 3 channels
            img = img.convert("L")
            img = img.convert("RGB")
            y = predict(model, img, target_size)
            # top-5 class indices by descending predicted probability
            predicted_args = np.argsort(y)[0][::-1][:5]
            predicted_tags = lohe.inverse_labels(predicted_args)
            # keep only the file name, not the full path
            images = split(images)[-1]
            predicted_tags = " ".join(predicted_tags)
            # if the model is trained without the new_whale class
            # predicted_tags = "new_whale " + predicted_tags
            f.write("%s,%s\n" % (images, predicted_tags))
| [
"keras.preprocessing.image.img_to_array",
"sklearn.preprocessing.LabelEncoder",
"PIL.Image.open",
"keras.models.load_model",
"pandas.read_csv",
"keras.applications.inception_v3.preprocess_input",
"sklearn.preprocessing.OneHotEncoder",
"warnings.catch_warnings",
"keras.preprocessing.image.ImageDataGe... | [((1663, 1692), 'glob.glob', 'glob', (['""".\\\\input\\\\train\\\\*jpg"""'], {}), "('.\\\\input\\\\train\\\\*jpg')\n", (1667, 1692), False, 'from glob import glob\n'), ((1708, 1736), 'glob.glob', 'glob', (['""".\\\\input\\\\test\\\\*jpg"""'], {}), "('.\\\\input\\\\test\\\\*jpg')\n", (1712, 1736), False, 'from glob import glob\n'), ((1743, 1777), 'pandas.read_csv', 'pd.read_csv', (['""".\\\\input\\\\train.csv"""'], {}), "('.\\\\input\\\\train.csv')\n", (1754, 1777), True, 'import pandas as pd\n'), ((2741, 2873), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'rotation_range': '(15)', 'width_shift_range': '(0.15)', 'height_shift_range': '(0.15)', 'horizontal_flip': '(True)'}), '(rescale=1.0 / 255, rotation_range=15, width_shift_range=\n 0.15, height_shift_range=0.15, horizontal_flip=True)\n', (2759, 2873), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2978, 3020), 'keras.models.load_model', 'load_model', (['""".\\\\vgg16-transfer-ver1.model"""'], {}), "('.\\\\vgg16-transfer-ver1.model')\n", (2988, 3020), False, 'from keras.models import Sequential, load_model\n'), ((3438, 3461), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (3456, 3461), False, 'from keras.preprocessing import image\n'), ((3471, 3496), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (3485, 3496), True, 'import numpy as np\n'), ((3506, 3525), 'keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (3522, 3525), False, 'from keras.applications.inception_v3 import preprocess_input\n'), ((2022, 2035), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2030, 2035), True, 'import numpy as np\n'), ((2123, 2138), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (2136, 2138), False, 'from sklearn.preprocessing import StandardScaler, 
OneHotEncoder, LabelEncoder\n'), ((2158, 2172), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2170, 2172), False, 'from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n'), ((3633, 3658), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3656, 3658), False, 'import warnings\n'), ((3700, 3762), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (3723, 3762), False, 'import warnings\n'), ((3818, 3836), 'PIL.Image.open', 'Image.open', (['images'], {}), '(images)\n', (3828, 3836), False, 'from PIL import Image\n'), ((4106, 4119), 'os.path.split', 'split', (['images'], {}), '(images)\n', (4111, 4119), False, 'from os.path import split\n'), ((1954, 1974), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (1964, 1974), False, 'from PIL import Image\n'), ((3991, 4004), 'numpy.argsort', 'np.argsort', (['y'], {}), '(y)\n', (4001, 4004), True, 'import numpy as np\n')] |
import numpy as np
import random
from math import sqrt, pi, exp
def gaussian_prob(obs, mu, sig):
    """Gaussian (normal) probability density of `obs` for mean `mu`
    and standard deviation `sig`."""
    deviation = obs - mu
    variance = sig ** 2
    coeff = 1 / sqrt(2 * pi * variance)
    return coeff * exp(-(deviation ** 2) / (2 * variance))
class GNB():
    """Gaussian Naive Bayes classifier for lane-change maneuver prediction.

    Trained on snapshots of (s, d, s_dot, d_dot) Frenet coordinates labelled
    'left', 'keep' or 'right'; each (class, variable) likelihood is modeled
    as an independent Gaussian.
    """
    def __init__(self):
        self.classes = ['left', 'keep', 'right']

    def process_vars(self, vars):
        # could do something fancy in here, but right now
        # s, d, s_dot and d_dot alone give good results
        s, d, s_dot, d_dot = vars
        return s, d, s_dot, d_dot

    def train(self, X, Y):
        """
        X is an array of training data, each entry of which is a
        length 4 array which represents a snapshot of a vehicle's
        s, d, s_dot, and d_dot coordinates.
        Y is an array of labels, each of which is either 'left', 'keep',
        or 'right'. These labels indicate what maneuver the vehicle was
        engaged in during the corresponding training data snapshot.
        """
        num_vars = 4
        # one list of observed values per (label, variable) pair:
        # {"left": [[],[],[],[]], "keep": [...], "right": [...]}
        totals_by_label = {label: [[] for _ in range(num_vars)]
                           for label in self.classes}
        for x, label in zip(X, Y):
            # process the raw s,d,s_dot,d_dot snapshot if desired.
            x = self.process_vars(x)
            # bucket each variable's value under its label
            for i, val in enumerate(x):
                totals_by_label[label][i].append(val)
        # Per-class mean and standard deviation of each variable: these are
        # the parameters of the Gaussian likelihoods used by _predict.
        means = []
        stds = []
        for label in self.classes:
            means.append([np.mean(arr) for arr in totals_by_label[label]])
            stds.append([np.std(arr) for arr in totals_by_label[label]])
        self._means = means
        self._stds = stds

    def _predict(self, obs):
        """
        Private method used to assign a probability to each class.
        """
        probs = []
        obs = self.process_vars(obs)
        for means, stds, lab in zip(self._means, self._stds, self.classes):
            # naive-Bayes product of the per-variable likelihoods
            product = 1
            for mu, sig, o in zip(means, stds, obs):
                product *= gaussian_prob(o, mu, sig)
            probs.append(product)
        t = sum(probs)
        return [p / t for p in probs]

    def predict(self, observation):
        """Return the most likely class label for one observation."""
        probs = self._predict(observation)
        # consistency fix: use self.classes instead of a duplicated
        # hard-coded name list, and np.argmax instead of a manual scan
        # (both pick the first maximum, so behavior is unchanged)
        return self.classes[int(np.argmax(probs))]
"numpy.mean",
"math.exp",
"math.sqrt",
"numpy.std"
] | [((158, 181), 'math.sqrt', 'sqrt', (['(2 * pi * sig ** 2)'], {}), '(2 * pi * sig ** 2)\n', (162, 181), False, 'from math import sqrt, pi, exp\n'), ((194, 211), 'math.exp', 'exp', (['(-num / denum)'], {}), '(-num / denum)\n', (197, 211), False, 'from math import sqrt, pi, exp\n'), ((2287, 2299), 'numpy.mean', 'np.mean', (['arr'], {}), '(arr)\n', (2294, 2299), True, 'import numpy as np\n'), ((2322, 2333), 'numpy.std', 'np.std', (['arr'], {}), '(arr)\n', (2328, 2333), True, 'import numpy as np\n')] |
from pioneer.common.linalg import fit_cubic
from pioneer.common.types.calibration import SaturationCalibration
from enum import Enum
from scipy.signal import convolve2d
from typing import List
import numpy as np
class TraceProcessing():
    """Base class for trace-processing steps; the identity transform.

    `priority` orders steps inside a TraceProcessingCollection
    (lower values are applied first).
    """
    def __init__(self, priority: int = 0):
        self.priority = priority

    def __call__(self, traces):
        # base implementation passes the traces through unchanged
        return traces
class TraceProcessingCollection():
    """Applies a list of TraceProcessing steps in ascending priority order."""
    def __init__(self, list_trace_processing: 'List[TraceProcessing]' = None):
        # bug fix: the default was a shared mutable list ([]); accept None
        # and create a fresh list per instance (passing a list still works)
        if list_trace_processing is None:
            list_trace_processing = []
        priorities = [trace_processing.priority for trace_processing in list_trace_processing]
        # indices of the steps sorted by ascending priority
        self.apply_order = np.argsort(priorities)
        self.list_trace_processing = list_trace_processing

    def __call__(self, traces):
        for i in self.apply_order:
            traces = self.list_trace_processing[i](traces)
        return traces
class RemoveStaticNoise(TraceProcessing):
    """Subtracts a pre-recorded static noise profile from the traces."""
    def __init__(self, static_noise):
        super(RemoveStaticNoise, self).__init__(priority=TraceProcessingPriorities.RemoveStaticNoise.value)
        self.static_noise = static_noise

    def __call__(self, traces):
        # no-op when no noise profile was provided
        if self.static_noise is None:
            return traces
        traces['data'] -= self.static_noise
        return traces
class Realign(TraceProcessing):
    """Shifts every trace in time so they share a common time-base delay.

    Shifting is done sample by sample: fractional offsets are applied by
    linear interpolation between neighbouring samples, integer offsets by
    dropping leading samples. `time_base_delays` is updated accordingly.
    """
    def __init__(self, target_time_base_delay=None):
        super(Realign, self).__init__(priority=TraceProcessingPriorities.Realign.value)
        # optional common delay to realign to; None means align to the
        # largest delay present in the data
        self.target_time_base_delay = target_time_base_delay
    def __call__(self, traces):
        # scalar/None delays mean there is nothing per-channel to realign
        # (unless an explicit target delay was requested)
        if type(traces['time_base_delays']) in [int, float, type(None)] and self.target_time_base_delay is None:
            return traces
        # per-channel offsets expressed in (fractional) numbers of samples
        offsets_nb_pts = -traces['time_base_delays'] / traces['distance_scaling']
        offsets_nb_pts -= np.min(offsets_nb_pts)
        if self.target_time_base_delay is not None:
            # extra shift needed to land on the requested target delay
            offset = (np.max(traces['time_base_delays']) - self.target_time_base_delay) / traces['distance_scaling']
            if offset < 0:
                offsets_nb_pts -= offset
        # consume the offsets one sample at a time until all are <= 0
        while np.max(offsets_nb_pts) > 0:
            ind = np.where(offsets_nb_pts//1==0.0)[0] #where offset between 0 and 1
            # fractional shift: linear interpolation with the next sample
            traces['data'][ind,:-1] = np.multiply(traces['data'][ind,:-1].T, 1-offsets_nb_pts[ind]).T + np.multiply(traces['data'][ind,1:].T, offsets_nb_pts[ind]).T
            traces['time_base_delays'][ind] += offsets_nb_pts[ind]*traces['distance_scaling']
            ind = np.where(offsets_nb_pts > 1.0)[0] #where offset > 1
            # whole-sample shift: drop one leading sample
            traces['data'][ind,:-1] = traces['data'][ind,1:]
            traces['time_base_delays'][ind] += traces['distance_scaling']
            offsets_nb_pts -= 1
        return traces
class ZeroBaseline(TraceProcessing):
    """Removes each trace's DC offset, estimated from its first 10 samples."""
    def __init__(self):
        super(ZeroBaseline, self).__init__(priority=TraceProcessingPriorities.ZeroBaseline.value)

    def __call__(self, traces):
        data = traces['data'].astype('float64')
        # per-trace baseline: mean of the first 10 samples
        baseline = np.mean(data[:, :10], axis=-1)
        traces['data'] = data - baseline[..., None]
        return traces
class Clip(TraceProcessing):
    """Clamps trace amplitudes into the range [min_value, max_value]."""
    def __init__(self, min_value=0, max_value=np.inf):
        super(Clip, self).__init__(priority=TraceProcessingPriorities.Clip.value)
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, traces):
        clipped = np.clip(traces['data'], self.min_value, self.max_value)
        traces['data'] = clipped
        return traces
class CutInterval(TraceProcessing):
    """Keeps only the samples in [min_indice, max_indice) of every trace."""
    def __init__(self, min_indice: int = 0, max_indice: int = -1):
        super(CutInterval, self).__init__(priority=TraceProcessingPriorities.CutInterval.value)
        self.min_indice = min_indice
        self.max_indice = max_indice

    def __call__(self, traces):
        start, stop = self.min_indice, self.max_indice
        traces['data'] = traces['data'][..., start:stop]
        # shift the time origin to account for the removed leading samples
        traces['time_base_delays'] += start * traces['distance_scaling']
        return traces
class Smooth(TraceProcessing):
    """Convolves every trace with the smoothing kernel carried in `traces`."""
    def __init__(self):
        super(Smooth, self).__init__(priority=TraceProcessingPriorities.Smooth.value)

    def __call__(self, traces):
        kernel = np.expand_dims(traces['trace_smoothing_kernel'], 0)
        data = traces['data']
        if data.ndim == 1:
            # promote a single trace to 2D for convolve2d, then squeeze back
            smoothed = convolve2d(np.expand_dims(data, 0), kernel, mode="same", boundary='symm')[0]
        else:
            smoothed = convolve2d(data, kernel, mode="same", boundary='symm')
        traces['data'] = smoothed
        return traces
class Desaturate(TraceProcessing):
    """Reconstructs the peak shape of saturated (clipped) traces.

    Flat plateaus at the maximum recorded value are replaced by cubic
    curves fitted to the slopes just before and after the plateau.  When a
    SaturationCalibration is available, the plateau width is used to
    estimate the true peak position/height and two cubics meet at it;
    otherwise a single cubic spans the plateau.
    """
    def __init__(self, saturation_calibration:SaturationCalibration=None):
        super(Desaturate, self).__init__(priority=TraceProcessingPriorities.Desaturate.value)
        self.saturation_calibration = saturation_calibration
    def __call__(self, traces):
        traces['data'] = traces['data'].astype('float64')
        # plateaus are detected at the global maximum of the whole frame
        saturation_value = traces['data'].max()
        if saturation_value == 0:
            return traces
        where_plateau = np.where(traces['data'] == saturation_value)
        # one (channel, first index, run length) triple per saturated channel
        channels, ind, sizes = np.unique(where_plateau[0], return_index=True, return_counts=True)
        positions = where_plateau[1][ind]
        for channel, position, size in zip(channels, positions, sizes):
            # only fix plateaus long enough to matter and with 2+ samples of
            # margin on both sides (needed for the slope estimates below)
            if size > 5 and position > 2 and position + size + 2 < traces['data'].shape[-1]:
                # x axis for the fit
                x = np.arange(0,traces['data'][channel].shape[0])*traces['distance_scaling']
                x = x[position:position + size]
                # Before plateau: midpoint value (relative to the saturation
                # level) and slope of the last two samples
                x1 = (position - 1.5) * traces['distance_scaling']
                y1 = (traces['data'][channel][position - 1] + traces['data'][channel][position - 2])/2 - saturation_value
                dy1 = (traces['data'][channel][position - 1] - traces['data'][channel][position - 2])/traces['distance_scaling']
                # After plateau: same, using the two samples past the run
                x2 = x1 + (size + 2.5)*traces['distance_scaling']
                y2 = (traces['data'][channel][position + size + 2] + traces['data'][channel][position + size + 1])/2 - saturation_value
                dy2 = (traces['data'][channel][position + size + 2] - traces['data'][channel][position + size + 1])/traces['distance_scaling']
                if self.saturation_calibration is not None:
                    # sub-sample plateau edges, found by extrapolating the
                    # local slope on each side of the run
                    start_plateau_position = traces['distance_scaling']*(position -
                        (traces['data'][channel][ position ] - traces['data'][channel][position-1]) \
                        /(traces['data'][channel][position-1] - traces['data'][channel][position-2]))
                    end_plateau_position = traces['distance_scaling']*(position + size - 1 +
                        (traces['data'][channel][position+size-1] - traces['data'][channel][position+size]) \
                        /(traces['data'][channel][position+size] - traces['data'][channel][position+size+1]))
                    # calibration maps plateau width -> (peak offset, peak height)
                    x0, y0 = self.saturation_calibration(end_plateau_position - start_plateau_position)
                    x0 += start_plateau_position
                    # Cubic fit between start of plateau and peak
                    a1, b1, c1, d1 = fit_cubic(p1=(x0,y0), p2=(x1,y1), d1=0, d2=dy1)
                    # Cubic fit between peak and end of plateau
                    a2, b2, c2, d2 = fit_cubic(p1=(x0,y0), p2=(x2,y2), d1=0, d2=dy2)
                    ind_x0 = np.argmax((x-x0)>0)
                    # add the fitted correction on each side of the peak
                    traces['data'][channel][position:position + size][:ind_x0] += a1*x[:ind_x0]**3 + b1*x[:ind_x0]**2 + c1*x[:ind_x0] + d1
                    traces['data'][channel][position:position + size][ind_x0:] += a2*x[ind_x0:]**3 + b2*x[ind_x0:]**2 + c2*x[ind_x0:] + d2
                else:
                    # Cubic fit between start of plateau and peak
                    a, b, c, d = fit_cubic(p1=(x1,y1), p2=(x2,y2), d1=dy1, d2=dy2)
                    traces['data'][channel][position: position+size] += a*x**3 + b*x**2 + c*x + d
        return traces
class Decimate(TraceProcessing):
    """Keeps every `factor`-th sample of each trace."""
    def __init__(self, factor: int = 1):
        super(Decimate, self).__init__(priority=TraceProcessingPriorities.Decimate.value)
        self.factor = factor

    def __call__(self, traces):
        step = self.factor
        traces['data'] = traces['data'][:, ::step]
        # sample spacing grows by the decimation factor
        traces['distance_scaling'] *= step
        return traces
class Binning(TraceProcessing):
    """Averages each group of `factor` consecutive samples into one sample."""
    def __init__(self, factor: int = 1):
        super(Binning, self).__init__(priority=TraceProcessingPriorities.Binning.value)
        self.factor = factor

    def __call__(self, traces):
        factor = self.factor
        bins = int(np.floor(traces['data'].shape[-1] / factor))
        # mean of the `factor` interleaved decimated copies == binned average
        traces['data'] = sum(traces['data'][:, i::factor][:, :bins] for i in range(factor)) / factor
        # each bin is centered half of (factor - 1) original samples later
        traces['time_base_delays'] += 0.5 * (factor - 1) * traces['distance_scaling']
        traces['distance_scaling'] *= factor
        return traces
class TraceProcessingPriorities(Enum):
    # Execution order of the processing steps inside a
    # TraceProcessingCollection (lower value runs earlier).
    Desaturate = 0
    RemoveStaticNoise = 1
    CutInterval = 2
    Realign = 3
    ZeroBaseline = 4
    Clip = 5
    Smooth = 6
    Binning = 7
    Decimate = 8
"numpy.clip",
"numpy.mean",
"scipy.signal.convolve2d",
"numpy.multiply",
"numpy.unique",
"numpy.where",
"numpy.floor",
"numpy.argmax",
"numpy.max",
"numpy.argsort",
"numpy.expand_dims",
"numpy.min",
"pioneer.common.linalg.fit_cubic",
"numpy.arange"
] | [((603, 625), 'numpy.argsort', 'np.argsort', (['priorities'], {}), '(priorities)\n', (613, 625), True, 'import numpy as np\n'), ((1729, 1751), 'numpy.min', 'np.min', (['offsets_nb_pts'], {}), '(offsets_nb_pts)\n', (1735, 1751), True, 'import numpy as np\n'), ((3286, 3341), 'numpy.clip', 'np.clip', (["traces['data']", 'self.min_value', 'self.max_value'], {}), "(traces['data'], self.min_value, self.max_value)\n", (3293, 3341), True, 'import numpy as np\n'), ((4053, 4104), 'numpy.expand_dims', 'np.expand_dims', (["traces['trace_smoothing_kernel']", '(0)'], {}), "(traces['trace_smoothing_kernel'], 0)\n", (4067, 4104), True, 'import numpy as np\n'), ((4958, 5002), 'numpy.where', 'np.where', (["(traces['data'] == saturation_value)"], {}), "(traces['data'] == saturation_value)\n", (4966, 5002), True, 'import numpy as np\n'), ((5035, 5101), 'numpy.unique', 'np.unique', (['where_plateau[0]'], {'return_index': '(True)', 'return_counts': '(True)'}), '(where_plateau[0], return_index=True, return_counts=True)\n', (5044, 5101), True, 'import numpy as np\n'), ((2005, 2027), 'numpy.max', 'np.max', (['offsets_nb_pts'], {}), '(offsets_nb_pts)\n', (2011, 2027), True, 'import numpy as np\n'), ((2916, 2956), 'numpy.mean', 'np.mean', (["traces['data'][:, :10]"], {'axis': '(-1)'}), "(traces['data'][:, :10], axis=-1)\n", (2923, 2956), True, 'import numpy as np\n'), ((4171, 4204), 'numpy.expand_dims', 'np.expand_dims', (["traces['data']", '(0)'], {}), "(traces['data'], 0)\n", (4185, 4204), True, 'import numpy as np\n'), ((4356, 4430), 'scipy.signal.convolve2d', 'convolve2d', (["traces['data']", 'smoothing_kernel'], {'mode': '"""same"""', 'boundary': '"""symm"""'}), "(traces['data'], smoothing_kernel, mode='same', boundary='symm')\n", (4366, 4430), False, 'from scipy.signal import convolve2d\n'), ((8667, 8715), 'numpy.floor', 'np.floor', (["(traces['data'].shape[-1] / self.factor)"], {}), "(traces['data'].shape[-1] / self.factor)\n", (8675, 8715), True, 'import numpy as np\n'), ((2051, 
2087), 'numpy.where', 'np.where', (['(offsets_nb_pts // 1 == 0.0)'], {}), '(offsets_nb_pts // 1 == 0.0)\n', (2059, 2087), True, 'import numpy as np\n'), ((2394, 2424), 'numpy.where', 'np.where', (['(offsets_nb_pts > 1.0)'], {}), '(offsets_nb_pts > 1.0)\n', (2402, 2424), True, 'import numpy as np\n'), ((4233, 4307), 'scipy.signal.convolve2d', 'convolve2d', (["traces['data']", 'smoothing_kernel'], {'mode': '"""same"""', 'boundary': '"""symm"""'}), "(traces['data'], smoothing_kernel, mode='same', boundary='symm')\n", (4243, 4307), False, 'from scipy.signal import convolve2d\n'), ((1827, 1861), 'numpy.max', 'np.max', (["traces['time_base_delays']"], {}), "(traces['time_base_delays'])\n", (1833, 1861), True, 'import numpy as np\n'), ((2155, 2219), 'numpy.multiply', 'np.multiply', (["traces['data'][ind, :-1].T", '(1 - offsets_nb_pts[ind])'], {}), "(traces['data'][ind, :-1].T, 1 - offsets_nb_pts[ind])\n", (2166, 2219), True, 'import numpy as np\n'), ((2221, 2280), 'numpy.multiply', 'np.multiply', (["traces['data'][ind, 1:].T", 'offsets_nb_pts[ind]'], {}), "(traces['data'][ind, 1:].T, offsets_nb_pts[ind])\n", (2232, 2280), True, 'import numpy as np\n'), ((5368, 5414), 'numpy.arange', 'np.arange', (['(0)', "traces['data'][channel].shape[0]"], {}), "(0, traces['data'][channel].shape[0])\n", (5377, 5414), True, 'import numpy as np\n'), ((7230, 7279), 'pioneer.common.linalg.fit_cubic', 'fit_cubic', ([], {'p1': '(x0, y0)', 'p2': '(x1, y1)', 'd1': '(0)', 'd2': 'dy1'}), '(p1=(x0, y0), p2=(x1, y1), d1=0, d2=dy1)\n', (7239, 7279), False, 'from pioneer.common.linalg import fit_cubic\n'), ((7380, 7429), 'pioneer.common.linalg.fit_cubic', 'fit_cubic', ([], {'p1': '(x0, y0)', 'p2': '(x2, y2)', 'd1': '(0)', 'd2': 'dy2'}), '(p1=(x0, y0), p2=(x2, y2), d1=0, d2=dy2)\n', (7389, 7429), False, 'from pioneer.common.linalg import fit_cubic\n'), ((7478, 7499), 'numpy.argmax', 'np.argmax', (['(x - x0 > 0)'], {}), '(x - x0 > 0)\n', (7487, 7499), True, 'import numpy as np\n'), ((7898, 7949), 
'pioneer.common.linalg.fit_cubic', 'fit_cubic', ([], {'p1': '(x1, y1)', 'p2': '(x2, y2)', 'd1': 'dy1', 'd2': 'dy2'}), '(p1=(x1, y1), p2=(x2, y2), d1=dy1, d2=dy2)\n', (7907, 7949), False, 'from pioneer.common.linalg import fit_cubic\n')] |
# -*- coding: utf-8 -*-
""" Simple intensity adjustment for primary colours of all of the rings to
produce pulses.
"""
import math
import numpy as np
from ..engine import Animation
class GradientPattern(Animation):
    """Six rings, each a sine-wave brightness gradient in one primary or
    secondary colour, rotated a few LEDs per frame."""
    ANIMATION = __name__
    ARGS = {
    }

    def ring_render(self, colour_1):
        """Build one ring: a full sine brightness gradient in colour_1."""
        max_brightness = 255
        half = max_brightness / 2
        leds = self.fc.leds_per_ring
        ring = []
        for led in range(leds):
            # sine spans one full period over the ring
            level = int(half * math.sin(math.pi * float(led) / (leds / 2)) + half)
            ring.append(level * colour_1)
        return ring

    def post_init(self):
        # one ring per primary/secondary colour
        components = [
            (1, 0, 0),
            (0, 1, 0),
            (0, 0, 1),
            (1, 1, 0),
            (0, 1, 1),
            (1, 0, 1),
        ]
        self._rings = np.array(
            [self.ring_render(self.fc.colour(r, g, b)) for r, g, b in components],
            dtype=np.uint8)

    def render(self, frame):
        return self.animation(frame)

    def animation(self, frame):
        """ 6 rings spinning in and out of phase
        :param frame:
        :return:
        """
        for ring_index in range(self.fc.n_rings):
            # advance each ring by 4 LED positions per frame
            self._rings[ring_index] = np.roll(self._rings[ring_index], 4, axis=0)
        frame[:] = self._rings
| [
"numpy.roll"
] | [((1307, 1341), 'numpy.roll', 'np.roll', (['self._rings[i]', '(4)'], {'axis': '(0)'}), '(self._rings[i], 4, axis=0)\n', (1314, 1341), True, 'import numpy as np\n')] |
#!/usr/bin/env python
""" LBR iiwa poke an object tracked by RealSense D435
Depend on:
roslaunch iiwa_gazebo iiwa_gazebo_with_sunrise.launch (tf defined in 'iiwa_gazebo' package)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import time
import cv2
import pyrealsense2 as rs
import numpy as np
from numpy import pi
from glob import glob
import torch
from pysot.core.config import cfg
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker
import rospy
import tf
from robots.lbr import iiwaRobot
from geometry_msgs.msg import PoseStamped, Quaternion
from iiwa_msgs.msg import JointPosition
# iiwa's initial perching pose: joint angles for axes a1..a7
# (expressed as fractions of pi, presumably radians — confirm against
# the iiwa_msgs JointPosition convention)
JOINT_PERCH = JointPosition()
JOINT_PERCH.position.a1 = -pi/6
JOINT_PERCH.position.a2 = pi/3
JOINT_PERCH.position.a3 = pi/6
JOINT_PERCH.position.a4 = -pi/2
JOINT_PERCH.position.a5 = -pi/6
JOINT_PERCH.position.a6 = -pi/4
JOINT_PERCH.position.a7 = -pi/6
def quat_from_vecs(vec_0, vec_1):
    """
    Compute the unit quaternion that rotates vec_0 onto vec_1.

    Uses the shortest-arc construction: q = (u x v, |u||v| + u.v),
    normalized.

    Parameters
    ----------
    vec_0, vec_1 : array-like, shape (3,)
        Input vectors (need not be unit length).

    Returns
    -------
    geometry_msgs.msg.Quaternion
        Normalized rotation quaternion.
    """
    quat = Quaternion()
    # bug fix: the scalar part must use |u|*|v|, not sqrt(|u|*|v|);
    # the old form was only correct for unit-length inputs
    norms = np.linalg.norm(vec_0) * np.linalg.norm(vec_1)  # |u||v|
    cp = np.cross(vec_0, vec_1)  # cross product
    w = norms + np.dot(vec_0, vec_1)
    q = np.array([cp[0], cp[1], cp[2], w])
    norm_q = q / np.linalg.norm(q)
    quat.x = norm_q[0]
    quat.y = norm_q[1]
    quat.z = norm_q[2]
    quat.w = norm_q[3]
    return quat
def main():
# instantiate iiwa
iiwa = iiwaRobot()
time.sleep(4) # allow iiwa taking some time to wake up
# zero joints
iiwa.move_joint(commit=True)
# iiwa get ready
iiwa.move_joint(JOINT_PERCH, commit=True)
rospy.loginfo("iiwa is ready")
# Configure realsense D435 depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)
# Create an align object
align_to = rs.stream.color
align = rs.align(align_to)
# load siammask config
cfg.merge_from_file(sys.path[0]+"/siammask_r50_l3/config.yaml")
cfg.CUDA = torch.cuda.is_available()
device = torch.device('cuda' if cfg.CUDA else 'cpu')
# create model
model = ModelBuilder()
# load model
model.load_state_dict(torch.load(sys.path[0]+"/siammask_r50_l3/model.pth",
map_location=lambda storage, loc: storage.cpu()))
model.eval().to(device)
# build tracker
tracker = build_tracker(model)
# label object
video_name = 'D435_color'
cv2.namedWindow(video_name, cv2.WND_PROP_FULLSCREEN)
first_frame = True
FIN_FLAG = False
GOAL_SET_FLAG = False
while not FIN_FLAG:
# wait image stream and select object of interest
frames = pipeline.wait_for_frames()
# Align the depth frame to color frame
aligned_frames = align.process(frames)
color_frame = aligned_frames.get_color_frame()
depth_frame = aligned_frames.get_depth_frame()
depth_intrinsics = rs.video_stream_profile(depth_frame.profile).get_intrinsics()
# convert image to numpy arrays
if color_frame:
color_image = np.asanyarray(color_frame.get_data())
depth_image = np.asanyarray(depth_frame.get_data())
if first_frame:
try:
init_rect = cv2.selectROI(video_name, color_image, False, False)
except:
exit()
tracker.init(color_image, init_rect)
first_frame = False
else:
# start tracking
outputs = tracker.track(color_image)
polygon = np.array(outputs['polygon']).astype(np.int32)
cv2.polylines(color_image, [polygon.reshape((-1, 1, 2))],
True, (0, 255, 0), 3)
mask = ((outputs['mask'] > cfg.TRACK.MASK_THERSHOLD) * 255)
mask = mask.astype(np.uint8)
mask = np.stack([mask, mask*255, mask]).transpose(1, 2, 0)
color_image = cv2.addWeighted(color_image, 0.77, mask, 0.23, -1)
bbox = list(map(int, outputs['bbox']))
poi_pixel = [int(bbox[0]+0.5*bbox[2]), int(bbox[1]+0.5*bbox[3])]
poi_depth = depth_frame.get_distance(poi_pixel[0], poi_pixel[1])
poi_rs = rs.rs2_deproject_pixel_to_point(depth_intrinsics, poi_pixel, poi_depth)
print("Object 3D position w.r.t. camera frame: {}".format(poi_rs))
if not np.allclose(poi_rs, np.zeros(3)):
# compute transformed position of poi w.r.t. iiwa_link_0
transfrom = iiwa.tf_listener.getLatestCommonTime('/iiwa_link_0', '/rs_d435')
pos_rs = PoseStamped()
pos_rs.header.frame_id = 'rs_d435'
pos_rs.pose.orientation.w = 1.
pos_rs.pose.position.x = poi_rs[0]
pos_rs.pose.position.y = poi_rs[1]
pos_rs.pose.position.z = poi_rs[2]
pos_iiwa = iiwa.tf_listener.transformPose('/iiwa_link_0', pos_rs)
rospy.loginfo("Object 3D position w.r.t. iiwa base from: {}\n ee w.r.t. iiwa base: {}".format(pos_iiwa.pose.position, iiwa.cartesian_pose.position))
# vec_ee_poi = np.array([pos_iiwa.pose.position.x, pos_iiwa.pose.position.y,pos_iiwa.pose.position.z]) - np.array([iiwa.cartesian_pose.position.x,iiwa.cartesian_pose.position.y,iiwa.cartesian_pose.position.z])
# goal_pos = np.array([pos_iiwa.pose.position.x, pos_iiwa.pose.position.y,pos_iiwa.pose.position.z]) - vec_ee_poi/np.linalg.norm(vec_ee_poi)*0.167
# # compute orientation w.r.t. iiwa_link_0
# goal_quat = quat_from_vecs(vec_ee_poi, np.array([0,0,1]))
# set cartesian goal
iiwa.goal_carte_pose.pose.position.x = pos_iiwa.pose.position.x
iiwa.goal_carte_pose.pose.position.y = pos_iiwa.pose.position.y
iiwa.goal_carte_pose.pose.position.z = pos_iiwa.pose.position.z
# iiwa.goal_carte_pose.pose.position.x = goal_pos[0]
# iiwa.goal_carte_pose.pose.position.y = goal_pos[1]
# iiwa.goal_carte_pose.pose.position.z = goal_pos[2]
# iiwa.goal_carte_pose.pose.orientation = goal_quat
# iiwa.goal_carte_pose.pose.orientation.x = goal_quat[0]
# iiwa.goal_carte_pose.pose.orientation.y = goal_quat[1]
# iiwa.goal_carte_pose.pose.orientation.z = goal_quat[2]
# iiwa.goal_carte_pose.pose.orientation.w = goal_quat[3]
iiwa.goal_carte_pose.pose.orientation.x = iiwa.cartesian_pose.orientation.x
iiwa.goal_carte_pose.pose.orientation.y = iiwa.cartesian_pose.orientation.y
iiwa.goal_carte_pose.pose.orientation.z = iiwa.cartesian_pose.orientation.z
iiwa.goal_carte_pose.pose.orientation.w = iiwa.cartesian_pose.orientation.w
iiwa.goal_carte_pose.header.frame_id = 'iiwa_link_0'
FIN_FLAG = True
GOAL_SET_FLAG = True
# display image stream, press 'ESC' or 'q' to terminate
cv2.imshow(video_name, color_image)
key = cv2.waitKey(40)
if key in (27, ord("q")):
break
iiwa.move_cartesian(cartesian_pose=iiwa.goal_carte_pose, commit=True)
time.sleep(1)
iiwa.move_joint(joint_position=JOINT_PERCH)
pipeline.stop()
rospy.loginfo("Finished")
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
| [
"time.sleep",
"cv2.imshow",
"numpy.array",
"torch.cuda.is_available",
"numpy.linalg.norm",
"pysot.tracker.tracker_builder.build_tracker",
"numpy.cross",
"iiwa_msgs.msg.JointPosition",
"pysot.core.config.cfg.merge_from_file",
"numpy.dot",
"geometry_msgs.msg.Quaternion",
"cv2.addWeighted",
"nu... | [((767, 782), 'iiwa_msgs.msg.JointPosition', 'JointPosition', ([], {}), '()\n', (780, 782), False, 'from iiwa_msgs.msg import JointPosition\n'), ((1114, 1126), 'geometry_msgs.msg.Quaternion', 'Quaternion', ([], {}), '()\n', (1124, 1126), False, 'from geometry_msgs.msg import PoseStamped, Quaternion\n'), ((1210, 1232), 'numpy.cross', 'np.cross', (['vec_0', 'vec_1'], {}), '(vec_0, vec_1)\n', (1218, 1232), True, 'import numpy as np\n'), ((1327, 1349), 'numpy.array', 'np.array', (['[x, y, z, w]'], {}), '([x, y, z, w])\n', (1335, 1349), True, 'import numpy as np\n'), ((1537, 1548), 'robots.lbr.iiwaRobot', 'iiwaRobot', ([], {}), '()\n', (1546, 1548), False, 'from robots.lbr import iiwaRobot\n'), ((1553, 1566), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (1563, 1566), False, 'import time\n'), ((1730, 1760), 'rospy.loginfo', 'rospy.loginfo', (['"""iiwa is ready"""'], {}), "('iiwa is ready')\n", (1743, 1760), False, 'import rospy\n'), ((1831, 1844), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (1842, 1844), True, 'import pyrealsense2 as rs\n'), ((1858, 1869), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (1867, 1869), True, 'import pyrealsense2 as rs\n'), ((2122, 2140), 'pyrealsense2.align', 'rs.align', (['align_to'], {}), '(align_to)\n', (2130, 2140), True, 'import pyrealsense2 as rs\n'), ((2172, 2237), 'pysot.core.config.cfg.merge_from_file', 'cfg.merge_from_file', (["(sys.path[0] + '/siammask_r50_l3/config.yaml')"], {}), "(sys.path[0] + '/siammask_r50_l3/config.yaml')\n", (2191, 2237), False, 'from pysot.core.config import cfg\n'), ((2251, 2276), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2274, 2276), False, 'import torch\n'), ((2290, 2333), 'torch.device', 'torch.device', (["('cuda' if cfg.CUDA else 'cpu')"], {}), "('cuda' if cfg.CUDA else 'cpu')\n", (2302, 2333), False, 'import torch\n'), ((2365, 2379), 'pysot.models.model_builder.ModelBuilder', 'ModelBuilder', ([], {}), '()\n', (2377, 2379), False, 
'from pysot.models.model_builder import ModelBuilder\n'), ((2596, 2616), 'pysot.tracker.tracker_builder.build_tracker', 'build_tracker', (['model'], {}), '(model)\n', (2609, 2616), False, 'from pysot.tracker.tracker_builder import build_tracker\n'), ((2670, 2722), 'cv2.namedWindow', 'cv2.namedWindow', (['video_name', 'cv2.WND_PROP_FULLSCREEN'], {}), '(video_name, cv2.WND_PROP_FULLSCREEN)\n', (2685, 2722), False, 'import cv2\n'), ((7478, 7491), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7488, 7491), False, 'import time\n'), ((7564, 7589), 'rospy.loginfo', 'rospy.loginfo', (['"""Finished"""'], {}), "('Finished')\n", (7577, 7589), False, 'import rospy\n'), ((1265, 1285), 'numpy.dot', 'np.dot', (['vec_0', 'vec_1'], {}), '(vec_0, vec_1)\n', (1271, 1285), True, 'import numpy as np\n'), ((1362, 1379), 'numpy.linalg.norm', 'np.linalg.norm', (['q'], {}), '(q)\n', (1376, 1379), True, 'import numpy as np\n'), ((7281, 7316), 'cv2.imshow', 'cv2.imshow', (['video_name', 'color_image'], {}), '(video_name, color_image)\n', (7291, 7316), False, 'import cv2\n'), ((7331, 7346), 'cv2.waitKey', 'cv2.waitKey', (['(40)'], {}), '(40)\n', (7342, 7346), False, 'import cv2\n'), ((1147, 1168), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_0'], {}), '(vec_0)\n', (1161, 1168), True, 'import numpy as np\n'), ((1169, 1190), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_1'], {}), '(vec_1)\n', (1183, 1190), True, 'import numpy as np\n'), ((4138, 4188), 'cv2.addWeighted', 'cv2.addWeighted', (['color_image', '(0.77)', 'mask', '(0.23)', '(-1)'], {}), '(color_image, 0.77, mask, 0.23, -1)\n', (4153, 4188), False, 'import cv2\n'), ((4415, 4486), 'pyrealsense2.rs2_deproject_pixel_to_point', 'rs.rs2_deproject_pixel_to_point', (['depth_intrinsics', 'poi_pixel', 'poi_depth'], {}), '(depth_intrinsics, poi_pixel, poi_depth)\n', (4446, 4486), True, 'import pyrealsense2 as rs\n'), ((3150, 3194), 'pyrealsense2.video_stream_profile', 'rs.video_stream_profile', (['depth_frame.profile'], {}), 
'(depth_frame.profile)\n', (3173, 3194), True, 'import pyrealsense2 as rs\n'), ((3473, 3525), 'cv2.selectROI', 'cv2.selectROI', (['video_name', 'color_image', '(False)', '(False)'], {}), '(video_name, color_image, False, False)\n', (3486, 3525), False, 'import cv2\n'), ((4810, 4823), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (4821, 4823), False, 'from geometry_msgs.msg import PoseStamped, Quaternion\n'), ((3764, 3792), 'numpy.array', 'np.array', (["outputs['polygon']"], {}), "(outputs['polygon'])\n", (3772, 3792), True, 'import numpy as np\n'), ((4060, 4094), 'numpy.stack', 'np.stack', (['[mask, mask * 255, mask]'], {}), '([mask, mask * 255, mask])\n', (4068, 4094), True, 'import numpy as np\n'), ((4605, 4616), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4613, 4616), True, 'import numpy as np\n')] |
# coding:utf-8
import os
import gym
import random
import numpy as np
import tensorflow as tf
from collections import deque
from skimage.color import rgb2gray
from skimage.transform import resize
from keras.models import Model
from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate
from keras import backend as K
import time
from gym import wrappers
import threading
import cv2
ENV_NAME = 'Breakout-v0' # Environment name
TRAIN = True
LOAD_NETWORK = False
SAVE_NETWORK_PATH = 'saved_networks/' + ENV_NAME
NUM_ACTORS = 1
NUM_EPISODES = 12000 # Number of episodes the agent plays
INITIAL_REPLAY_SIZE = 50000 # The Learner awaits for this size of transitions to be accumulated.
NUM_REPLAY_MEMORY = 200000 # Remote memory size
MEMORY_REMOVE_INTERVAL = 100
PARAMETER_COPY_INTERVAL = 400
EPSILON_EXPOENT_ALPHA = 7
EPSILON = 0.4
SEND_BATCH_SIZE = 50
PRINT_INTERVAL = 300
N_STEP_RETURN = 3
GAMMA = 0.99 # Discount factor
GAMMA_N = GAMMA ** N_STEP_RETURN
PRIORITY_ALPHA = 0.6
# About epsilon-greedy
ANEALING_EPSILON = True
EXPLORATION_STEPS = 1000000 # Number of steps over which the initial value of epsilon is linearly annealed to its final value
INITIAL_EPSILON = 1.0 # Initial value of epsilon in epsilon-greedy
FINAL_EPSILON = 0.1 # Final value of epsilon in epsilon-greedy
FRAME_WIDTH = 84 # Resized frame width
FRAME_HEIGHT = 84 # Resized frame height
STATE_LENGTH = 4 # Number of most recent frames to produce the input to the network
BATCH_SIZE = 32 # Mini batch size, 512 is the best.
TARGET_UPDATE_INTERVAL = 2500 # The frequency with which the target network is updated
ACTION_INTERVAL = 4 # The agent sees only every () input
LEARNING_RATE = 0.00025 / 4 # Learning rate used by RMSProp
SAVE_INTERVAL = 50000 # The frequency with which the network is saved
NO_OP_STEPS = 30 # Maximum number of "do nothing" actions to be performed by the agent at the start of an episode
NUM_EPISODES_AT_TEST = 10 # Number of episodes the agent plays at test time
class Memory:
def __init__(self):
self.transition = deque()
self.priorities = deque()
self.total_p = 0
def _error_to_priority(self, error_batch):
priority_batch = []
for error in error_batch:
priority_batch.append(error**PRIORITY_ALPHA)
return priority_batch
def length(self):
return len(self.transition)
def add(self, transiton_batch, error_batch):
priority_batch = self._error_to_priority(error_batch)
self.total_p += sum(priority_batch)
self.transition.extend(transiton_batch)
self.priorities.extend(priority_batch)
def sample(self, n):
batch = []
idx_batch = []
segment = self.total_p / n
idx = -1
sum_p = 0
for i in range(n):
a = segment * i
b = segment * (i + 1)
s = random.uniform(a, b)
while sum_p < s:
sum_p += self.priorities[idx]
idx += 1
idx_batch.append(idx)
batch.append(self.transition[idx])
return batch, idx_batch
def update(self, idx_batch, error_batch):
priority_batch = self._error_to_priority(error_batch)
for i in range(len(idx_batch)):
change = priority_batch[i] - self.priorities[idx_batch[i]]
self.total_p += change
self.priorities[idx_batch[i]] = priority_batch[i]
def remove(self):
print("Excess Memory: ", (len(self.priorities) - NUM_REPLAY_MEMORY))
for _ in range(len(self.priorities) - NUM_REPLAY_MEMORY):
self.transition.popleft()
p = self.priorities.popleft()
self.total_p -= p
class Learner:
def __init__(self, sess):
self.sess = sess
self.f_end = False
self.env = gym.make(ENV_NAME)
self.num_actions = self.env.action_space.n
self.t = 0
self.total_time = 0
# Parameters used for summary
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode = 0
self.start = 0
with tf.variable_scope("learner_parameters", reuse=True):
self.s, self.q_values, q_network = self.build_network()
q_network_weights = self.bubble_sort_parameters(q_network.trainable_weights)
# Create target network
with tf.variable_scope("learner_target_parameters", reuse=True):
self.st, self.target_q_values, target_network = self.build_network()
target_network_weights = self.bubble_sort_parameters(target_network.trainable_weights)
# Define target network update operation
self.update_target_network = [target_network_weights[i].assign(q_network_weights[i]) for i in range(len(target_network_weights))]
# Define loss and gradient update operation
self.a, self.y, self.error, self.loss, self.grad_update, self.gv, self.cl = self.build_training_op(q_network_weights)
if not os.path.exists(SAVE_NETWORK_PATH):
os.makedirs(SAVE_NETWORK_PATH)
with tf.device("/cpu:0"):
self.saver = tf.train.Saver(q_network_weights)
self.sess.run(tf.global_variables_initializer())
# Initialize target network
self.sess.run(self.update_target_network)
def bubble_sort_parameters(self, arr):
change = True
while change:
change = False
for i in range(len(arr) - 1):
if arr[i].name > arr[i + 1].name:
arr[i], arr[i + 1] = arr[i + 1], arr[i]
change = True
return arr
def build_network(self):
l_input = Input(shape=(4,84,84))
conv2d = Conv2D(32,8,strides=(4,4),activation='relu', data_format="channels_first")(l_input)
conv2d = Conv2D(64,4,strides=(2,2),activation='relu', data_format="channels_first")(conv2d)
conv2d = Conv2D(64,3,strides=(1,1),activation='relu', data_format="channels_first")(conv2d)
fltn = Flatten()(conv2d)
v = Dense(512, activation='relu', name="dense_v1")(fltn)
v = Dense(1, name="dense_v2")(v)
adv = Dense(512, activation='relu', name="dense_adv1")(fltn)
adv = Dense(self.num_actions, name="dense_adv2")(adv)
y = concatenate([v,adv])
l_output = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - tf.stop_gradient(K.mean(a[:,1:],keepdims=True)), output_shape=(self.num_actions,))(y)
model = Model(input=l_input,output=l_output)
s = tf.placeholder(tf.float32, [None, STATE_LENGTH, FRAME_WIDTH, FRAME_HEIGHT])
q_values = model(s)
return s, q_values, model
def build_training_op(self, q_network_weights):
a = tf.placeholder(tf.int64, [None])
y = tf.placeholder(tf.float32, [None])
# Convert action to one hot vector. shape=(BATCH_SIZE, num_actions)
a_one_hot = tf.one_hot(a, self.num_actions, 1.0, 0.0)
# shape = (BATCH_SIZE,)
q_value = tf.reduce_sum(tf.multiply(self.q_values, a_one_hot), reduction_indices=1)
# Clip the error, the loss is quadratic when the error is in (-1, 1), and linear outside of that region
error = tf.abs(y - q_value)
# error_is = (w / tf.reduce_max(w)) * error
quadratic_part = tf.clip_by_value(error, 0.0, 1.0)
linear_part = error - quadratic_part
loss = tf.reduce_mean(0.5 * tf.square(quadratic_part) + linear_part)
optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, decay=0.95, epsilon=1.5e-7, centered=True)
grads_and_vars = optimizer.compute_gradients(loss, var_list=q_network_weights)
capped_gvs = [(grad if grad is None else tf.clip_by_norm(grad, clip_norm=40), var) for grad, var in grads_and_vars]
grad_update = optimizer.apply_gradients(capped_gvs)
return a, y, error, loss, grad_update ,grads_and_vars, capped_gvs
def load_network(self):
checkpoint = tf.train.get_checkpoint_state(SAVE_NETWORK_PATH)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
print('Successfully loaded: ' + checkpoint.model_checkpoint_path)
else:
print('Training new network...')
def run(self):
global total_episode
# This should be done after Actors were generated.
if LOAD_NETWORK:
self.load_network()
if remote_memory.length() < INITIAL_REPLAY_SIZE:
print("Learner Waiting...")
time.sleep(10)
self.run()
if not self.f_end:
print("Learner Starts!")
while not self.f_end:
start = time.time()
state_batch = []
action_batch = []
reward_batch = []
next_state_batch = []
terminal_batch = []
minibatch, idx_batch = remote_memory.sample(BATCH_SIZE)
for data in minibatch:
state_batch.append(data[0])
action_batch.append(data[1])
reward_batch.append(data[2])
#shape = (BATCH_SIZE, 4, 32, 32)
next_state_batch.append(data[3])
terminal_batch.append(data[4])
self.total_q_max += np.max(self.q_values.eval(feed_dict={self.s: [np.float32(data[0] / 255.0)]},session=self.sess))
# Convert True to 1, False to 0
terminal_batch = np.array(terminal_batch) + 0
# shape = (BATCH_SIZE, num_actions)
target_q_values_batch = self.target_q_values.eval(feed_dict={self.st: np.float32(np.array(next_state_batch) / 255.0)}, session=self.sess)
# DDQN
actions = np.argmax(self.q_values.eval(feed_dict={self.s: np.float32(np.array(next_state_batch) / 255.0)}, session=self.sess), axis=1)
target_q_values_batch = np.array([target_q_values_batch[i][action] for i, action in enumerate(actions)])
# shape = (BATCH_SIZE,)
y_batch = reward_batch + (1 - terminal_batch) * GAMMA_N * target_q_values_batch
error_batch = self.error.eval(feed_dict={
self.s: np.float32(np.array(state_batch) / 255.0),
self.a: action_batch,
self.y: y_batch
}, session=self.sess)
loss, _ = self.sess.run([self.loss, self.grad_update], feed_dict={
self.s: np.float32(np.array(state_batch) / 255.0),
self.a: action_batch,
self.y: y_batch
})
self.total_loss += loss
self.total_time += time.time() - start
# Memory update
remote_memory.update(idx_batch, error_batch)
self.t += 1
if self.t % PRINT_INTERVAL == 0:
text_l = 'AVERAGE LOSS: {0:.5F} / AVG_MAX_Q: {1:2.4F} / LEARN PER SECOND: {2:.1F} / NUM LEARN: {3:5d}'.format(
self.total_loss/PRINT_INTERVAL, self.total_q_max/(PRINT_INTERVAL*BATCH_SIZE), PRINT_INTERVAL/self.total_time, self.t)
print(text_l)
with open(ENV_NAME+'_output.txt','a') as f:
f.write(text_l+"\n")
self.total_loss = 0
self.total_time = 0
self.total_q_max = 0
# Remove excess memory
if self.t % MEMORY_REMOVE_INTERVAL == 0 and remote_memory.length() > NUM_REPLAY_MEMORY:
remote_memory.remove()
# Update target network
if self.t % TARGET_UPDATE_INTERVAL == 0:
self.sess.run(self.update_target_network)
# Save network
if self.t % SAVE_INTERVAL == 0:
save_path = self.saver.save(self.sess, SAVE_NETWORK_PATH + '/' + ENV_NAME, global_step=(self.t))
print('Successfully saved: ' + save_path)
if total_episode >= NUM_EPISODES:
self.f_end = True
print("The Learning is Over.")
time.sleep(0.5)
class Actor:
def __init__(self, number, sess):
self.sess = sess
self.f_end = False
self.env = gym.make(ENV_NAME)
self.num = number
self.num_actions = self.env.action_space.n
self.t = 0
self.repeated_action = 0
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode = 0
if NUM_ACTORS != 1:
self.epsilon = EPSILON **(1+(self.num/(NUM_ACTORS-1))*EPSILON_EXPOENT_ALPHA)
else:
self.epsilon = EPSILON
if ANEALING_EPSILON:
self.epsilon = INITIAL_EPSILON
self.epsilon_step = (INITIAL_EPSILON -FINAL_EPSILON)/ EXPLORATION_STEPS
self.local_memory = deque(maxlen=100)
self.buffer = []
self.R = 0
self.s, self.q_values, q_network = self.build_network()
q_network_weights = self.bubble_sort_parameters(q_network.trainable_weights)
self.st, self.target_q_values, target_network = self.build_network()
target_network_weights = self.bubble_sort_parameters(target_network.trainable_weights)
q_parameters = self.bubble_sort_parameters(tf.trainable_variables(scope="learner_parameters"))
target_parameters = self.bubble_sort_parameters(tf.trainable_variables(scope="learner_target_parameters"))
self.obtain_q_parameters = [q_network_weights[i].assign(q_parameters[i]) for i in range(len(q_parameters))]
self.obtain_target_parameters = [target_network_weights[i].assign(target_parameters[i]) for i in range(len(target_parameters))]
self.a, self.y, self.q, self.error = self.td_error_op()
self.sess.run(tf.global_variables_initializer())
def bubble_sort_parameters(self, arr):
change = True
while change:
change = False
for i in range(len(arr) - 1):
if arr[i].name > arr[i + 1].name:
arr[i], arr[i + 1] = arr[i + 1], arr[i]
change = True
return arr
def td_error_op(self):
a = tf.placeholder(tf.int64, [None])
y = tf.placeholder(tf.float32, [None])
q = tf.placeholder(tf.float32, [None,None])
# Convert action to one hot vector. shape=(BATCH_SIZE, num_actions)
a_one_hot = tf.one_hot(a, self.num_actions, 1.0, 0.0)
# shape = (BATCH_SIZE,)
q_value = tf.reduce_sum(tf.multiply(q, a_one_hot), reduction_indices=1)
# Clip the error, the loss is quadratic when the error is in (-1, 1), and linear outside of that region
error = tf.abs(y - q_value)
return a, y, q, error
def build_network(self):
l_input = Input(shape=(4,84,84))
conv2d = Conv2D(32,8,strides=(4,4),activation='relu', data_format="channels_first")(l_input)
conv2d = Conv2D(64,4,strides=(2,2),activation='relu', data_format="channels_first")(conv2d)
conv2d = Conv2D(64,3,strides=(1,1),activation='relu', data_format="channels_first")(conv2d)
fltn = Flatten()(conv2d)
v = Dense(512, activation='relu', name="dense_v1_"+str(self.num))(fltn)
v = Dense(1, name="dense_v2_"+str(self.num))(v)
adv = Dense(512, activation='relu', name="dense_adv1_"+str(self.num))(fltn)
adv = Dense(self.num_actions, name="dense_adv2_"+str(self.num))(adv)
y = concatenate([v,adv])
l_output = Lambda(lambda a: K.expand_dims(a[:, 0], -1) + a[:, 1:] - tf.stop_gradient(K.mean(a[:,1:],keepdims=True)), output_shape=(self.num_actions,))(y)
model = Model(input=l_input,output=l_output)
s = tf.placeholder(tf.float32, [None, STATE_LENGTH, FRAME_WIDTH, FRAME_HEIGHT])
q_values = model(s)
return s, q_values, model
def get_initial_state(self, observation, last_observation):
processed_observation = np.maximum(observation, last_observation)
processed_observation = np.uint8(resize(rgb2gray(processed_observation), (FRAME_WIDTH, FRAME_HEIGHT)) * 255)
state = [processed_observation for _ in range(STATE_LENGTH)]
return np.stack(state, axis=0)
def preprocess(self, observation, last_observation):
processed_observation = np.maximum(observation, last_observation)
processed_observation = np.uint8(resize(rgb2gray(processed_observation), (FRAME_WIDTH, FRAME_HEIGHT)) * 255)
return np.reshape(processed_observation, (1, FRAME_WIDTH, FRAME_HEIGHT))
def get_action_and_q(self, state):
action = self.repeated_action
q = self.q_values.eval(feed_dict={self.s: [np.float32(state / 255.0)]}, session=self.sess)
if self.t % ACTION_INTERVAL == 0:
if self.epsilon >= random.random() or self.t < INITIAL_REPLAY_SIZE:
action = random.randrange(self.num_actions)
else:
action = np.argmax(q[0])
self.repeated_action = action
return action, q[0]
def get_action_at_test(self, state):
action = self.repeated_action
if self.t % ACTION_INTERVAL == 0:
if random.random() <= 0.05:
action = random.randrange(self.num_actions)
else:
action = np.argmax(self.q_values.eval(feed_dict={self.s: [np.float32(state / 255.0)]}))
self.repeated_action = action
self.t += 1
return action
def get_sample(self, n):
s, a, _, _, _, q = self.buffer[0]
_, _, _, s_, done, q_ = self.buffer[n-1]
return s, a, self.R, s_, done, q, q_
def run(self):
global total_episode
if TRAIN: # Train mode
while not self.f_end:
terminal = False
observation = self.env.reset()
for _ in range(random.randint(1, NO_OP_STEPS)):
last_observation = observation
observation, _, _, _ = self.env.step(0) # Do nothing
state = self.get_initial_state(observation, last_observation)
start = time.time()
while not terminal:
last_observation = observation
action, q = self.get_action_and_q(state)
observation, reward, terminal, _ = self.env.step(action)
# cv2.imshow('observation', observation)
# cv2.waitKey(1)
reward = np.sign(reward)
self.env.render()
processed_observation = self.preprocess(observation, last_observation)
next_state = np.append(state[1:, :, :], processed_observation, axis=0)
self.buffer.append((state, action, reward, next_state, terminal, q))
self.R = (self.R + reward * GAMMA_N) / GAMMA
# n-step transition
if terminal: # terminal state
while len(self.buffer) > 0:
n = len(self.buffer)
s, a, r, s_, done, q, q_ = self.get_sample(n)
self.local_memory.append((s, a, r, s_, done, q, q_))
self.R = (self.R - self.buffer[0][2]) / GAMMA
self.buffer.pop(0)
self.R = 0
if len(self.buffer) >= N_STEP_RETURN:
s, a, r, s_, done, q, q_ = self.get_sample(N_STEP_RETURN)
self.local_memory.append((s, a, r, s_, done, q, q_))
self.R = self.R - self.buffer[0][2]
self.buffer.pop(0)
# Add experience and priority to remote memory
if len(self.local_memory) > 50:
state_batch = []
action_batch = []
reward_batch = []
next_state_batch = []
terminal_batch = []
q_batch = []
qn_batch = []
for _ in range(SEND_BATCH_SIZE):
data = self.local_memory.popleft()
state_batch.append(data[0])
action_batch.append(data[1])
reward_batch.append(data[2])
#shape = (BATCH_SIZE, 4, 32, 32)
next_state_batch.append(data[3])
terminal_batch.append(data[4])
q_batch.append(data[5])
qn_batch.append(data[6])
terminal_batch = np.array(terminal_batch) + 0
# shape = (BATCH_SIZE, num_actions)
target_q_values_batch = self.target_q_values.eval(feed_dict={self.st: np.float32(np.array(next_state_batch) / 255.0)}, session=self.sess)
# DDQN
actions = np.argmax(qn_batch, axis=1)
target_q_values_batch = np.array([target_q_values_batch[i][action] for i, action in enumerate(actions)])
# shape = (BATCH_SIZE,)
y_batch = reward_batch + (1 - terminal_batch) * GAMMA_N * target_q_values_batch
error_batch = self.error.eval(feed_dict={
self.s: np.float32(np.array(state_batch) / 255.0),
self.a: action_batch,
self.q: q_batch,
self.y: y_batch
}, session=self.sess)
send = [(state_batch[i],action_batch[i],reward_batch[i],next_state_batch[i],terminal_batch[i]) for i in range(SEND_BATCH_SIZE)]
remote_memory.add(send, error_batch)
state = next_state
self.t += 1
if self.t % PARAMETER_COPY_INTERVAL == 0:
self.sess.run(self.obtain_q_parameters)
self.sess.run(self.obtain_target_parameters)
if ANEALING_EPSILON and EXPLORATION_STEPS + INITIAL_REPLAY_SIZE > self.t >= INITIAL_REPLAY_SIZE:
self.epsilon -= self.epsilon_step
self.total_reward += reward
self.total_q_max += np.max(self.q_values.eval(feed_dict={self.s: [np.float32(state / 255.0)]}, session=self.sess))
self.duration += 1
elapsed = time.time() - start
text = 'EPISODE: {0:6d} / ACTOR: {1:3d} / TIMESTEP: {2:8d} / DURATION: {3:5d} / EPSILON: {4:.5f} / TOTAL_REWARD: {5:3.0f} / AVG_MAX_Q: {6:2.4f} / STEP_PER_SECOND: {7:.1f}'.format(
self.episode + 1, self.num, self.t, self.duration, self.epsilon,
self.total_reward, self.total_q_max / float(self.duration),
self.duration/elapsed)
print(text)
with open(ENV_NAME+'_output.txt','a') as f:
f.write(text+"\n")
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode += 1
total_episode += 1
if total_episode >= NUM_EPISODES:
self.f_end = True
print("Actor",self.num,"is Over.")
time.sleep(0.5)
total_episode = 0
remote_memory = Memory()
# Train Mode
if TRAIN:
sess = tf.InteractiveSession()
#with tf.device("/gpu:0"):
threads = [Learner(sess)]
#with tf.device("/cpu:0"):
for i in range(NUM_ACTORS):
threads.append(Actor(number=i, sess=sess))
jobs = []
for worker in threads:
job = lambda: worker.run()
t = threading.Thread(target=job)
jobs.append(t)
t.start()
for t in jobs:
t.join()
# Test Mode
else:
env = gym.make(ENV_NAME)
env = wrappers.Monitor(env, SAVE_NETWORK_PATH, force=True)
sess = tf.InteractiveSession()
leaner = Learner(sess)
agent = Actor(number=0,sess=sess)
leaner.load_network()
agent.sess.run(agent.obtain_q_parameters)
for _ in range(NUM_EPISODES_AT_TEST):
terminal = False
observation = env.reset()
for _ in range(random.randint(1, NO_OP_STEPS)):
last_observation = observation
observation, _, _, _ = env.step(0) # Do nothing
state = agent.get_initial_state(observation, last_observation)
while not terminal:
last_observation = observation
action = agent.get_action_at_test(state)
observation, _, terminal, _ = env.step(action)
env.render()
processed_observation = agent.preprocess(observation, last_observation)
state =np.append(state[1:, :, :], processed_observation, axis=0)
| [
"keras.layers.Conv2D",
"tensorflow.multiply",
"time.sleep",
"numpy.array",
"gym.wrappers.Monitor",
"keras.layers.Dense",
"gym.make",
"os.path.exists",
"collections.deque",
"numpy.reshape",
"tensorflow.placeholder",
"numpy.stack",
"keras.layers.concatenate",
"keras.models.Model",
"tensorf... | [((19812, 19835), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (19833, 19835), True, 'import tensorflow as tf\n'), ((20177, 20195), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (20185, 20195), False, 'import gym\n'), ((20203, 20255), 'gym.wrappers.Monitor', 'wrappers.Monitor', (['env', 'SAVE_NETWORK_PATH'], {'force': '(True)'}), '(env, SAVE_NETWORK_PATH, force=True)\n', (20219, 20255), False, 'from gym import wrappers\n'), ((20264, 20287), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (20285, 20287), True, 'import tensorflow as tf\n'), ((2049, 2056), 'collections.deque', 'deque', ([], {}), '()\n', (2054, 2056), False, 'from collections import deque\n'), ((2077, 2084), 'collections.deque', 'deque', ([], {}), '()\n', (2082, 2084), False, 'from collections import deque\n'), ((3510, 3528), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (3518, 3528), False, 'import gym\n'), ((5136, 5160), 'keras.layers.Input', 'Input', ([], {'shape': '(4, 84, 84)'}), '(shape=(4, 84, 84))\n', (5141, 5160), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((5688, 5709), 'keras.layers.concatenate', 'concatenate', (['[v, adv]'], {}), '([v, adv])\n', (5699, 5709), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((5875, 5912), 'keras.models.Model', 'Model', ([], {'input': 'l_input', 'output': 'l_output'}), '(input=l_input, output=l_output)\n', (5880, 5912), False, 'from keras.models import Model\n'), ((5919, 5994), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, STATE_LENGTH, FRAME_WIDTH, FRAME_HEIGHT]'], {}), '(tf.float32, [None, STATE_LENGTH, FRAME_WIDTH, FRAME_HEIGHT])\n', (5933, 5994), True, 'import tensorflow as tf\n'), ((6102, 6134), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None]'], {}), '(tf.int64, [None])\n', (6116, 6134), True, 'import tensorflow as tf\n'), ((6141, 
6175), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {}), '(tf.float32, [None])\n', (6155, 6175), True, 'import tensorflow as tf\n'), ((6261, 6302), 'tensorflow.one_hot', 'tf.one_hot', (['a', 'self.num_actions', '(1.0)', '(0.0)'], {}), '(a, self.num_actions, 1.0, 0.0)\n', (6271, 6302), True, 'import tensorflow as tf\n'), ((6532, 6551), 'tensorflow.abs', 'tf.abs', (['(y - q_value)'], {}), '(y - q_value)\n', (6538, 6551), True, 'import tensorflow as tf\n'), ((6617, 6650), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['error', '(0.0)', '(1.0)'], {}), '(error, 0.0, 1.0)\n', (6633, 6650), True, 'import tensorflow as tf\n'), ((6776, 6864), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['LEARNING_RATE'], {'decay': '(0.95)', 'epsilon': '(1.5e-07)', 'centered': '(True)'}), '(LEARNING_RATE, decay=0.95, epsilon=1.5e-07,\n centered=True)\n', (6801, 6864), True, 'import tensorflow as tf\n'), ((7223, 7271), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['SAVE_NETWORK_PATH'], {}), '(SAVE_NETWORK_PATH)\n', (7252, 7271), True, 'import tensorflow as tf\n'), ((10521, 10536), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (10531, 10536), False, 'import time\n'), ((10641, 10659), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (10649, 10659), False, 'import gym\n'), ((11170, 11187), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (11175, 11187), False, 'from collections import deque\n'), ((12350, 12382), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int64', '[None]'], {}), '(tf.int64, [None])\n', (12364, 12382), True, 'import tensorflow as tf\n'), ((12389, 12423), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {}), '(tf.float32, [None])\n', (12403, 12423), True, 'import tensorflow as tf\n'), ((12430, 12470), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None]'], {}), '(tf.float32, [None, None])\n', (12444, 
12470), True, 'import tensorflow as tf\n'), ((12555, 12596), 'tensorflow.one_hot', 'tf.one_hot', (['a', 'self.num_actions', '(1.0)', '(0.0)'], {}), '(a, self.num_actions, 1.0, 0.0)\n', (12565, 12596), True, 'import tensorflow as tf\n'), ((12814, 12833), 'tensorflow.abs', 'tf.abs', (['(y - q_value)'], {}), '(y - q_value)\n', (12820, 12833), True, 'import tensorflow as tf\n'), ((12899, 12923), 'keras.layers.Input', 'Input', ([], {'shape': '(4, 84, 84)'}), '(shape=(4, 84, 84))\n', (12904, 12923), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((13511, 13532), 'keras.layers.concatenate', 'concatenate', (['[v, adv]'], {}), '([v, adv])\n', (13522, 13532), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((13698, 13735), 'keras.models.Model', 'Model', ([], {'input': 'l_input', 'output': 'l_output'}), '(input=l_input, output=l_output)\n', (13703, 13735), False, 'from keras.models import Model\n'), ((13742, 13817), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, STATE_LENGTH, FRAME_WIDTH, FRAME_HEIGHT]'], {}), '(tf.float32, [None, STATE_LENGTH, FRAME_WIDTH, FRAME_HEIGHT])\n', (13756, 13817), True, 'import tensorflow as tf\n'), ((13957, 13998), 'numpy.maximum', 'np.maximum', (['observation', 'last_observation'], {}), '(observation, last_observation)\n', (13967, 13998), True, 'import numpy as np\n'), ((14182, 14205), 'numpy.stack', 'np.stack', (['state'], {'axis': '(0)'}), '(state, axis=0)\n', (14190, 14205), True, 'import numpy as np\n'), ((14288, 14329), 'numpy.maximum', 'np.maximum', (['observation', 'last_observation'], {}), '(observation, last_observation)\n', (14298, 14329), True, 'import numpy as np\n'), ((14450, 14515), 'numpy.reshape', 'np.reshape', (['processed_observation', '(1, FRAME_WIDTH, FRAME_HEIGHT)'], {}), '(processed_observation, (1, FRAME_WIDTH, FRAME_HEIGHT))\n', (14460, 14515), True, 'import numpy as np\n'), ((20064, 20092), 'threading.Thread', 
'threading.Thread', ([], {'target': 'job'}), '(target=job)\n', (20080, 20092), False, 'import threading\n'), ((2724, 2744), 'random.uniform', 'random.uniform', (['a', 'b'], {}), '(a, b)\n', (2738, 2744), False, 'import random\n'), ((3778, 3829), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""learner_parameters"""'], {'reuse': '(True)'}), "('learner_parameters', reuse=True)\n", (3795, 3829), True, 'import tensorflow as tf\n'), ((4003, 4061), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""learner_target_parameters"""'], {'reuse': '(True)'}), "('learner_target_parameters', reuse=True)\n", (4020, 4061), True, 'import tensorflow as tf\n'), ((4581, 4614), 'os.path.exists', 'os.path.exists', (['SAVE_NETWORK_PATH'], {}), '(SAVE_NETWORK_PATH)\n', (4595, 4614), False, 'import os\n'), ((4619, 4649), 'os.makedirs', 'os.makedirs', (['SAVE_NETWORK_PATH'], {}), '(SAVE_NETWORK_PATH)\n', (4630, 4649), False, 'import os\n'), ((4658, 4677), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (4667, 4677), True, 'import tensorflow as tf\n'), ((4695, 4728), 'tensorflow.train.Saver', 'tf.train.Saver', (['q_network_weights'], {}), '(q_network_weights)\n', (4709, 4728), True, 'import tensorflow as tf\n'), ((4746, 4779), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4777, 4779), True, 'import tensorflow as tf\n'), ((5170, 5248), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(8)'], {'strides': '(4, 4)', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), "(32, 8, strides=(4, 4), activation='relu', data_format='channels_first')\n", (5176, 5248), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((5265, 5343), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(4)'], {'strides': '(2, 2)', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), "(64, 4, strides=(2, 2), activation='relu', data_format='channels_first')\n", (5271, 5343), False, 
'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((5359, 5437), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'strides': '(1, 1)', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), "(64, 3, strides=(1, 1), activation='relu', data_format='channels_first')\n", (5365, 5437), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((5451, 5460), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5458, 5460), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((5475, 5521), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""', 'name': '"""dense_v1"""'}), "(512, activation='relu', name='dense_v1')\n", (5480, 5521), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((5534, 5559), 'keras.layers.Dense', 'Dense', (['(1)'], {'name': '"""dense_v2"""'}), "(1, name='dense_v2')\n", (5539, 5559), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((5571, 5619), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""', 'name': '"""dense_adv1"""'}), "(512, activation='relu', name='dense_adv1')\n", (5576, 5619), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((5634, 5676), 'keras.layers.Dense', 'Dense', (['self.num_actions'], {'name': '"""dense_adv2"""'}), "(self.num_actions, name='dense_adv2')\n", (5639, 5676), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((6355, 6392), 'tensorflow.multiply', 'tf.multiply', (['self.q_values', 'a_one_hot'], {}), '(self.q_values, a_one_hot)\n', (6366, 6392), True, 'import tensorflow as tf\n'), ((7730, 7744), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (7740, 7744), False, 'import time\n'), ((7846, 7857), 'time.time', 'time.time', ([], {}), '()\n', (7855, 7857), False, 'import time\n'), ((11565, 11615), 
'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': '"""learner_parameters"""'}), "(scope='learner_parameters')\n", (11587, 11615), True, 'import tensorflow as tf\n'), ((11667, 11724), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {'scope': '"""learner_target_parameters"""'}), "(scope='learner_target_parameters')\n", (11689, 11724), True, 'import tensorflow as tf\n'), ((12043, 12076), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (12074, 12076), True, 'import tensorflow as tf\n'), ((12649, 12674), 'tensorflow.multiply', 'tf.multiply', (['q', 'a_one_hot'], {}), '(q, a_one_hot)\n', (12660, 12674), True, 'import tensorflow as tf\n'), ((12933, 13011), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(8)'], {'strides': '(4, 4)', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), "(32, 8, strides=(4, 4), activation='relu', data_format='channels_first')\n", (12939, 13011), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((13028, 13106), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(4)'], {'strides': '(2, 2)', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), "(64, 4, strides=(2, 2), activation='relu', data_format='channels_first')\n", (13034, 13106), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((13122, 13200), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'strides': '(1, 1)', 'activation': '"""relu"""', 'data_format': '"""channels_first"""'}), "(64, 3, strides=(1, 1), activation='relu', data_format='channels_first')\n", (13128, 13200), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((13214, 13223), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (13221, 13223), False, 'from keras.layers import Conv2D, Flatten, Dense, Input, Lambda, concatenate\n'), ((19719, 19734), 'time.sleep', 'time.sleep', (['(0.5)'], {}), 
'(0.5)\n', (19729, 19734), False, 'import time\n'), ((20516, 20546), 'random.randint', 'random.randint', (['(1)', 'NO_OP_STEPS'], {}), '(1, NO_OP_STEPS)\n', (20530, 20546), False, 'import random\n'), ((20951, 21008), 'numpy.append', 'np.append', (['state[1:, :, :]', 'processed_observation'], {'axis': '(0)'}), '(state[1:, :, :], processed_observation, axis=0)\n', (20960, 21008), True, 'import numpy as np\n'), ((8440, 8464), 'numpy.array', 'np.array', (['terminal_batch'], {}), '(terminal_batch)\n', (8448, 8464), True, 'import numpy as np\n'), ((9417, 9428), 'time.time', 'time.time', ([], {}), '()\n', (9426, 9428), False, 'import time\n'), ((14800, 14834), 'random.randrange', 'random.randrange', (['self.num_actions'], {}), '(self.num_actions)\n', (14816, 14834), False, 'import random\n'), ((14857, 14872), 'numpy.argmax', 'np.argmax', (['q[0]'], {}), '(q[0])\n', (14866, 14872), True, 'import numpy as np\n'), ((15042, 15057), 'random.random', 'random.random', ([], {}), '()\n', (15055, 15057), False, 'import random\n'), ((15080, 15114), 'random.randrange', 'random.randrange', (['self.num_actions'], {}), '(self.num_actions)\n', (15096, 15114), False, 'import random\n'), ((15801, 15812), 'time.time', 'time.time', ([], {}), '()\n', (15810, 15812), False, 'import time\n'), ((6720, 6745), 'tensorflow.square', 'tf.square', (['quadratic_part'], {}), '(quadratic_part)\n', (6729, 6745), True, 'import tensorflow as tf\n'), ((6984, 7019), 'tensorflow.clip_by_norm', 'tf.clip_by_norm', (['grad'], {'clip_norm': '(40)'}), '(grad, clip_norm=40)\n', (6999, 7019), True, 'import tensorflow as tf\n'), ((14041, 14072), 'skimage.color.rgb2gray', 'rgb2gray', (['processed_observation'], {}), '(processed_observation)\n', (14049, 14072), False, 'from skimage.color import rgb2gray\n'), ((14372, 14403), 'skimage.color.rgb2gray', 'rgb2gray', (['processed_observation'], {}), '(processed_observation)\n', (14380, 14403), False, 'from skimage.color import rgb2gray\n'), ((14738, 14753), 'random.random', 
'random.random', ([], {}), '()\n', (14751, 14753), False, 'import random\n'), ((15595, 15625), 'random.randint', 'random.randint', (['(1)', 'NO_OP_STEPS'], {}), '(1, NO_OP_STEPS)\n', (15609, 15625), False, 'import random\n'), ((16063, 16078), 'numpy.sign', 'np.sign', (['reward'], {}), '(reward)\n', (16070, 16078), True, 'import numpy as np\n'), ((16196, 16253), 'numpy.append', 'np.append', (['state[1:, :, :]', 'processed_observation'], {'axis': '(0)'}), '(state[1:, :, :], processed_observation, axis=0)\n', (16205, 16253), True, 'import numpy as np\n'), ((19013, 19024), 'time.time', 'time.time', ([], {}), '()\n', (19022, 19024), False, 'import time\n'), ((14632, 14657), 'numpy.float32', 'np.float32', (['(state / 255.0)'], {}), '(state / 255.0)\n', (14642, 14657), True, 'import numpy as np\n'), ((17844, 17871), 'numpy.argmax', 'np.argmax', (['qn_batch'], {'axis': '(1)'}), '(qn_batch, axis=1)\n', (17853, 17871), True, 'import numpy as np\n'), ((5739, 5765), 'keras.backend.expand_dims', 'K.expand_dims', (['a[:, 0]', '(-1)'], {}), '(a[:, 0], -1)\n', (5752, 5765), True, 'from keras import backend as K\n'), ((5796, 5827), 'keras.backend.mean', 'K.mean', (['a[:, 1:]'], {'keepdims': '(True)'}), '(a[:, 1:], keepdims=True)\n', (5802, 5827), True, 'from keras import backend as K\n'), ((13562, 13588), 'keras.backend.expand_dims', 'K.expand_dims', (['a[:, 0]', '(-1)'], {}), '(a[:, 0], -1)\n', (13575, 13588), True, 'from keras import backend as K\n'), ((13619, 13650), 'keras.backend.mean', 'K.mean', (['a[:, 1:]'], {'keepdims': '(True)'}), '(a[:, 1:], keepdims=True)\n', (13625, 13650), True, 'from keras import backend as K\n'), ((17600, 17624), 'numpy.array', 'np.array', (['terminal_batch'], {}), '(terminal_batch)\n', (17608, 17624), True, 'import numpy as np\n'), ((8592, 8618), 'numpy.array', 'np.array', (['next_state_batch'], {}), '(next_state_batch)\n', (8600, 8618), True, 'import numpy as np\n'), ((9085, 9106), 'numpy.array', 'np.array', (['state_batch'], {}), 
'(state_batch)\n', (9093, 9106), True, 'import numpy as np\n'), ((9282, 9303), 'numpy.array', 'np.array', (['state_batch'], {}), '(state_batch)\n', (9290, 9303), True, 'import numpy as np\n'), ((8334, 8361), 'numpy.float32', 'np.float32', (['(data[0] / 255.0)'], {}), '(data[0] / 255.0)\n', (8344, 8361), True, 'import numpy as np\n'), ((8731, 8757), 'numpy.array', 'np.array', (['next_state_batch'], {}), '(next_state_batch)\n', (8739, 8757), True, 'import numpy as np\n'), ((15186, 15211), 'numpy.float32', 'np.float32', (['(state / 255.0)'], {}), '(state / 255.0)\n', (15196, 15211), True, 'import numpy as np\n'), ((18925, 18950), 'numpy.float32', 'np.float32', (['(state / 255.0)'], {}), '(state / 255.0)\n', (18935, 18950), True, 'import numpy as np\n'), ((17758, 17784), 'numpy.array', 'np.array', (['next_state_batch'], {}), '(next_state_batch)\n', (17766, 17784), True, 'import numpy as np\n'), ((18174, 18195), 'numpy.array', 'np.array', (['state_batch'], {}), '(state_batch)\n', (18182, 18195), True, 'import numpy as np\n')] |
"""
@author jhancock1975
We wrote this code for the
Kaggle titanic contest, for beginners
"""
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import logging
import argparse
from sklearn.metrics import accuracy_score
import constant
class Classifier(object):
    """Base class that remembers where the train/test csv files live."""
    def __init__(self, train_csv_name, test_csv_name):
        """
        Store the data locations; no model is fitted here.

        :param train_csv_name: path to, and name of training data
        :param test_csv_name: path to, and name of test data
        """
        logger.debug('created %s classifier object' % self)
        self.train_csv_name = train_csv_name
        logger.debug('training csv file name: %s' % train_csv_name)
        self.test_csv_name = test_csv_name
        logger.debug('validation data file name: %s' % test_csv_name)
        # subclasses that fit a model store it here after training
        self.trained_model = None
class GenderClassifier(Classifier):
    """Trivial baseline: predict survival for exactly the female passengers."""
    def train_and_eval(self):
        """
        Score the "women survived" rule against the training labels and
        log the resulting accuracy.
        :return:
        """
        df = pd.read_csv(self.train_csv_name)
        predicted = df.Sex == 'female'
        logger.debug("gender classifier accuracy: %s" % accuracy_score(df.Survived, predicted))
class TitanicRf(Classifier):
    """Random forest model used as the baseline classifier for this project."""
    def clean_data(self, df):
        """
        clean data before training,
        for this simple case we drop any non-numeric columns, except for
        the target value, and we drop any NaN values so we can train with
        random forest
        :param df: raw dataframe as read from the Kaggle csv
        :return: dataframe with cleaned data
        """
        for column_name in df.columns:
            # BUG FIX: test the column's dtype, not the Series itself --
            # np.issubdtype(Series, np.number) relied on deprecated numpy
            # coercion and raises on modern numpy versions
            if not np.issubdtype(df[column_name].dtype, np.number) and (column_name != 'Survived'):
                df = df.drop([column_name], axis=1)
        df.dropna(inplace=True)
        return df
    def train_and_eval(self):
        """
        trains and tests a random forest classifier, for use as the
        baseline classifier for this project
        :return:
        """
        logger.debug('starting model fitting')
        train_csv = pd.read_csv(self.train_csv_name)
        train_csv = self.clean_data(train_csv)
        X = train_csv.drop(['Survived'], axis=1)
        # the ...values.ravel() is to suppress the DataConversionWarning
        # "A column-vector y was passed when a 1d array was expected";
        # solution from https://stackoverflow.com/posts/36120015/revisions
        # accessed December 24th, 2018
        y = train_csv[['Survived']].values.ravel()
        X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1,
                                                          random_state=constant.RANDOM_STATE)
        logger.debug("X_train dimensions: %s", X_train.shape)
        logger.debug("X_val dimensions: %s", X_val.shape)
        logger.debug("y_train length: %s", len(y_train))
        logger.debug("y_val length: %s", len(y_val))
        rf = RandomForestClassifier(random_state=constant.RANDOM_STATE, n_estimators=10)
        logger.debug('fitting classifier')
        rf.fit(X_train, y_train)
        self.trained_model = rf
        logger.debug('starting predictions')
        predictions = rf.predict(X_val)
        logger.debug("random forest accuracy: %s" % accuracy_score(y_val, predictions))
    def test(self):
        """
        evaluates accuracy of trained model on test data
        (currently only loads, cleans and logs the test data head)
        """
        logger.debug('starting test predictions')
        test_csv = pd.read_csv(self.test_csv_name)
        test_csv = self.clean_data(test_csv)
        logger.debug(test_csv.head())
# parse command line arguments
# this program expects the user
# to supply the file path, and name of
# test and training data
# NOTE(review): parsing runs at import time, not under the __main__ guard,
# so importing this module requires the two positional arguments as well
parser = argparse.ArgumentParser()
parser.add_argument("train_data", metavar="train-data",
                    help="path and name of csv file containing training data")
parser.add_argument("test_data", metavar="test-data",
                    help="path and name of csv file containing test data")
args = parser.parse_args()
# logging setup
# single StreamHandler (defaults to stderr) emitting timestamped DEBUG records
logger = logging.getLogger(__name__)
c_handler = logging.StreamHandler()
c_format = logging.Formatter('%(asctime)s %(name)s - %(levelname)s - %(message)s')
c_handler.setFormatter(c_format)
logger.addHandler(c_handler)
logger.setLevel(logging.DEBUG)
if __name__ == "__main__":
    # show all the columns when printing
    pd.set_option('display.max_columns', None)
    logger.debug('starting up')
    # train both models so their logged accuracies can be compared
    titanicRf = TitanicRf(args.train_data, args.test_data)
    genderClassifier = GenderClassifier(args.train_data, args.test_data)
    for clf in [titanicRf, genderClassifier]:
        clf.train_and_eval()
    titanicRf.test()
| [
"logging.getLogger",
"logging.StreamHandler",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"logging.Formatter",
"sklearn.ensemble.RandomForestClassifier",
"pandas.set_option",
"numpy.issubdtype",
"sklearn.metrics.accuracy_score"
] | [((3947, 3972), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3970, 3972), False, 'import argparse\n'), ((4290, 4317), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4307, 4317), False, 'import logging\n'), ((4330, 4353), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (4351, 4353), False, 'import logging\n'), ((4365, 4436), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s %(name)s - %(levelname)s - %(message)s')\n", (4382, 4436), False, 'import logging\n'), ((4605, 4647), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (4618, 4647), True, 'import pandas as pd\n'), ((1067, 1099), 'pandas.read_csv', 'pd.read_csv', (['self.train_csv_name'], {}), '(self.train_csv_name)\n', (1078, 1099), True, 'import pandas as pd\n'), ((2044, 2076), 'pandas.read_csv', 'pd.read_csv', (['self.train_csv_name'], {}), '(self.train_csv_name)\n', (2055, 2076), True, 'import pandas as pd\n'), ((2718, 2791), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)', 'random_state': 'constant.RANDOM_STATE'}), '(X, y, test_size=0.1, random_state=constant.RANDOM_STATE)\n', (2734, 2791), False, 'from sklearn.model_selection import train_test_split\n'), ((3094, 3169), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'constant.RANDOM_STATE', 'n_estimators': '(10)'}), '(random_state=constant.RANDOM_STATE, n_estimators=10)\n', (3116, 3169), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3621, 3652), 'pandas.read_csv', 'pd.read_csv', (['self.test_csv_name'], {}), '(self.test_csv_name)\n', (3632, 3652), True, 'import pandas as pd\n'), ((1156, 1203), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['df.Survived', "(df.Sex == 'female')"], {}), "(df.Survived, df.Sex == 
'female')\n", (1170, 1203), False, 'from sklearn.metrics import accuracy_score\n'), ((3414, 3448), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (3428, 3448), False, 'from sklearn.metrics import accuracy_score\n'), ((1615, 1656), 'numpy.issubdtype', 'np.issubdtype', (['df[column_name]', 'np.number'], {}), '(df[column_name], np.number)\n', (1628, 1656), True, 'import numpy as np\n')] |
# Standard library imports ...
from collections import OrderedDict
import datetime
import struct
# Third party library imports ...
import numpy as np
class _ICCProfile(object):
    """
    Container for ICC profile information.

    Decodes the fixed-layout ICC profile header from a raw byte buffer and
    exposes the parsed fields as ``self.header`` (an OrderedDict); the raw
    bytes are kept in ``self._raw_buffer``.
    """
    # 4-byte profile/device class signatures -> human-readable description
    profile_class = {
        b'scnr': 'input device profile',
        b'mntr': 'display device profile',
        b'prtr': 'output device profile',
        b'link': 'devicelink profile',
        b'spac': 'colorspace conversion profile',
        b'abst': 'abstract profile',
        b'nmcl': 'name colour profile'
    }
    # 4-byte colour space signatures -> short name
    colour_space_dict = {
        b'XYZ ': 'XYZ',
        b'Lab ': 'Lab',
        b'Luv ': 'Luv',
        b'YCbr': 'YCbCr',
        b'Yxy ': 'Yxy',
        b'RGB ': 'RGB',
        b'GRAY': 'gray',
        b'HSV ': 'hsv',
        b'HLS ': 'hls',
        b'CMYK': 'CMYK',
        b'CMY ': 'cmy',
        b'2CLR': '2colour',
        b'3CLR': '3colour',
        b'4CLR': '4colour',
        b'5CLR': '5colour',
        b'6CLR': '6colour',
        b'7CLR': '7colour',
        b'8CLR': '8colour',
        b'9CLR': '9colour',
        b'ACLR': '10colour',
        b'BCLR': '11colour',
        b'CCLR': '12colour',
        b'DCLR': '13colour',
        b'ECLR': '14colour',
        b'FCLR': '15colour'
    }
    # rendering intent field values -> description
    rendering_intent_dict = {
        0: 'perceptual',
        1: 'media-relative colorimetric',
        2: 'saturation',
        3: 'ICC-absolute colorimetric'
    }
    def __init__(self, read_buffer: bytes) -> None:
        """
        Parse the profile header fields out of *read_buffer*.

        :param read_buffer: raw ICC profile bytes; the first 100 bytes hold
            the header decoded here.
        """
        self._raw_buffer = read_buffer
        header = OrderedDict()
        # bytes 0-9: profile size (uint32), preferred CMM type (uint32),
        # and the two version bytes
        data = struct.unpack('>IIBB', self._raw_buffer[0:10])
        header['Size'] = data[0]
        header['Preferred CMM Type'] = data[1]
        major = data[2]
        # byte 9 packs minor version in the high nibble, bugfix in the low
        minor = (data[3] & 0xf0) >> 4
        bugfix = (data[3] & 0x0f)
        header['Version'] = f'{major}.{minor}.{bugfix}'
        # bytes 12-23: device class, colour space, and profile connection
        # space -- each a 4-byte signature looked up in the tables above
        header['Device Class'] = self.profile_class[self._raw_buffer[12:16]]
        header['Color Space'] = self.colour_space_dict[self._raw_buffer[16:20]]
        data = self.colour_space_dict[self._raw_buffer[20:24]]
        header['Connection Space'] = data
        # bytes 24-35: creation date/time as six big-endian uint16 values
        # (year, month, day, hour, minute, second)
        data = struct.unpack('>HHHHHH', self._raw_buffer[24:36])
        try:
            header['Datetime'] = datetime.datetime(*data)
        except ValueError:
            # some profiles carry an invalid/zeroed date; store None
            header['Datetime'] = None
        header['File Signature'] = read_buffer[36:40].decode('utf-8')
        # bytes 40-43: primary platform signature; all zeros means unset
        if read_buffer[40:44] == b'\x00\x00\x00\x00':
            header['Platform'] = 'unrecognized'
        else:
            header['Platform'] = read_buffer[40:44].decode('utf-8')
        # bytes 44-47: profile flags bit field (only bits 0 and 1 decoded)
        fval, = struct.unpack('>I', read_buffer[44:48])
        header['Flags'] = (
            f"{'' if fval & 0x01 else 'not '}embedded, "
            f"{'cannot' if fval & 0x02 else 'can'} be used independently"
        )
        header['Device Manufacturer'] = read_buffer[48:52].decode('utf-8')
        # bytes 52-55: device model signature; all zeros means unset
        if read_buffer[52:56] == b'\x00\x00\x00\x00':
            device_model = ''
        else:
            device_model = read_buffer[52:56].decode('utf-8')
        header['Device Model'] = device_model
        # bytes 56-63: device attributes bit field (uint64); only the low
        # four bits are decoded here
        val, = struct.unpack('>Q', read_buffer[56:64])
        attr = (
            f"{'transparency' if val & 0x01 else 'reflective'}, "
            f"{'matte' if val & 0x02 else 'glossy'}, "
            f"{'negative' if val & 0x04 else 'positive'} media polarity, "
            f"{'black and white' if val & 0x08 else 'color'} media"
        )
        header['Device Attributes'] = attr
        # bytes 64-67: rendering intent; unknown values are tolerated
        rval, = struct.unpack('>I', read_buffer[64:68])
        try:
            header['Rendering Intent'] = self.rendering_intent_dict[rval]
        except KeyError:
            header['Rendering Intent'] = 'unknown'
        # bytes 68-79: illuminant as three signed 16.16 fixed-point values;
        # dividing by 65536 (2**16) yields the float components
        data = struct.unpack('>iii', read_buffer[68:80])
        header['Illuminant'] = np.array(data, dtype=np.float64) / 65536
        if read_buffer[80:84] == b'\x00\x00\x00\x00':
            creator = 'unrecognized'
        else:
            creator = read_buffer[80:84].decode('utf-8')
        header['Creator'] = creator
        # the 16-byte profile id field is only stored for version 4 profiles
        if header['Version'][0] == '4':
            header['Profile Id'] = read_buffer[84:100]
        # Final 27 bytes are reserved.
        self.header = header
| [
"datetime.datetime",
"numpy.array",
"collections.OrderedDict",
"struct.unpack"
] | [((1518, 1531), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1529, 1531), False, 'from collections import OrderedDict\n'), ((1548, 1594), 'struct.unpack', 'struct.unpack', (['""">IIBB"""', 'self._raw_buffer[0:10]'], {}), "('>IIBB', self._raw_buffer[0:10])\n", (1561, 1594), False, 'import struct\n'), ((2106, 2155), 'struct.unpack', 'struct.unpack', (['""">HHHHHH"""', 'self._raw_buffer[24:36]'], {}), "('>HHHHHH', self._raw_buffer[24:36])\n", (2119, 2155), False, 'import struct\n'), ((2563, 2602), 'struct.unpack', 'struct.unpack', (['""">I"""', 'read_buffer[44:48]'], {}), "('>I', read_buffer[44:48])\n", (2576, 2602), False, 'import struct\n'), ((3070, 3109), 'struct.unpack', 'struct.unpack', (['""">Q"""', 'read_buffer[56:64]'], {}), "('>Q', read_buffer[56:64])\n", (3083, 3109), False, 'import struct\n'), ((3461, 3500), 'struct.unpack', 'struct.unpack', (['""">I"""', 'read_buffer[64:68]'], {}), "('>I', read_buffer[64:68])\n", (3474, 3500), False, 'import struct\n'), ((3680, 3721), 'struct.unpack', 'struct.unpack', (['""">iii"""', 'read_buffer[68:80]'], {}), "('>iii', read_buffer[68:80])\n", (3693, 3721), False, 'import struct\n'), ((2202, 2226), 'datetime.datetime', 'datetime.datetime', (['*data'], {}), '(*data)\n', (2219, 2226), False, 'import datetime\n'), ((3753, 3785), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float64'}), '(data, dtype=np.float64)\n', (3761, 3785), True, 'import numpy as np\n')] |
import os
import threading
import imagehash
import numpy as np
from PIL import Image, UnidentifiedImageError
def slices(lista, steps=5):
    """
    Split *lista* into consecutive chunks of at most *steps* items.

    BUG FIX: the previous cursor-based loop appended a spurious empty
    trailing chunk whenever len(lista) was an exact multiple of *steps*
    (and returned [[], []] for an empty list); this version never emits
    empty chunks.

    :param lista: sequence to split
    :param steps: maximum chunk size
    :return: list of chunks, in original order
    """
    return [lista[i:i + steps] for i in range(0, len(lista), steps)]
# maps a user-facing method name to the corresponding imagehash function;
# keys are the valid values for DuplInFolder's hash_method parameter
HASHING_METHODS = {
    "AHASHING" : imagehash.average_hash,
    "PHASHING" : imagehash.phash,
    "DHASHING" : imagehash.dhash,
    "WHASHING" : imagehash.whash,
    "COLORHASHING" : imagehash.colorhash
}
class DuplInFolder:
    """
    Finds near-duplicate images in a folder via perceptual hashing and
    deletes the duplicates.
    """
    def __init__(self, hash_method="AHASHING", similarity=50):
        """
        :param hash_method: key into HASHING_METHODS selecting the hash
            function applied to each image
        :param similarity: match threshold as a percentage of the hash
            size (converted to an absolute bit count in getFiles)
        """
        self.hashing = HASHING_METHODS[hash_method]
        self.similarity = similarity
    def setPath(self, path):
        """Set the folder that will be scanned for duplicates."""
        self.path = path
    def getFiles(self):
        """
        Hash every non-json file in self.path (deleting unreadable files)
        and return the surviving file paths with their hashes, aligned
        index-for-index.
        """
        files = [
            os.path.join(self.path, x)
            for x in os.listdir(self.path)
            if os.path.isfile(self.path + os.sep + x) and \
                '.json' not in x
        ]
        # BUG FIX: the original passed self._load(...) -- the *result* of a
        # synchronous call -- as Thread's first positional argument
        # ("group"), so the threads had no target and did nothing; now the
        # work actually runs on the worker threads via target=/args=.
        # Results are keyed by index so completion order cannot break the
        # files <-> hashes alignment.
        hash_by_index = {}
        rm_ind = set()
        for sli in slices(list(range(len(files)))):
            ths = [
                threading.Thread(target=self._load,
                                 args=(files[i], rm_ind, hash_by_index, i))
                for i in sli
            ]
            [x.start() for x in ths]
            [x.join() for x in ths]
        self.erase([files[i] for i in rm_ind])
        files = np.array([x for i, x in enumerate(files) if i not in rm_ind])
        hashings = [hash_by_index[i] for i in sorted(hash_by_index)]
        # convert the percentage threshold into an absolute matching-bit
        # count; NOTE(review): repeated getFiles calls compound this
        # conversion -- confirm the object is meant to be single-use
        self.similarity = hashings[0].shape[0]*hashings[0].shape[1]*self.similarity/100
        return files, np.array(hashings)
    def _load(self, file, rm_files, hash_by_index, n):
        """Hash one file; record its index in rm_files if it cannot be read."""
        try:
            loaded = self.hashing(Image.open(file)).hash
        except UnidentifiedImageError:
            print('No image format : ', file)
            rm_files.add(n)
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are not swallowed
            rm_files.add(n)
        else:
            hash_by_index[n] = loaded
    def erase(self, rm_files):
        """Delete the given file paths, a small batch of threads at a time."""
        def rm(file_path):
            os.remove(file_path)
        for sli in slices(rm_files):
            ths = [
                threading.Thread(target=rm, args=(x,))
                for x in sli
            ]
            [x.start() for x in ths]
            [x.join() for x in ths]
    def check(self):
        """
        Hash all files in the folder and delete near-duplicates above the
        similarity threshold.
        """
        file_paths, hashing = self.getFiles()
        # files sharing the first file's "<prefix>-" name fragment form the
        # protected base set; NOTE(review): assumes those files occupy the
        # first `ind` slots of the sorted unique-index array -- verify
        base_dt = file_paths[0].split(os.sep)[-1].split('-')[0]+'-'
        ind = sum(1 for x in file_paths if base_dt in x)
        # pairwise count of matching hash bits between every pair of images
        matches = np.array([np.sum(np.sum(x == hashing, axis=1), axis=1) for x in hashing])
        # unique indices involved in above-threshold matches, with counts
        res = np.unique(np.where(matches > self.similarity)[1], return_counts=True)
        # outside the protected set, anything matched more than once
        # (i.e. beyond its self-match) is a duplicate -- delete it
        dup = file_paths[res[0][ind:][np.where(res[1][ind:] > 1)]]
        self.erase(list(dup))
"os.listdir",
"PIL.Image.open",
"numpy.where",
"os.path.join",
"os.path.isfile",
"numpy.array",
"numpy.sum",
"threading.Thread",
"os.remove"
] | [((941, 967), 'os.path.join', 'os.path.join', (['self.path', 'x'], {}), '(self.path, x)\n', (953, 967), False, 'import os\n'), ((1695, 1713), 'numpy.array', 'np.array', (['hashings'], {}), '(hashings)\n', (1703, 1713), True, 'import numpy as np\n'), ((2135, 2155), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (2144, 2155), False, 'import os\n'), ((989, 1010), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (999, 1010), False, 'import os\n'), ((2257, 2295), 'threading.Thread', 'threading.Thread', ([], {'target': 'rm', 'args': '(x,)'}), '(target=rm, args=(x,))\n', (2273, 2295), False, 'import threading\n'), ((1027, 1065), 'os.path.isfile', 'os.path.isfile', (['(self.path + os.sep + x)'], {}), '(self.path + os.sep + x)\n', (1041, 1065), False, 'import os\n'), ((1834, 1850), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (1844, 1850), False, 'from PIL import Image, UnidentifiedImageError\n'), ((3053, 3079), 'numpy.where', 'np.where', (['(res[1][ind:] > 1)'], {}), '(res[1][ind:] > 1)\n', (3061, 3079), True, 'import numpy as np\n'), ((2755, 2783), 'numpy.sum', 'np.sum', (['(x == hashing)'], {'axis': '(1)'}), '(x == hashing, axis=1)\n', (2761, 2783), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import print_function
import json
import logging
import os
import shutil
import sys
import uuid
import hyperopt as hp
import numpy as np
import crnn
import preprocess
# Nones will be replaced with sampled values; see below for hyperparameter space definition
hyperparam_template = crnn.Hyperparameters(
    # fixed preprocessing settings
    min_token_occurrences=1,
    min_token_documents=1,
    # None fields are filled in from HP_SPACE samples inside the objective
    class_weight=None,
    embed_dim=None,
    conv_filters=None,
    conv_kernel_size=None,
    conv_padding=None,
    conv_strides=None,
    conv_pool_size=None,
    lstm_size=None,
    # fixed training settings
    batch_size=16,
    max_length=2048,
    dropout=None,
    # presumably early-stopping min delta / patience -- confirm in crnn
    es_min_delta=0.0001,
    es_patience=2,
    max_epochs=50,
)
# hyperopt search space; each sampled dict overwrites the matching None
# fields of hyperparam_template (the string labels must be unique)
HP_SPACE = hp.hp.choice('all', [{
    # only the positive-class weight is searched; class 0 stays at 1
    'class_weight': {0: 1, 1: hp.hp.uniform('1', 1, 30)},
    # 2**quniform samples power-of-two sizes
    'embed_dim': 2**hp.hp.quniform('2', 2, 6, 2),
    'conv_filters': 2**hp.hp.quniform('3', 3, 8, 1),
    'conv_kernel_size': hp.hp.quniform('4', 2, 10, 1),
    'conv_padding': hp.hp.choice('5', ['valid', 'same']),
    'conv_strides': hp.hp.quniform('6', 1, 8, 1),
    'conv_pool_size': 2**hp.hp.quniform('7', 1, 5, 1),
    'lstm_size': 2**hp.hp.quniform('8', 3, 11, 1),
    # dropout is either off or 0.5
    'dropout': hp.hp.choice('10', [0.0, 0.5])
}])
# total number of hyperopt trials to run
MAX_EVALS = 100
# seed shared by the data split and the hyperopt sampler for reproducibility
RNG_SEED = 1214
# logging
logging.basicConfig(format='%(asctime)s %(process)s %(levelname)-8s %(message)s', stream=sys.stdout)
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def main(data_file: str, output_dir: str):
    """Run a fresh hyperparameter search and record the winning settings."""
    # start from an empty output directory
    if os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir)
    dataset = _preprocess_data(data_file, hyperparam_template, RNG_SEED)
    objective = _objective_fn(output_dir, dataset)
    rng = np.random.RandomState(RNG_SEED)
    hp.fmin(objective, HP_SPACE, hp.tpe.suggest, max_evals=MAX_EVALS, rstate=rng)
    # pick the trial with the best average precision and persist it
    best = _find_best_hyperparams(output_dir)
    best_path = os.path.join(output_dir, "best_hyperparameters.json")
    with open(best_path, 'w') as f:
        f.write(json.dumps(best._asdict(), indent=2))
def _preprocess_data(data_file: str, hyperparams: crnn.Hyperparameters, rng_seed: int) -> crnn.PreprocessedData:
    """Tokenise the dataset and split it roughly 80/20 into train and test."""
    spec = preprocess.DatasetSpec(data_file, None)
    datasets, vocab_size, _ = preprocess.tokenise_data(
        [spec],
        hyperparams.min_token_occurrences,
        hyperparams.min_token_documents,
        random_state=rng_seed)
    devset = datasets['train']
    log.info("loaded %d examples", len(devset))
    # assign each row a random bucket 0..99; buckets below 80 become train
    rng = np.random.RandomState(rng_seed)
    devset['group'] = rng.randint(0, 100, devset.shape[0])
    train_part = devset[devset['group'] < 80]
    test_part = devset[devset['group'] >= 80]
    max_len = int(hyperparams.max_length)
    x_train, y_train, _ = crnn.format_data(train_part, max_len)
    x_test, y_test, id_test = crnn.format_data(test_part, max_len)
    return crnn.PreprocessedData(x_train, y_train, x_test, y_test, id_test, vocab_size)
def _objective_fn(output_dir: str, data: crnn.PreprocessedData):
    """Build the hyperopt objective closure over *output_dir* and *data*."""
    def objective(sampled: dict) -> dict:
        hyperparams = hyperparam_template._replace(**sampled)
        log.info("evaluating %s", hyperparams)
        try:
            results = crnn.train_and_evaluate_preprocessed(data, hyperparams)
            # the raw examples are bulky and not needed in the report
            del results['test']['examples']
            results['hyperparameters'] = hyperparams._asdict()
            out_file = os.path.join(output_dir, "{}.json".format(uuid.uuid4().hex[:8]))
            with open(out_file, 'w') as f:
                f.write(json.dumps(results, indent=2))
            # hyperopt minimises, so report 1 - average precision as loss
            return {'status': hp.STATUS_OK, 'loss': 1.0 - results['test']['average_precision']}
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the search unstoppable
            log.exception("error while evaluating")
            return {'status': hp.STATUS_FAIL}
    return objective
def _find_best_hyperparams(output_dir: str) -> crnn.Hyperparameters:
    """Scan all trial result files in ``output_dir`` and return the
    hyperparameters with the highest test average precision.

    Returns ``None`` when the directory contains no trial files.
    """
    best_avg_precision = 0
    result = None
    # ``filename`` instead of ``file``: the original shadowed the builtin.
    for filename in os.listdir(output_dir):
        with open(os.path.join(output_dir, filename)) as f:
            results = json.load(f)
        avg_precision = results['test']['average_precision']
        if avg_precision > best_avg_precision:
            best_avg_precision = avg_precision
            result = crnn.Hyperparameters(**results['hyperparameters'])
    log.info("best average precision: %f for hyperparameters %s", best_avg_precision, result)
    return result
if __name__ == '__main__':
    # CLI entry point: expects <data_file> <output_dir> as positional arguments.
    main(*sys.argv[1:])
| [
"logging.getLogger",
"crnn.PreprocessedData",
"sys.exc_info",
"numpy.random.RandomState",
"os.path.exists",
"os.listdir",
"json.dumps",
"hyperopt.hp.choice",
"preprocess.DatasetSpec",
"crnn.Hyperparameters",
"hyperopt.hp.quniform",
"uuid.uuid4",
"hyperopt.hp.uniform",
"crnn.train_and_evalu... | [((324, 658), 'crnn.Hyperparameters', 'crnn.Hyperparameters', ([], {'min_token_occurrences': '(1)', 'min_token_documents': '(1)', 'class_weight': 'None', 'embed_dim': 'None', 'conv_filters': 'None', 'conv_kernel_size': 'None', 'conv_padding': 'None', 'conv_strides': 'None', 'conv_pool_size': 'None', 'lstm_size': 'None', 'batch_size': '(16)', 'max_length': '(2048)', 'dropout': 'None', 'es_min_delta': '(0.0001)', 'es_patience': '(2)', 'max_epochs': '(50)'}), '(min_token_occurrences=1, min_token_documents=1,\n class_weight=None, embed_dim=None, conv_filters=None, conv_kernel_size=\n None, conv_padding=None, conv_strides=None, conv_pool_size=None,\n lstm_size=None, batch_size=16, max_length=2048, dropout=None,\n es_min_delta=0.0001, es_patience=2, max_epochs=50)\n', (344, 658), False, 'import crnn\n'), ((1268, 1373), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(process)s %(levelname)-8s %(message)s"""', 'stream': 'sys.stdout'}), "(format=\n '%(asctime)s %(process)s %(levelname)-8s %(message)s', stream=sys.stdout)\n", (1287, 1373), False, 'import logging\n'), ((1375, 1402), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1392, 1402), False, 'import logging\n'), ((1482, 1508), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1496, 1508), False, 'import os\n'), ((1548, 1571), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1559, 1571), False, 'import os\n'), ((2537, 2568), 'numpy.random.RandomState', 'np.random.RandomState', (['rng_seed'], {}), '(rng_seed)\n', (2558, 2568), True, 'import numpy as np\n'), ((2882, 2958), 'crnn.PreprocessedData', 'crnn.PreprocessedData', (['x_train', 'y_train', 'x_test', 'y_test', 'id_test', 'vocab_size'], {}), '(x_train, y_train, x_test, y_test, id_test, vocab_size)\n', (2903, 2958), False, 'import crnn\n'), ((3912, 3934), 'os.listdir', 'os.listdir', (['output_dir'], {}), '(output_dir)\n', (3922, 
3934), False, 'import os\n'), ((1518, 1543), 'shutil.rmtree', 'shutil.rmtree', (['output_dir'], {}), '(output_dir)\n', (1531, 1543), False, 'import shutil\n'), ((929, 958), 'hyperopt.hp.quniform', 'hp.hp.quniform', (['"""4"""', '(2)', '(10)', '(1)'], {}), "('4', 2, 10, 1)\n", (943, 958), True, 'import hyperopt as hp\n'), ((980, 1016), 'hyperopt.hp.choice', 'hp.hp.choice', (['"""5"""', "['valid', 'same']"], {}), "('5', ['valid', 'same'])\n", (992, 1016), True, 'import hyperopt as hp\n'), ((1038, 1066), 'hyperopt.hp.quniform', 'hp.hp.quniform', (['"""6"""', '(1)', '(8)', '(1)'], {}), "('6', 1, 8, 1)\n", (1052, 1066), True, 'import hyperopt as hp\n'), ((1189, 1219), 'hyperopt.hp.choice', 'hp.hp.choice', (['"""10"""', '[0.0, 0.5]'], {}), "('10', [0.0, 0.5])\n", (1201, 1219), True, 'import hyperopt as hp\n'), ((1741, 1772), 'numpy.random.RandomState', 'np.random.RandomState', (['RNG_SEED'], {}), '(RNG_SEED)\n', (1762, 1772), True, 'import numpy as np\n'), ((1847, 1900), 'os.path.join', 'os.path.join', (['output_dir', '"""best_hyperparameters.json"""'], {}), "(output_dir, 'best_hyperparameters.json')\n", (1859, 1900), False, 'import os\n'), ((2150, 2189), 'preprocess.DatasetSpec', 'preprocess.DatasetSpec', (['data_file', 'None'], {}), '(data_file, None)\n', (2172, 2189), False, 'import preprocess\n'), ((3212, 3267), 'crnn.train_and_evaluate_preprocessed', 'crnn.train_and_evaluate_preprocessed', (['data', 'hyperparams'], {}), '(data, hyperparams)\n', (3248, 3267), False, 'import crnn\n'), ((4014, 4026), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4023, 4026), False, 'import json\n'), ((774, 799), 'hyperopt.hp.uniform', 'hp.hp.uniform', (['"""1"""', '(1)', '(30)'], {}), "('1', 1, 30)\n", (787, 799), True, 'import hyperopt as hp\n'), ((822, 850), 'hyperopt.hp.quniform', 'hp.hp.quniform', (['"""2"""', '(2)', '(6)', '(2)'], {}), "('2', 2, 6, 2)\n", (836, 850), True, 'import hyperopt as hp\n'), ((875, 903), 'hyperopt.hp.quniform', 'hp.hp.quniform', (['"""3"""', '(3)', 
'(8)', '(1)'], {}), "('3', 3, 8, 1)\n", (889, 903), True, 'import hyperopt as hp\n'), ((1093, 1121), 'hyperopt.hp.quniform', 'hp.hp.quniform', (['"""7"""', '(1)', '(5)', '(1)'], {}), "('7', 1, 5, 1)\n", (1107, 1121), True, 'import hyperopt as hp\n'), ((1143, 1172), 'hyperopt.hp.quniform', 'hp.hp.quniform', (['"""8"""', '(3)', '(11)', '(1)'], {}), "('8', 3, 11, 1)\n", (1157, 1172), True, 'import hyperopt as hp\n'), ((3954, 3984), 'os.path.join', 'os.path.join', (['output_dir', 'file'], {}), '(output_dir, file)\n', (3966, 3984), False, 'import os\n'), ((4219, 4269), 'crnn.Hyperparameters', 'crnn.Hyperparameters', ([], {}), "(**results['hyperparameters'])\n", (4239, 4269), False, 'import crnn\n'), ((3499, 3528), 'json.dumps', 'json.dumps', (['results'], {'indent': '(2)'}), '(results, indent=2)\n', (3509, 3528), False, 'import json\n'), ((3694, 3708), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3706, 3708), False, 'import sys\n'), ((3440, 3452), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3450, 3452), False, 'import uuid\n')] |
import plotly.express as px
from _datetime import datetime
import pandas as pd
import numpy as np
import os
# Make sure the log output directories exist. ``exist_ok=True`` replaces the
# original exists()+makedirs pairs, which were race-prone (the directory
# could appear between the check and the call); creating 'Logs/imgs' also
# creates the parent 'Logs' automatically.
os.makedirs('Logs/imgs', exist_ok=True)
def log(df):
    """Write a timestamped markdown training log.

    Produces three artefacts, all named with the current timestamp:
    a PNG of the train/test loss curves, a markdown file embedding the
    current ``config1.py`` contents, and a raw numpy dump of ``df``.

    Args:
        df: dataframe with 'train_loss' and 'test_loss' columns, indexed by step.
    """
    dt_string = datetime.now().strftime("%d%m%Y%H_%M_%S")
    img_names = ['Logs/imgs/loss' + dt_string + '.png',
                 'Logs/imgs/train_vars' + dt_string + '.png']
    title_str = '#Log' + dt_string + '\n'
    # ``with`` guarantees the handle is closed even on a read error; the
    # original also shadowed the ``vars`` builtin with its file handle.
    with open('config1.py', 'r') as config_file:
        vars_str = '##Variables: \n' + config_file.read() + '\n'
    print('creating plots')
    fig = px.line(df, y=['train_loss', 'test_loss'], x=df.index.values)
    fig.write_image(img_names[0])
    # One blank paragraph per expected image; the image links themselves were
    # never emitted by the original loop — presumably still TODO.
    final = title_str + vars_str + '\n\n' * len(img_names)
    with open('Logs/log' + dt_string + '.md', 'w+') as log_file:
        log_file.write(final)
    np.save('Logs/loss_datalog_' + dt_string, df)
"os.path.exists",
"os.makedirs",
"_datetime.datetime.now",
"plotly.express.line",
"numpy.save"
] | [((115, 137), 'os.path.exists', 'os.path.exists', (['"""Logs"""'], {}), "('Logs')\n", (129, 137), False, 'import os\n'), ((143, 162), 'os.makedirs', 'os.makedirs', (['"""Logs"""'], {}), "('Logs')\n", (154, 162), False, 'import os\n'), ((170, 197), 'os.path.exists', 'os.path.exists', (['"""Logs/imgs"""'], {}), "('Logs/imgs')\n", (184, 197), False, 'import os\n'), ((203, 227), 'os.makedirs', 'os.makedirs', (['"""Logs/imgs"""'], {}), "('Logs/imgs')\n", (214, 227), False, 'import os\n'), ((262, 276), '_datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (274, 276), False, 'from _datetime import datetime\n'), ((779, 840), 'plotly.express.line', 'px.line', (['df'], {'y': "['train_loss', 'test_loss']", 'x': 'df.index.values'}), "(df, y=['train_loss', 'test_loss'], x=df.index.values)\n", (786, 840), True, 'import plotly.express as px\n'), ((1270, 1315), 'numpy.save', 'np.save', (["('Logs/loss_datalog_' + dt_string)", 'df'], {}), "('Logs/loss_datalog_' + dt_string, df)\n", (1277, 1315), True, 'import numpy as np\n')] |
from __future__ import print_function
from orphics import maps,io,cosmology,stats
from pixell import enmap
import numpy as np
import os,sys
from tilec import utils as tutils
# Component-separation solutions: 'A-B-...' means a map of component A with
# components B, ... deprojected.
solutions = ['tSZ','CMB','CMB-tSZ','CMB-CIB','tSZ-CMB','tSZ-CIB']
# Instrument combinations and sky regions to compare.
combs = ['joint','act','planck']
regions = ['boss','deep56']
# Map from component label to the tag used in tilec file names.
name_map = {'CMB':'cmb','tSZ':'comptony','CIB':'cib'}
# NOTE(review): ``root`` appears unused below; paths come from tutils.get_save_path.
root = "/scratch/r/rbond/msyriac/data/depot/tilec/"
components = {}
input_names = []
for solution in solutions:
    # e.g. 'CMB-tSZ' -> ['CMB', 'tSZ']: the first entry is the target component.
    components[solution] = solution.split('-')
    input_names.append( components[solution][0] )
input_names = sorted(list(set(input_names)))
print(components)
print(input_names)
# For every (region, combination, solution): bin the power spectrum of the
# 'iso' map version against the default version and plot their relative
# difference r = (iso - default) / default as a function of multipole ell.
for region in regions:
    for comb in combs:
        version = "map_$VERSION_%s" % (comb)  # '$VERSION' substituted per map type below
        savedir = tutils.get_save_path(version,region)
        for solution in solutions:
            pl = io.Plotter(xlabel='l',ylabel='r',xyscale='loglin')
            # Build the tilec map file name for this region/solution/version.
            comps = "tilec_single_tile_"+region+"_"
            comps = comps + name_map[components[solution][0]]+"_"
            if len(components[solution])>1: comps = comps + "deprojects_"+ '_'.join([name_map[x] for x in components[solution][1:]]) + "_"
            comps = comps + version
            fname = "%s%s.fits" % (savedir,comps)
            cs = []
            # Compute a binned 1D power spectrum for each of the two map versions.
            for isotype in ['iso_v1.0.0_rc','v1.0.0_rc']:
                lname = fname.replace('$VERSION',isotype)
                print(lname)
                imap = enmap.read_map(lname)
                # NOTE(review): kmask is built but never applied (see the
                # commented-out '#* kmask' on the next line) — confirm intent.
                kmask = maps.mask_kspace(imap.shape,imap.wcs, lxcut = 40, lycut = 40)
                k = enmap.fft(imap,normalize='phys') #* kmask
                p = (k*k.conj()).real
                modlmap = imap.modlmap()
                # ACT-only maps are binned starting at ell=500 instead of 20
                # (presumably the low-ell range is untrusted for ACT alone).
                if '_act_' in fname:
                    bin_edges = np.arange(500,8000,20)
                else:
                    bin_edges = np.arange(20,8000,20)
                binner = stats.bin2D(modlmap,bin_edges)
                cents,c1d = binner.bin(p)
                cs.append(c1d.copy())
            # Relative difference of the iso vs the non-iso spectrum.
            r = (cs[0]-cs[1])/cs[1]
            pl.add(cents,r)
            pl.hline(y=0)
            pl.done("isocomp_%s_%s_%s.png" % (region,comb,solution) )
| [
"orphics.maps.mask_kspace",
"orphics.io.Plotter",
"orphics.stats.bin2D",
"tilec.utils.get_save_path",
"pixell.enmap.read_map",
"pixell.enmap.fft",
"numpy.arange"
] | [((763, 800), 'tilec.utils.get_save_path', 'tutils.get_save_path', (['version', 'region'], {}), '(version, region)\n', (783, 800), True, 'from tilec import utils as tutils\n'), ((854, 906), 'orphics.io.Plotter', 'io.Plotter', ([], {'xlabel': '"""l"""', 'ylabel': '"""r"""', 'xyscale': '"""loglin"""'}), "(xlabel='l', ylabel='r', xyscale='loglin')\n", (864, 906), False, 'from orphics import maps, io, cosmology, stats\n'), ((1460, 1481), 'pixell.enmap.read_map', 'enmap.read_map', (['lname'], {}), '(lname)\n', (1474, 1481), False, 'from pixell import enmap\n'), ((1508, 1566), 'orphics.maps.mask_kspace', 'maps.mask_kspace', (['imap.shape', 'imap.wcs'], {'lxcut': '(40)', 'lycut': '(40)'}), '(imap.shape, imap.wcs, lxcut=40, lycut=40)\n', (1524, 1566), False, 'from orphics import maps, io, cosmology, stats\n'), ((1590, 1623), 'pixell.enmap.fft', 'enmap.fft', (['imap'], {'normalize': '"""phys"""'}), "(imap, normalize='phys')\n", (1599, 1623), False, 'from pixell import enmap\n'), ((1906, 1937), 'orphics.stats.bin2D', 'stats.bin2D', (['modlmap', 'bin_edges'], {}), '(modlmap, bin_edges)\n', (1917, 1937), False, 'from orphics import maps, io, cosmology, stats\n'), ((1781, 1805), 'numpy.arange', 'np.arange', (['(500)', '(8000)', '(20)'], {}), '(500, 8000, 20)\n', (1790, 1805), True, 'import numpy as np\n'), ((1858, 1881), 'numpy.arange', 'np.arange', (['(20)', '(8000)', '(20)'], {}), '(20, 8000, 20)\n', (1867, 1881), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File : core.classifiers.RCNLPTextClassifier.py
# Description : Echo State Network for text classification.
# Auteur : <NAME> <<EMAIL>>
# Date : 01.02.2017 17:59:05
# Lieu : Nyon, Suisse
#
# This file is part of the Reservoir Computing NLP Project.
# The Reservoir Computing Memory Project is a set of free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Foobar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Foobar. If not, see <http://www.gnu.org/licenses/>.
#
import nsNLP
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
####################################################
# Main function
####################################################
# Main function
if __name__ == "__main__":
# Argument builder
args = nsNLP.tools.ArgumentBuilder(desc="Sklearn classifier baseline benchmark")
# Dataset arguments
args.add_argument(command="--dataset", name="dataset", type=str,
help="JSON file with the file description for each authors", required=True, extended=False)
args.add_argument(command="--k", name="k", type=int, help="K-Fold Cross Validation", extended=False, default=10)
# Naive Bayes classifier arguments
args.add_argument(command="--n-grams-min", name="n_grams_min", type=int, help="N-grams", required=True,
extended=True)
args.add_argument(command="--n-grams-max", name="n_grams_max", type=int, help="N-grams", required=True,
extended=True)
args.add_argument(command="--criterion", name="criterion", type=str, help="gini,entropy", required=True,
extended=True)
args.add_argument(command="--n-estimators", name="n_estimators", type=int, help="How many trees", default=100,
required=False, extended=True)
args.add_argument(command="--max-depth", name="max_depth", type=int, help="Max depth", default=None,
required=False, extended=True)
args.add_argument(command="--tfidf", name="tfidf", type=str, help="tfidf or none", default=False, required=False,
extended=True)
args.add_argument(command="--feature", name="feature", type=str, help="word,char,char_wb", default='word',
required=False,
extended=True)
# Experiment output parameters
args.add_argument(command="--name", name="name", type=str, help="Experiment's name", extended=False, required=True)
args.add_argument(command="--description", name="description", type=str, help="Experiment's description",
extended=False, required=True)
args.add_argument(command="--output", name="output", type=str, help="Experiment's output directory", required=True,
extended=False)
args.add_argument(command="--tweets", name="tweets", action='store_true',
help="Test tweet classification rate?", default=False, extended=False)
args.add_argument(command="--verbose", name="verbose", type=int, help="Verbose level", default=2, extended=False)
# Parse arguments
args.parse()
# Corpus
pan17corpus = nsNLP.data.Corpus(args.dataset)
# Parameter space
param_space = nsNLP.tools.ParameterSpace(args.get_space())
# Experiment
xp = nsNLP.tools.ResultManager\
(
args.output,
args.name,
args.description,
args.get_space(),
1,
args.k,
verbose=args.verbose
)
# Author list
authors = pan17corpus.get_authors()
# Bag of word features
bow = nsNLP.features.BagOfWords()
# Iterate
for space in param_space:
# Params
ngrams_min = int(space['n_grams_min'])
ngrams_max = int(space['n_grams_max'])
criterion = space['criterion'][0][0]
if space['max_depth'] == [['None']]:
max_depth = None
else:
max_depth = int(space['max_depth'])
# end if
n_estimators = int(space['n_estimators'])
tfidf = space['tfidf'][0][0]
feature = space['feature'][0][0]
# Set experience state
xp.set_state(space)
# Average sample
average_sample = np.array([])
# Set sample
xp.set_sample_state(0)
# 10 fold cross validation
cross_validation = nsNLP.validation.CrossValidation(authors)
# Average
average_k_fold = np.array([])
# For each fold
for k, (train_set, test_set) in enumerate(cross_validation):
# Set k
xp.set_fold_state(k)
# Classifier
classifier = RandomForestClassifier(
n_estimators=n_estimators,
criterion=criterion,
max_depth=max_depth
)
# Pipeline
text_clf = Pipeline([
('vect', CountVectorizer(ngram_range=(ngrams_min, ngrams_max), analyzer=feature)),
('tfidf', TfidfTransformer(use_idf=True if tfidf == 'tfidf' else False)),
('clf', classifier)
])
# Total text for each gender
profile_X = list()
profile_Y = list()
# Add to author
for index, author in enumerate(train_set):
profile_X.append(author.get_texts()[0].x())
profile_Y.append(author.truth('gender'))
# end for
# Fit
text_clf.fit(profile_X, profile_Y)
# Print parameters
print(classifier.n_features_)
exit()
# Counters
successes = 0.0
# Test the classifier
for author in test_set:
# Prediction
prediction = text_clf.predict([author.get_texts()[0].x()])
# Compare
if prediction == author.truth('gender'):
successes += 1.0
# end if
# end for
# Print success rate
xp.add_result(successes / float(len(test_set)))
# end for
# end for
# Save experiment results
xp.save()
# end if
| [
"sklearn.feature_extraction.text.TfidfTransformer",
"nsNLP.features.BagOfWords",
"nsNLP.validation.CrossValidation",
"sklearn.feature_extraction.text.CountVectorizer",
"nsNLP.tools.ArgumentBuilder",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"nsNLP.data.Corpus"
] | [((1360, 1433), 'nsNLP.tools.ArgumentBuilder', 'nsNLP.tools.ArgumentBuilder', ([], {'desc': '"""Sklearn classifier baseline benchmark"""'}), "(desc='Sklearn classifier baseline benchmark')\n", (1387, 1433), False, 'import nsNLP\n'), ((3740, 3771), 'nsNLP.data.Corpus', 'nsNLP.data.Corpus', (['args.dataset'], {}), '(args.dataset)\n', (3757, 3771), False, 'import nsNLP\n'), ((4169, 4196), 'nsNLP.features.BagOfWords', 'nsNLP.features.BagOfWords', ([], {}), '()\n', (4194, 4196), False, 'import nsNLP\n'), ((4790, 4802), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4798, 4802), True, 'import numpy as np\n'), ((4919, 4960), 'nsNLP.validation.CrossValidation', 'nsNLP.validation.CrossValidation', (['authors'], {}), '(authors)\n', (4951, 4960), False, 'import nsNLP\n'), ((5005, 5017), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5013, 5017), True, 'import numpy as np\n'), ((5216, 5311), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'criterion': 'criterion', 'max_depth': 'max_depth'}), '(n_estimators=n_estimators, criterion=criterion,\n max_depth=max_depth)\n', (5238, 5311), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5453, 5524), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(ngrams_min, ngrams_max)', 'analyzer': 'feature'}), '(ngram_range=(ngrams_min, ngrams_max), analyzer=feature)\n', (5468, 5524), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((5553, 5614), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'use_idf': "(True if tfidf == 'tfidf' else False)"}), "(use_idf=True if tfidf == 'tfidf' else False)\n", (5569, 5614), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n')] |
# -*- coding: utf-8 -*-
""" Boltzman machine relaxation variational mixture fitting."""
import numpy as np
import scipy.linalg as la
import bmtools.exact.variational as var
def mixture_of_variational_distributions_moments(
        relaxation, rng, n_init=100, n_step=1000, step_size=0.2,
        init_scale=0.5, tol=1e-4):
    """
    Fits a Gaussian mixture to a Boltzmann machine relaxation by forming
    a weighted mixture of Gaussian variational distributions fitted from random
    initialisations, weighted according to variational objective (and with
    simple heuristic to avoid multiple equivalent mixture components).

    Returns a (mean, covariance Cholesky factor, log normaliser estimate)
    triple for the fitted mixture.
    """
    var_obj_list = []
    var_first_mom_list = []
    for j in range(n_init):
        # Random initialisation of the variational biases.
        var_biases = rng.normal(size=(relaxation.n_unit,)) * init_scale
        # Plain gradient descent on the mean-field variational objective.
        for i in range(n_step):
            var_obj, grads_wrt_var_biases, var_first_mom = (
                var.var_obj_and_grads_mean_field(
                    var_biases, relaxation.W, relaxation.b)
            )
            grads_wrt_var_biases = np.array(grads_wrt_var_biases)
            var_biases -= step_size * grads_wrt_var_biases
        # Deduplicate: skip solutions within ``tol`` (Euclidean distance in
        # first-moment space) of an already recorded component.
        in_list = False
        for vm in var_first_mom_list:
            diff = vm - var_first_mom
            dist = diff.dot(diff)**0.5
            if dist < tol:
                in_list = True
                break
        if not in_list:
            var_obj_list.append(var_obj)
            var_first_mom_list.append(np.array(var_first_mom))
    # Mixture weights proportional to exp(-objective); the unnormalised sum is
    # reused below for the log-normaliser (the original computed it twice).
    unnorm_weights = np.exp(-np.array(var_obj_list))
    var_weights = unnorm_weights / unnorm_weights.sum()
    var_mean = np.zeros(relaxation.n_dim_r)
    var_covar = np.zeros((relaxation.n_dim_r, relaxation.n_dim_r))
    # Accumulate mixture mean and second moment in the relaxation space.
    for var_first_mom, w in zip(var_first_mom_list, var_weights):
        m = relaxation.Q.T.dot(var_first_mom)
        var_mean += w * m
        var_covar += w * np.outer(m, m)
    # Covariance = E[x x^T] + I - mean mean^T (unit isotropic noise term).
    var_covar += np.eye(relaxation.n_dim_r) - np.outer(var_mean, var_mean)
    var_covar_chol = la.cholesky(var_covar, True)
    var_log_norm = (
        0.5 * relaxation.n_dim_r * np.log(2 * np.pi) -
        relaxation.n_unit * np.log(2) +
        0.5 * relaxation.D.diagonal().sum() +
        np.log(unnorm_weights.sum())
    )
    return var_mean, var_covar_chol, var_log_norm
| [
"numpy.eye",
"numpy.log",
"scipy.linalg.cholesky",
"numpy.zeros",
"numpy.outer",
"numpy.array",
"bmtools.exact.variational.var_obj_and_grads_mean_field"
] | [((1632, 1660), 'numpy.zeros', 'np.zeros', (['relaxation.n_dim_r'], {}), '(relaxation.n_dim_r)\n', (1640, 1660), True, 'import numpy as np\n'), ((1679, 1729), 'numpy.zeros', 'np.zeros', (['(relaxation.n_dim_r, relaxation.n_dim_r)'], {}), '((relaxation.n_dim_r, relaxation.n_dim_r))\n', (1687, 1729), True, 'import numpy as np\n'), ((2004, 2032), 'scipy.linalg.cholesky', 'la.cholesky', (['var_covar', '(True)'], {}), '(var_covar, True)\n', (2015, 2032), True, 'import scipy.linalg as la\n'), ((1925, 1951), 'numpy.eye', 'np.eye', (['relaxation.n_dim_r'], {}), '(relaxation.n_dim_r)\n', (1931, 1951), True, 'import numpy as np\n'), ((1954, 1982), 'numpy.outer', 'np.outer', (['var_mean', 'var_mean'], {}), '(var_mean, var_mean)\n', (1962, 1982), True, 'import numpy as np\n'), ((925, 997), 'bmtools.exact.variational.var_obj_and_grads_mean_field', 'var.var_obj_and_grads_mean_field', (['var_biases', 'relaxation.W', 'relaxation.b'], {}), '(var_biases, relaxation.W, relaxation.b)\n', (957, 997), True, 'import bmtools.exact.variational as var\n'), ((1068, 1098), 'numpy.array', 'np.array', (['grads_wrt_var_biases'], {}), '(grads_wrt_var_biases)\n', (1076, 1098), True, 'import numpy as np\n'), ((1556, 1578), 'numpy.array', 'np.array', (['var_obj_list'], {}), '(var_obj_list)\n', (1564, 1578), True, 'import numpy as np\n'), ((1893, 1907), 'numpy.outer', 'np.outer', (['m', 'm'], {}), '(m, m)\n', (1901, 1907), True, 'import numpy as np\n'), ((1458, 1481), 'numpy.array', 'np.array', (['var_first_mom'], {}), '(var_first_mom)\n', (1466, 1481), True, 'import numpy as np\n'), ((2089, 2106), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2095, 2106), True, 'import numpy as np\n'), ((2137, 2146), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2143, 2146), True, 'import numpy as np\n'), ((2218, 2240), 'numpy.array', 'np.array', (['var_obj_list'], {}), '(var_obj_list)\n', (2226, 2240), True, 'import numpy as np\n')] |
from typing import List
import numpy
import PIL.Image
import torch
import torchvision
def rand(min: float, max: float) -> float:
    """Uniformly sample a float from the half-open interval [min, max)."""
    span = max - min
    return min + span * numpy.random.rand()
class Compose(object):
    """Chain several (image, boxes) transforms into one callable.

    Each transform takes and returns an ``(image, boxes)`` pair; they are
    applied in list order.
    """
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, boxes):
        pair = (image, boxes)
        for step in self.transforms:
            pair = step(*pair)
        return pair
def get_transforms(config: dict, train: bool, enhancement: bool) -> Compose:
    """Build the preprocessing pipeline.

    :param config: dict carrying the target image size.
    :param train: training pipeline (returns boxes only); otherwise the
        validation pipeline, which passes the annotation dict through.
    :param enhancement: add random data-augmentation steps (training only).
    :return: a Compose wrapping the selected transforms.
    """
    if train and enhancement:
        # Augmented training: random scale/shift, colour jitter, random flips.
        steps = [
            ReformAndExtractBoxes(),
            RandomScaleImageAndBoxes(config=config),
            RandomTransformImage(),
            RandomFlipImageAndBoxes(config=config),
            NormImageAndBoxes(config=config),
        ]
    elif train:
        # Plain training: deterministic letterbox scaling only.
        steps = [
            ReformAndExtractBoxes(),
            ScaleImageAndBoxes(config=config),
            NormImageAndBoxes(config=config),
        ]
    else:
        # Validation: scale and normalise the image, keep the annotation dict.
        steps = [
            ScaleImage(config=config),
            NormImage(config=config),
        ]
    return Compose(steps)
class ReformAndExtractBoxes(object):
    """Extract boxes from an annotation dict and convert corner format
    ``(xmin, ymin, xmax, ymax, label)`` to centre format
    ``(x, y, w, h, label)``. The image is passed through unchanged.
    """
    def __call__(self, raw_image, truth_annotation):
        converted = []
        for xmin, ymin, xmax, ymax, label in truth_annotation["boxes"]:
            center_x = (xmin + xmax) / 2
            center_y = (ymin + ymax) / 2
            converted.append([center_x, center_y, xmax - xmin, ymax - ymin, label])
        return raw_image, numpy.asarray(converted).astype(numpy.float32)
class ScaleImageAndBoxes(object):
    """Letterbox the image and its boxes to the configured size.

    The image is resized with preserved aspect ratio and pasted centred on a
    grey canvas; box coordinates get the same scale and offset applied.
    """
    def __init__(self, config: dict) -> None:
        super().__init__()
        self.config = config  # expects "image_width" / "image_height"

    def __call__(self, raw_image, raw_boxes):
        raw_width, raw_height = raw_image.size
        target_w = self.config["image_width"]
        target_h = self.config["image_height"]
        # The tighter dimension decides the scale (aspect ratio preserved).
        scale = min(target_w / raw_width, target_h / raw_height)
        nw, nh = int(raw_width * scale), int(raw_height * scale)
        pad_x = (target_w - nw) // 2
        pad_y = (target_h - nh) // 2
        resized = raw_image.resize((nw, nh), PIL.Image.BICUBIC)
        # Grey canvas; resized image pasted centred.
        canvas = PIL.Image.new("RGB", (target_w, target_h), (128, 128, 128))
        canvas.paste(resized, (pad_x, pad_y))
        # Same scale and padding offset applied to the (x, y, w, h) columns.
        scaled_boxes = raw_boxes.copy()
        scaled_boxes[:, 0:4] = raw_boxes[:, 0:4] * scale
        scaled_boxes[:, 0] += pad_x
        scaled_boxes[:, 1] += pad_y
        return canvas, scaled_boxes
class RandomScaleImageAndBoxes(object):
    """
    Randomly shrink the image (aspect ratio preserved) and paste it at a
    random offset on a grey canvas of the configured size; the same scale
    and offset are applied to the boxes.
    """
    def __init__(self, config: dict) -> None:
        super().__init__()
        self.config = config  # expects "image_width" / "image_height"
    def __call__(self, raw_image: PIL.Image.Image, raw_boxes: numpy.ndarray) -> (PIL.Image.Image, numpy.ndarray):
        # 1. original image size and target (canvas) size
        raw_width, raw_height = raw_image.size
        scaled_width = self.config["image_width"]
        scaled_height = self.config["image_height"]
        # 2. letterbox scale, further shrunk by a random factor in [0.1, 1.0)
        scale = min(scaled_width / raw_width, scaled_height / raw_height)
        scale = rand(0.1, 1.0) * scale  # 0.1 ~ 1.0 scale
        # 3. size of the randomly scaled image
        nw = int(raw_width * scale)
        nh = int(raw_height * scale)
        # 4. aspect-preserving resize
        scaled_image = raw_image.resize((nw, nh), PIL.Image.BICUBIC)
        # 4.5 random placement offset inside the canvas
        dx = int(rand(0.0, scaled_width - nw))
        dy = int(rand(0.0, scaled_height - nh))
        # 5. paste onto a grey canvas at the random offset
        new_image = PIL.Image.new("RGB", (scaled_width, scaled_height), (128, 128, 128))  # grey background canvas
        new_image.paste(scaled_image, (dx, dy))  # paste scaled image at (dx, dy)
        # 6. apply the same scale and offset to the (x, y, w, h) box columns
        scaled_boxes = raw_boxes.copy()
        scaled_boxes[:, 0:4] = raw_boxes[:, 0:4] * scale
        scaled_boxes[:, 0] += dx
        scaled_boxes[:, 1] += dy
        return new_image, scaled_boxes
class RandomTransformImage(object):
    """Randomly colour-jitter the image.

    Brightness, contrast and saturation jitter are each applied with
    probability 0.5; grayscale conversion with probability 0.01. Boxes are
    returned unchanged.
    """
    def __call__(self, scaled_image, scaled_boxes):
        new_image = scaled_image
        # BUGFIX: each ColorJitter previously took ``scaled_image`` as its
        # input, so any later jitter discarded the effect of an earlier one;
        # chaining through ``new_image`` lets the augmentations compose.
        if rand(0.0, 1.0) < 0.5:
            new_image = torchvision.transforms.ColorJitter(
                brightness=(1.0, 10.0),  # brightness jitter range
            )(new_image)
        if rand(0.0, 1.0) < 0.5:
            new_image = torchvision.transforms.ColorJitter(
                contrast=(1.0, 10.0),  # contrast jitter range
            )(new_image)
        if rand(0.0, 1.0) < 0.5:
            new_image = torchvision.transforms.ColorJitter(
                saturation=(1.0, 10.0),  # saturation jitter range
            )(new_image)
        if rand(0.0, 1.0) < 0.01:
            new_image = torchvision.transforms.Grayscale(num_output_channels=3)(new_image)
        return new_image, scaled_boxes
class RandomFlipImageAndBoxes(object):
    """
    Randomly flip the image (and box centres) horizontally and/or vertically,
    each with probability 0.5. Note: ``scaled_boxes`` is mutated in place.
    """
    def __init__(self, config: dict) -> None:
        super().__init__()
        self.config = config  # expects "image_width" / "image_height"
    def __call__(self, scaled_image: PIL.Image.Image, scaled_boxes: numpy.ndarray) -> (PIL.Image.Image, numpy.ndarray):
        new_image = scaled_image
        if rand(0.0, 1.0) < 0.5:
            # p=2 >= 1 makes RandomHorizontalFlip flip unconditionally: the
            # 50% chance is decided by the rand() guard above instead.
            new_image = torchvision.transforms.RandomHorizontalFlip(p=2)(new_image)
            # mirror the box centre x (boxes are (x, y, w, h, label));
            # NOTE(review): uses W - x rather than W - 1 - x — confirm the
            # intended pixel-centre convention.
            scaled_boxes[:, 0] = self.config["image_width"] - scaled_boxes[:, 0]
        if rand(0.0, 1.0) < 0.5:
            new_image = torchvision.transforms.RandomVerticalFlip(p=2)(new_image)
            # mirror the box centre y
            scaled_boxes[:, 1] = self.config["image_height"] - scaled_boxes[:, 1]
        return new_image, scaled_boxes
class ScaleImage(object):
    """Letterbox only the image to the configured size; the truth annotation
    dict is passed through unchanged (validation pipeline).
    """
    def __init__(self, config: dict) -> None:
        super().__init__()
        self.config = config  # expects "image_width" / "image_height"

    def __call__(self, raw_image, truth_annotation):
        raw_width, raw_height = raw_image.size
        target_w = self.config["image_width"]
        target_h = self.config["image_height"]
        # Aspect-preserving scale: the tighter dimension wins.
        scale = min(target_w / raw_width, target_h / raw_height)
        nw, nh = int(raw_width * scale), int(raw_height * scale)
        resized = raw_image.resize((nw, nh), PIL.Image.BICUBIC)
        # Grey canvas with the resized image pasted centred.
        canvas = PIL.Image.new("RGB", (target_w, target_h), (128, 128, 128))
        canvas.paste(resized, ((target_w - nw) // 2, (target_h - nh) // 2))
        return canvas, truth_annotation
class RescaleBoxes(object):
    """Map letterboxed-image box coordinates back to raw-image coordinates
    (inverse of the ScaleImage / ScaleImageAndBoxes letterboxing).
    """
    def __init__(self, config: dict) -> None:
        super().__init__()
        self.config = config  # expects "image_width" / "image_height"

    def __call__(self, raw_image, scaled_boxes: numpy.ndarray) -> numpy.ndarray:
        """
        :param raw_image: the original image; only its ``.size`` (w, h) is read.
        :param scaled_boxes: (N, >=4) array of (xmin, ymin, xmax, ymax, ...)
            in letterboxed coordinates; extra columns pass through untouched.
        :return: integer array of boxes in raw-image coordinates.
        """
        # 1. original image size and letterboxed (canvas) size
        raw_width, raw_height = raw_image.size
        scaled_width = self.config["image_width"]
        scaled_height = self.config["image_height"]
        # 2. the scale used when letterboxing (smallest ratio wins)
        scale = min(scaled_width / raw_width, scaled_height / raw_height)
        # 3. size of the scaled image inside the letterbox
        nw = int(raw_width * scale)
        nh = int(raw_height * scale)
        # 4. undo the centring padding, then undo the scaling
        rescaled_boxes = scaled_boxes.copy()
        rescaled_boxes[:, 0] -= (scaled_width - nw) // 2
        rescaled_boxes[:, 1] -= (scaled_height - nh) // 2
        rescaled_boxes[:, 2] -= (scaled_width - nw) // 2
        rescaled_boxes[:, 3] -= (scaled_height - nh) // 2
        rescaled_boxes[:, 0:4] = rescaled_boxes[:, 0:4] / scale
        # BUGFIX: ``numpy.int`` was removed in NumPy 1.24 (AttributeError);
        # the builtin ``int`` is the documented replacement.
        return numpy.around(rescaled_boxes).astype(int)
class NormImageAndBoxes(object):
    """Normalise the image to a CHW float array in [0, 1] and the box
    coordinates to [0, 1] relative to the configured image size.
    """
    def __init__(self, config: dict) -> None:
        super().__init__()
        self.config = config  # expects "image_width" / "image_height"

    def __call__(self, scaled_image, scaled_boxes):
        # PIL (W x H, RGB) -> float array (RGB channels x H x W), values in [0, 1]
        norm_image = numpy.asarray(torchvision.transforms.ToTensor()(scaled_image))
        width = self.config["image_width"]
        height = self.config["image_height"]
        norm_boxes = scaled_boxes.copy()
        norm_boxes[:, 0] /= width
        norm_boxes[:, 1] /= height
        norm_boxes[:, 2] /= width
        norm_boxes[:, 3] /= height
        return norm_image, norm_boxes
class NormImage(object):
    """Normalise the image to a CHW float array in [0, 1]; the truth
    annotation dict is passed through unchanged (validation pipeline).
    """
    def __init__(self, config: dict) -> None:
        super().__init__()
        self.config = config

    def __call__(self, scaled_image, truth_annotation):
        # PIL (W x H, RGB) -> float array (RGB channels x H x W), values in [0, 1]
        chw_image = numpy.asarray(torchvision.transforms.ToTensor()(scaled_image))
        return chw_image, truth_annotation
class RenormAndReformBoxes(object):
    """Recover pixel-space corner boxes from normalised training targets:
    ``(norm_x, norm_y, norm_w, norm_h, label)`` ->
    ``(xmin, ymin, xmax, ymax, label)``.
    """
    def __init__(self, config: dict) -> None:
        super().__init__()
        self.config = config  # expects "image_width" / "image_height"

    def __call__(self, tensord_boxes: torch.Tensor) -> numpy.ndarray:
        # Denormalise centre/size columns to pixel units.
        numpy_boxes = tensord_boxes.numpy().copy()
        numpy_boxes[:, 0] *= self.config["image_width"]
        numpy_boxes[:, 1] *= self.config["image_height"]
        numpy_boxes[:, 2] *= self.config["image_width"]
        numpy_boxes[:, 3] *= self.config["image_height"]
        # Centre/size -> corner coordinates; the label column passes through.
        scaled_boxes = numpy_boxes.copy()
        scaled_boxes[:, 0] = numpy_boxes[:, 0] - numpy_boxes[:, 2] / 2
        scaled_boxes[:, 1] = numpy_boxes[:, 1] - numpy_boxes[:, 3] / 2
        scaled_boxes[:, 2] = numpy_boxes[:, 0] + numpy_boxes[:, 2] / 2
        scaled_boxes[:, 3] = numpy_boxes[:, 1] + numpy_boxes[:, 3] / 2
        # BUGFIX: ``numpy.int`` was removed in NumPy 1.24 (AttributeError);
        # the builtin ``int`` is the documented replacement.
        return numpy.around(scaled_boxes).astype(int)
def train_collate_fn(batch: List[tuple]) -> (torch.Tensor, torch.Tensor):
    """Dataset helper: unpack a batch of (image, boxes) pairs and re-pack them.

    :param batch: list of (norm_image, norm_boxes) tuples from the dataset
    :return: (stacked image tensor, list of per-image box tensors)
    """
    # zip(*batch) transposes [(image, boxes), ...] into (images, ...) and (boxes, ...).
    images, boxes_groups = zip(*batch)
    stacked_images = torch.as_tensor(images)
    boxes_tensors = [torch.as_tensor(group) for group in boxes_groups]
    return stacked_images, boxes_tensors
def eval_collate_fn(batch: List[tuple]) -> (torch.Tensor, List[dict]):
    """Dataset helper: unpack a batch of (image, annotation) pairs and re-pack them.

    :param batch: list of (norm_image, truth_annotation) tuples
    :return: (stacked image tensor, list of annotation dicts)
    """
    # zip(*batch) transposes [(image, annotation), ...] into two parallel tuples.
    images, annotations = zip(*batch)
    return torch.as_tensor(images), list(annotations)
| [
"torch.as_tensor",
"numpy.random.rand",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.Grayscale",
"numpy.asarray",
"torchvision.transforms.ColorJitter",
"torchvision.transforms.RandomVerticalFlip",
"numpy.around",
"torchvision.transforms.ToTensor"
] | [((11807, 11835), 'torch.as_tensor', 'torch.as_tensor', (['norm_images'], {}), '(norm_images)\n', (11822, 11835), False, 'import torch\n'), ((12511, 12539), 'torch.as_tensor', 'torch.as_tensor', (['norm_images'], {}), '(norm_images)\n', (12526, 12539), False, 'import torch\n'), ((11862, 11889), 'torch.as_tensor', 'torch.as_tensor', (['norm_boxes'], {}), '(norm_boxes)\n', (11877, 11889), False, 'import torch\n'), ((144, 163), 'numpy.random.rand', 'numpy.random.rand', ([], {}), '()\n', (161, 163), False, 'import numpy\n'), ((4974, 5032), 'torchvision.transforms.ColorJitter', 'torchvision.transforms.ColorJitter', ([], {'brightness': '(1.0, 10.0)'}), '(brightness=(1.0, 10.0))\n', (5008, 5032), False, 'import torchvision\n'), ((5294, 5350), 'torchvision.transforms.ColorJitter', 'torchvision.transforms.ColorJitter', ([], {'contrast': '(1.0, 10.0)'}), '(contrast=(1.0, 10.0))\n', (5328, 5350), False, 'import torchvision\n'), ((5614, 5672), 'torchvision.transforms.ColorJitter', 'torchvision.transforms.ColorJitter', ([], {'saturation': '(1.0, 10.0)'}), '(saturation=(1.0, 10.0))\n', (5648, 5672), False, 'import torchvision\n'), ((5935, 5990), 'torchvision.transforms.Grayscale', 'torchvision.transforms.Grayscale', ([], {'num_output_channels': '(3)'}), '(num_output_channels=3)\n', (5967, 5990), False, 'import torchvision\n'), ((6425, 6473), 'torchvision.transforms.RandomHorizontalFlip', 'torchvision.transforms.RandomHorizontalFlip', ([], {'p': '(2)'}), '(p=2)\n', (6468, 6473), False, 'import torchvision\n'), ((6624, 6670), 'torchvision.transforms.RandomVerticalFlip', 'torchvision.transforms.RandomVerticalFlip', ([], {'p': '(2)'}), '(p=2)\n', (6665, 6670), False, 'import torchvision\n'), ((8873, 8901), 'numpy.around', 'numpy.around', (['rescaled_boxes'], {}), '(rescaled_boxes)\n', (8885, 8901), False, 'import numpy\n'), ((9385, 9418), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (9416, 9418), False, 'import torchvision\n'), ((10150, 
10183), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (10181, 10183), False, 'import torchvision\n'), ((11222, 11248), 'numpy.around', 'numpy.around', (['scaled_boxes'], {}), '(scaled_boxes)\n', (11234, 11248), False, 'import numpy\n'), ((1992, 2016), 'numpy.asarray', 'numpy.asarray', (['raw_boxes'], {}), '(raw_boxes)\n', (2005, 2016), False, 'import numpy\n')] |
# encoding: utf-8
from utils import audio
from hparams import hparams
import numpy as np
import os
import lws
def main():
    """Round-trip every .wav in ./data through LWS phase reconstruction.

    For each file the magnitude spectrogram is taken, the phase is
    re-estimated with LWS, and the result is written next to the input
    as <name>.lws.gen.wav.
    """
    data_foler = "data"
    wavs = [os.path.join(data_foler, file[:-4]) for file in os.listdir(data_foler) if file.endswith(".wav")]
    outputs_lws = [file + ".lws.gen.wav" for file in wavs]
    wavs = [audio.load_wav(wav_path + ".wav", hparams.sample_rate) for wav_path in wavs]
    lws_processor = lws.lws(512, 128, mode="speech")  # 512: window length; 128: window shift
    # Pair each waveform with its output path instead of tracking an index by hand.
    for x, out_path in zip(wavs, outputs_lws):
        X = lws_processor.stft(x)  # x is a single-channel waveform
        X0 = np.abs(X)  # magnitude spectrogram (phase discarded)
        print('{:6}: {:5.2f} dB'.format('Abs(X)', lws_processor.get_consistency(X0)))
        # Reconstruct a consistent complex spectrogram from the magnitude alone.
        X1 = lws_processor.run_lws(X0)
        print(X1.shape)
        print('{:6}: {:5.2f} dB'.format('LWS', lws_processor.get_consistency(X1)))
        print(X1.shape)
        wav = lws_processor.istft(X1).astype(np.float32)
        audio.save_wav(wav, out_path)
# Run the batch conversion when executed as a script.
if __name__ == '__main__':
    main()
| [
"numpy.abs",
"os.listdir",
"os.path.join",
"lws.lws",
"utils.audio.save_wav",
"utils.audio.load_wav"
] | [((426, 458), 'lws.lws', 'lws.lws', (['(512)', '(128)'], {'mode': '"""speech"""'}), "(512, 128, mode='speech')\n", (433, 458), False, 'import lws\n'), ((160, 195), 'os.path.join', 'os.path.join', (['data_foler', 'file[:-4]'], {}), '(data_foler, file[:-4])\n', (172, 195), False, 'import os\n'), ((328, 382), 'utils.audio.load_wav', 'audio.load_wav', (["(wav_path + '.wav')", 'hparams.sample_rate'], {}), "(wav_path + '.wav', hparams.sample_rate)\n", (342, 382), False, 'from utils import audio\n'), ((616, 625), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (622, 625), True, 'import numpy as np\n'), ((1089, 1124), 'utils.audio.save_wav', 'audio.save_wav', (['wav', 'outputs_lws[i]'], {}), '(wav, outputs_lws[i])\n', (1103, 1124), False, 'from utils import audio\n'), ((208, 230), 'os.listdir', 'os.listdir', (['data_foler'], {}), '(data_foler)\n', (218, 230), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# @Time : 2020/7/28 17:17
# @Author : sen
import numpy as np
import pylab as pl
from scipy import signal
from numpy import fft
def fourierExtrapolation(x, n_predict):
n = len(x)
n_harm = 50 # number of harmonics in model
t = np.arange(0, n)
p = np.polyfit(t, x, 1) # find linear trend in x
x_notrend = x - p[0] * t # detrended x
x_freqdom = fft.fft(x_notrend) # detrended x in frequency domain
f = fft.fftfreq(n) # frequencies
indexes = list(range(n))
# sort indexes by frequency, lower -> higher
indexes.sort(key = lambda i: np.absolute(f[i]))
t = np.arange(0, n + n_predict)
restored_sig = np.zeros(t.size)
for i in indexes[:1 + n_harm * 2]:
ampli = np.absolute(x_freqdom[i]) / n # amplitude
phase = np.angle(x_freqdom[i]) # phase
restored_sig += ampli * np.cos(2 * np.pi * f[i] * t + phase)
return ((restored_sig + p[0] * t),x_freqdom/100)
def main():
    """Plot a 5 Hz sine wave, its Fourier extrapolation, and the (scaled) spectrum."""
    t = np.linspace(0, 1, 500)
    # y = list(signal.sawtooth((2 * np.pi * 5 * t), 0.5))
    samples = list(np.sin(2 * np.pi * 5 * t))
    n_predict = 1000
    extrapolation, frequency = fourierExtrapolation(samples, n_predict)
    sample_axis = np.arange(0, 500)
    pl.plot(sample_axis, samples, 'r', label='triangle')
    pl.plot(sample_axis, frequency, 'g', label='frequency')
    pl.plot(np.arange(0, extrapolation.size), extrapolation, 'b', label='extrapolation')
    pl.legend()
    pl.show()
# Demo entry point: plot the extrapolation when run as a script.
if __name__ == "__main__":
    main()
| [
"numpy.polyfit",
"numpy.fft.fftfreq",
"numpy.fft.fft",
"numpy.absolute",
"pylab.legend",
"numpy.angle",
"numpy.zeros",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.arange",
"pylab.show"
] | [((289, 304), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (298, 304), True, 'import numpy as np\n'), ((313, 332), 'numpy.polyfit', 'np.polyfit', (['t', 'x', '(1)'], {}), '(t, x, 1)\n', (323, 332), True, 'import numpy as np\n'), ((435, 453), 'numpy.fft.fft', 'fft.fft', (['x_notrend'], {}), '(x_notrend)\n', (442, 453), False, 'from numpy import fft\n'), ((499, 513), 'numpy.fft.fftfreq', 'fft.fftfreq', (['n'], {}), '(n)\n', (510, 513), False, 'from numpy import fft\n'), ((681, 708), 'numpy.arange', 'np.arange', (['(0)', '(n + n_predict)'], {}), '(0, n + n_predict)\n', (690, 708), True, 'import numpy as np\n'), ((728, 744), 'numpy.zeros', 'np.zeros', (['t.size'], {}), '(t.size)\n', (736, 744), True, 'import numpy as np\n'), ((1047, 1069), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(500)'], {}), '(0, 1, 500)\n', (1058, 1069), True, 'import numpy as np\n'), ((1476, 1487), 'pylab.legend', 'pl.legend', ([], {}), '()\n', (1485, 1487), True, 'import pylab as pl\n'), ((1492, 1501), 'pylab.show', 'pl.show', ([], {}), '()\n', (1499, 1501), True, 'import pylab as pl\n'), ((861, 883), 'numpy.angle', 'np.angle', (['x_freqdom[i]'], {}), '(x_freqdom[i])\n', (869, 883), True, 'import numpy as np\n'), ((1140, 1165), 'numpy.sin', 'np.sin', (['(2 * np.pi * 5 * t)'], {}), '(2 * np.pi * 5 * t)\n', (1146, 1165), True, 'import numpy as np\n'), ((1266, 1283), 'numpy.arange', 'np.arange', (['(0)', '(500)'], {}), '(0, 500)\n', (1275, 1283), True, 'import numpy as np\n'), ((1325, 1342), 'numpy.arange', 'np.arange', (['(0)', '(500)'], {}), '(0, 500)\n', (1334, 1342), True, 'import numpy as np\n'), ((1393, 1425), 'numpy.arange', 'np.arange', (['(0)', 'extrapolation.size'], {}), '(0, extrapolation.size)\n', (1402, 1425), True, 'import numpy as np\n'), ((800, 825), 'numpy.absolute', 'np.absolute', (['x_freqdom[i]'], {}), '(x_freqdom[i])\n', (811, 825), True, 'import numpy as np\n'), ((936, 972), 'numpy.cos', 'np.cos', (['(2 * np.pi * f[i] * t + phase)'], {}), '(2 * np.pi 
* f[i] * t + phase)\n', (942, 972), True, 'import numpy as np\n'), ((654, 671), 'numpy.absolute', 'np.absolute', (['f[i]'], {}), '(f[i])\n', (665, 671), True, 'import numpy as np\n')] |
import numpy as np
def dice_numpy(targets, outputs, threshold=None, min_area=None,
               empty_one: bool = True, eps=1e-6):
    """Dice coefficient between a target mask and a (possibly soft) prediction.

    :param targets: ground-truth binary mask
    :param outputs: prediction; binarized at ``threshold`` when one is given
    :param threshold: optional cut-off applied to ``outputs``
    :param min_area: predictions with a total smaller than this are zeroed out
    :param empty_one: score 1 when both masks are empty
    :param eps: numerical guard against division by zero
    """
    if threshold is not None:
        # noinspection PyUnresolvedReferences
        outputs = (outputs >= threshold).astype(np.uint8)
    targets_sum = targets.sum()
    outputs_sum = outputs.sum()
    # A too-small prediction is treated as "nothing detected".
    if min_area and outputs_sum < min_area:
        outputs = np.zeros(outputs.shape, dtype=np.uint8)
        outputs_sum = 0
    if empty_one and targets_sum == 0 and outputs_sum == 0:
        return 1
    overlap = (targets * outputs).sum()
    denominator = targets_sum + outputs_sum
    return 2 * overlap / (denominator + eps)
| [
"numpy.zeros"
] | [((397, 436), 'numpy.zeros', 'np.zeros', (['outputs.shape'], {'dtype': 'np.uint8'}), '(outputs.shape, dtype=np.uint8)\n', (405, 436), True, 'import numpy as np\n')] |
import sys
sys.path.append('..')
import torch
import torch.nn as nn
import numpy as np
from dataclasses import dataclass
from trphysx.transformer import PhysformerGPT2
@dataclass
class PhooConfig:
    """Minimal GPT-2 style configuration for exercising PhysformerGPT2 in the smoke test."""
    n_ctx:int = 16  # maximum context length (time-steps)
    n_embd:int = 16  # embedding dimension
    n_layer:int = 2  # number of transformer layers
    n_head:int = 2  # attention heads per layer
    activation_function:str = "gelu_new"
    resid_pdrop:float = 0.0  # dropout disabled so the test is deterministic
    embd_pdrop:float = 0.0
    attn_pdrop:float = 0.0
    layer_norm_epsilon:float = 1e-5
    initializer_range:float = 0.1
    output_hidden_states:bool = False
    output_attentions:bool = True  # attention maps are inspected by the assertions
    use_cache:bool = False
    model_type:str = "Phoo"
if __name__ == "__main__":
    # === GPT2 Tests ===
    config = PhooConfig()
    model = PhysformerGPT2(config)
    # === Forward test ===
    batch_size = np.random.randint(1, 10)
    n_steps = np.random.randint(1, config.n_ctx)
    x = torch.randn(batch_size, n_steps, config.n_embd) # Batch, time-steps, embed
    output = model(x, use_cache=False, output_attentions=True)
    # Test output tensor size is correct
    # NOTE(review): the last dim is asserted against n_ctx, but the input uses
    # n_embd -- this only passes because both are 16 here; confirm which is meant.
    assert output[0].size() == torch.Size((batch_size, n_steps, config.n_ctx))
    # Test attention matrix sizes
    assert type(output[1]) == tuple
    assert len(output[1]) == config.n_layer
    for i in range(config.n_layer):
        assert output[1][i].size() == torch.Size((batch_size, config.n_head, n_steps, n_steps))
        # Make sure attention scores at each step are summing up to 1 (approx.)
        assert (torch.abs(torch.mean(1.0 - torch.sum(output[1][i], dim=-1))) < 1e-6).item()
    # Test generation
    n_steps = np.random.randint(config.n_ctx, 2*config.n_ctx)
    inputs_embeds = torch.randn(batch_size, 1, config.n_embd)
    output = model.generate(inputs_embeds=inputs_embeds, max_length=n_steps, use_cache=False)
    assert output[0].size() == torch.Size((batch_size, n_steps, config.n_embd))
| [
"trphysx.transformer.PhysformerGPT2",
"numpy.random.randint",
"torch.sum",
"sys.path.append",
"torch.Size",
"torch.randn"
] | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((688, 710), 'trphysx.transformer.PhysformerGPT2', 'PhysformerGPT2', (['config'], {}), '(config)\n', (702, 710), False, 'from trphysx.transformer import PhysformerGPT2\n'), ((756, 780), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (773, 780), True, 'import numpy as np\n'), ((795, 829), 'numpy.random.randint', 'np.random.randint', (['(1)', 'config.n_ctx'], {}), '(1, config.n_ctx)\n', (812, 829), True, 'import numpy as np\n'), ((838, 885), 'torch.randn', 'torch.randn', (['batch_size', 'n_steps', 'config.n_embd'], {}), '(batch_size, n_steps, config.n_embd)\n', (849, 885), False, 'import torch\n'), ((1553, 1602), 'numpy.random.randint', 'np.random.randint', (['config.n_ctx', '(2 * config.n_ctx)'], {}), '(config.n_ctx, 2 * config.n_ctx)\n', (1570, 1602), True, 'import numpy as np\n'), ((1621, 1662), 'torch.randn', 'torch.randn', (['batch_size', '(1)', 'config.n_embd'], {}), '(batch_size, 1, config.n_embd)\n', (1632, 1662), False, 'import torch\n'), ((1049, 1096), 'torch.Size', 'torch.Size', (['(batch_size, n_steps, config.n_ctx)'], {}), '((batch_size, n_steps, config.n_ctx))\n', (1059, 1096), False, 'import torch\n'), ((1789, 1837), 'torch.Size', 'torch.Size', (['(batch_size, n_steps, config.n_embd)'], {}), '((batch_size, n_steps, config.n_embd))\n', (1799, 1837), False, 'import torch\n'), ((1285, 1342), 'torch.Size', 'torch.Size', (['(batch_size, config.n_head, n_steps, n_steps)'], {}), '((batch_size, config.n_head, n_steps, n_steps))\n', (1295, 1342), False, 'import torch\n'), ((1466, 1497), 'torch.sum', 'torch.sum', (['output[1][i]'], {'dim': '(-1)'}), '(output[1][i], dim=-1)\n', (1475, 1497), False, 'import torch\n')] |
import numpy as np
class MLText():
    """Utilities for one-hot encoding/decoding text against a character vocabulary."""

    def __init__(self, text, vocab=None):
        """
        :param text: Takes text as input.
        :param vocab: Optional character vocabulary; inferred from the text on
            the first call to :meth:`onehotencode` when omitted.
        """
        self.text = text
        # BUG FIX: the original used a mutable default argument (vocab=[]);
        # copy any supplied vocab so instances never share one list.
        self.vocab = list(vocab) if vocab else []

    def onehotencode(self, spliter="/n"):
        """One-hot encode the stored text, producing one array of rows per line.

        :param spliter: Token the text is split on.
            NOTE(review): the default is the literal two-character string "/n";
            "\\n" was probably intended -- kept as-is for compatibility.
        :return: numpy array of shape (lines, chars, vocab_size).
        :raises ValueError: if the stored text is not a string.
        """
        if not isinstance(self.text, str):
            raise ValueError("Onehotencode only takes a string.")
        if not self.vocab:
            # Derive the vocabulary from the characters actually present.
            self.vocab = list(set(self.text))
        vocab = self.vocab
        encoded_lines = []
        for line in self.text.split(spliter):
            rows = np.zeros((len(line), len(vocab)))
            for position, char in enumerate(line):
                rows[position, vocab.index(char)] = 1
            encoded_lines.append(rows)
        return np.asarray(encoded_lines)

    def onehotdecode(self, encodedvalue):
        """Decode a one-hot encoded value back into text.

        Only the first encoded line (``encodedvalue[0]``) is decoded,
        mirroring the original behaviour.
        """
        # join() instead of repeated string concatenation (quadratic in the original).
        chars = []
        for row in encodedvalue[0]:
            index = np.where(row == 1)[0][0]
            chars.append(self.vocab[index])
        return "".join(chars)
def loadtextfiles(path):
    """Read a text file and wrap its contents in an MLText instance.

    :param path: path of the file to load
    :return: MLText built from the file's full contents
    """
    # BUG FIX: the original leaked the file handle; a context manager
    # guarantees the file is closed even if read() raises.
    with open(path, "r") as f:
        return MLText(f.read())
"numpy.where",
"numpy.asarray"
] | [((1119, 1144), 'numpy.asarray', 'np.asarray', (['onehotencoded'], {}), '(onehotencoded)\n', (1129, 1144), True, 'import numpy as np\n'), ((1067, 1089), 'numpy.asarray', 'np.asarray', (['onehotlist'], {}), '(onehotlist)\n', (1077, 1089), True, 'import numpy as np\n'), ((1373, 1389), 'numpy.where', 'np.where', (['(i == 1)'], {}), '(i == 1)\n', (1381, 1389), True, 'import numpy as np\n')] |
import numpy as np
import open3d as o3d
import os
import trimesh
from bricks_modeling.bricks.bricktemplate import BrickTemplate
from bricks_modeling.connections.connpoint import CPoint
from bricks_modeling.connections.conn_type import compute_conn_type
from bricks_modeling.database.ldraw_colors import color_phraser
import util.geometry_util as geo_util
import itertools as iter
import json
from util.geometry_util import get_random_transformation
from bricks_modeling.file_IO.util import to_ldr_format
from bricks_modeling import config
import util.cuboid_collision as cuboid_col
def get_corner_pos(brick, four_point=False):
    """Collect world-space corner points for every collision bbox of a brick.

    With four_point=False, two opposite corners per box are returned;
    with four_point=True, the four corners of the top (y = +h/2) face.
    :return: list of 3-vector corner positions.
    """
    signs = (
        np.array([[1, 1, 1], [1, 1, -1], [-1, 1, -1], [-1, 1, 1]])
        if four_point
        else np.array([[1, 1, 1], [-1, -1, -1]])
    )
    corners = []
    for bbox in brick.get_col_bbox():
        half_extent = np.array([bbox["Dimension"][0] / 2, bbox["Dimension"][1] / 2, bbox["Dimension"][2] / 2])
        local_corners = np.tile(half_extent, (len(signs), 1)) * signs
        # Rotate into world orientation, then offset by the box origin.
        world = np.array(bbox["Rotation"] @ local_corners.transpose()).transpose() + np.array(
            bbox["Origin"])
        corners.extend(world[k] for k in range(len(signs)))
    return corners
class BrickInstance:
    """A placed LEGO brick: a BrickTemplate plus a 4x4 world transform and a color.

    NOTE(review): defining __eq__ without __hash__ makes instances unhashable
    (Python sets __hash__ to None) -- confirm instances are never used in
    sets/dicts.
    """
    def __init__(self, template: BrickTemplate, trans_matrix, color=15):
        self.template = template
        self.trans_matrix = trans_matrix  # 4x4 homogeneous world transform
        self.color = color  # LDraw color code (int) or hex string
    def get_col_bbox(self):
        """Read the per-part collision boxes from the .col file and move them into world space.

        Returns a list of {"Origin", "Rotation", "Dimension"} dicts, or [] when
        no .col file exists for this template.
        """
        bbox = []
        brick_id = self.template.id
        brick_rot = self.get_rotation()
        brick_trans = self.get_translation()
        if os.path.exists(os.path.join(config.col_folder, f"{brick_id}.col")):
            # NOTE(review): the file handle from open() is never closed.
            for line in open(os.path.join(config.col_folder, f"{brick_id}.col")):
                # Each line: <2 ignored fields> <3x3 orient> <origin xyz> <half-dims xyz>
                line = (line.split(" "))[:17]
                line = [float(x) for x in line]
                init_orient = (np.array(line[2:11])).reshape((3, 3))
                init_origin = np.array(line[11:14])
                init_dim = init_orient @ np.array(line[14:17]) # in (x,y,z) format
                # Compose the brick's world transform with the box's local pose.
                origin = brick_rot @ init_origin + brick_trans
                rotation = brick_rot @ init_orient
                dim = init_dim * 2 + 1 # why plus 1?
                bbox.append({"Origin": origin, "Rotation": rotation, "Dimension": dim})
            return bbox
            """ read bounding box """
        else:
            return []
    def get_brick_bbox(self):
        """Axis-aligned bounding box (world space) enclosing all collision-box corners."""
        corner_pos = np.array(get_corner_pos(self))
        max_x = np.amax(corner_pos[:, 0])
        min_x = np.amin(corner_pos[:, 0])
        max_y = np.amax(corner_pos[:, 1])
        min_y = np.amin(corner_pos[:, 1])
        max_z = np.amax(corner_pos[:, 2])
        min_z = np.amin(corner_pos[:, 2])
        origin = [(max_x + min_x) / 2, (max_y + min_y) / 2, (max_z + min_z) / 2]
        dim = [max_x - min_x, max_y - min_y, max_z - min_z]
        return {"Origin": origin, "Rotation": np.identity(3), "Dimension": dim}
    def __eq__(self, other):
        """Overrides the default implementation.

        Two instances are equal when they share a template id and either the
        same transform (within 1e-6) or the same set of connection points
        (single-connection-point bricks are never considered equal this way).
        """
        if isinstance(other, BrickInstance) and self.template.id == other.template.id:
            if (
                np.max(self.trans_matrix - other.trans_matrix)
                - np.min(self.trans_matrix - other.trans_matrix)
                < 1e-6
            ):  # transformation matrix the same
                return True
            else:
                self_c_points = self.get_current_conn_points()
                other_c_points = other.get_current_conn_points()
                for i in range(len(self_c_points)):
                    if self_c_points[i] not in other_c_points:  # cpoint is not the same
                        return False
                if len(self_c_points) == 1:
                    return False
                return True
        else:
            return False
    def connect(self, other):
        """True when any pair of connection points between the two bricks is compatible."""
        for p_self, p_other in iter.product(self.get_current_conn_points(), other.get_current_conn_points()):
            if not compute_conn_type(p_self, p_other) == None:
                return True
        return False
    def collide(self, other):
        """True when the two bricks' collision boxes overlap.

        A cheap AABB-vs-AABB test rejects far-apart bricks before the
        per-box pairwise check.
        """
        self_brick_bbox = self.get_brick_bbox()
        other_brick_bbox = other.get_brick_bbox()
        if not cuboid_col.cub_collision_detect(self_brick_bbox, other_brick_bbox):
            return False
        self_cp_bbox = self.get_col_bbox()
        other_cp_bbox = other.get_col_bbox()
        for bb1, bb2 in iter.product(self_cp_bbox, other_cp_bbox):
            if cuboid_col.cub_collision_detect(bb1, bb2):
                return True
        return False
    def to_ldraw(self):
        """Serialize this instance as one LDraw type-1 line."""
        return to_ldr_format(self.color, self.trans_matrix, f"{self.template.id}.dat")
    def rotate(self, rot_mat):
        """Left-multiply the current rotation by rot_mat (in place)."""
        self.trans_matrix[:3, :3] = np.dot(rot_mat, self.trans_matrix[:3, :3])
    def translate(self, trans_vec):
        """Add trans_vec to the current translation (in place)."""
        self.trans_matrix[:3, 3:4] = self.trans_matrix[:3, 3:4] + np.reshape(
            trans_vec, (3, 1)
        )
    def get_rotation(self):
        """The 3x3 rotation part of the transform."""
        return self.trans_matrix[:3, :3]
    def get_translation(self):
        """The translation vector of the transform."""
        return self.trans_matrix[:3, 3]
    def reset_transformation(self):
        """Reset the transform to identity."""
        self.trans_matrix = np.identity(4, dtype=float)
    def get_translation_for_mesh(self):
        # 2.5 converts LDraw units to the mesh's scale -- TODO confirm.
        return self.trans_matrix[:3, 3] / 2.5
    def get_current_conn_points(self):
        """Connection points of the template, transformed into world space."""
        conn_points = []
        for cp in self.template.c_points:
            conn_point_orient = geo_util.vec_local2world(
                self.trans_matrix[:3, :3], cp.orient
            )
            conn_point_bi_orient = geo_util.vec_local2world(
                self.trans_matrix[:3, :3], cp.bi_orient
            )
            conn_point_position = geo_util.point_local2world(
                self.trans_matrix[:3, :3], self.trans_matrix[:3, 3], cp.pos
            )
            conn_points.append(
                CPoint(
                    conn_point_position,
                    conn_point_orient,
                    conn_point_bi_orient,
                    cp.type,
                )
            )
        return conn_points
    def get_mesh(self):
        """Load, color, and transform the Open3D mesh for this brick."""
        color_dict = color_phraser()
        obj_file_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "database", "obj",
                                     f'{self.template.id + ".obj"}')
        mesh = o3d.io.read_triangle_mesh(
            obj_file_path
        )
        mesh.compute_vertex_normals()
        if str(self.color) in color_dict.keys():
            mesh.paint_uniform_color(color_dict[str(self.color)])
        elif not str(self.color).isdigit():  # color stored in hex
            rgb_color = trimesh.visual.color.hex_to_rgba(self.color[3:])
            mesh.paint_uniform_color(list(map(lambda comp: comp / 255, rgb_color[:3])))
        else:
            print("warning, no such color in ldview, print red")
            mesh.paint_uniform_color([1, 0, 0])
        mesh.scale(25, center=(0, 0, 0))
        mesh.rotate(self.get_rotation().tolist(), [0, 0, 0])
        mesh.translate([i for i in self.get_translation().tolist()])
        return mesh
if __name__ == "__main__":
    from bricks_modeling.file_IO.model_reader import read_bricks_from_file
    from bricks_modeling.file_IO.model_writer import write_bricks_to_file
    from bricks_modeling.connectivity_graph import ConnectivityGraph
    # NOTE(review): the input path is empty -- point this at an .ldr file to run the demo.
    bricks = read_bricks_from_file("")
    # Pairwise-compare every unordered brick pair: equality, collision, connectivity.
    for i in range(len(bricks)):
        for j in range(len(bricks)):
            if not i == j and i > j:
                collide = bricks[i].collide(bricks[j])
                # Bricks only count as connected when they do not also collide.
                connect = bricks[i].connect(bricks[j]) and (not collide)
                print(f"{i}=={j}: ", bricks[i] == bricks[j])
                print(f"{i} collide with {j}: ", collide, "\n")
                print(f"{i} connect with {j}: ", connect, "\n")
| [
"bricks_modeling.connections.connpoint.CPoint",
"util.geometry_util.point_local2world",
"numpy.array",
"open3d.io.read_triangle_mesh",
"numpy.reshape",
"itertools.product",
"numpy.max",
"util.cuboid_collision.cub_collision_detect",
"numpy.dot",
"numpy.min",
"util.geometry_util.vec_local2world",
... | [((7756, 7781), 'bricks_modeling.file_IO.model_reader.read_bricks_from_file', 'read_bricks_from_file', (['""""""'], {}), "('')\n", (7777, 7781), False, 'from bricks_modeling.file_IO.model_reader import read_bricks_from_file\n'), ((762, 820), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, -1], [-1, 1, -1], [-1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, -1], [-1, 1, -1], [-1, 1, 1]])\n', (770, 820), True, 'import numpy as np\n'), ((858, 893), 'numpy.array', 'np.array', (['[[1, 1, 1], [-1, -1, -1]]'], {}), '([[1, 1, 1], [-1, -1, -1]])\n', (866, 893), True, 'import numpy as np\n'), ((943, 1036), 'numpy.array', 'np.array', (["[bbox['Dimension'][0] / 2, bbox['Dimension'][1] / 2, bbox['Dimension'][2] / 2]"], {}), "([bbox['Dimension'][0] / 2, bbox['Dimension'][1] / 2, bbox[\n 'Dimension'][2] / 2])\n", (951, 1036), True, 'import numpy as np\n'), ((2902, 2927), 'numpy.amax', 'np.amax', (['corner_pos[:, 0]'], {}), '(corner_pos[:, 0])\n', (2909, 2927), True, 'import numpy as np\n'), ((2944, 2969), 'numpy.amin', 'np.amin', (['corner_pos[:, 0]'], {}), '(corner_pos[:, 0])\n', (2951, 2969), True, 'import numpy as np\n'), ((2986, 3011), 'numpy.amax', 'np.amax', (['corner_pos[:, 1]'], {}), '(corner_pos[:, 1])\n', (2993, 3011), True, 'import numpy as np\n'), ((3028, 3053), 'numpy.amin', 'np.amin', (['corner_pos[:, 1]'], {}), '(corner_pos[:, 1])\n', (3035, 3053), True, 'import numpy as np\n'), ((3070, 3095), 'numpy.amax', 'np.amax', (['corner_pos[:, 2]'], {}), '(corner_pos[:, 2])\n', (3077, 3095), True, 'import numpy as np\n'), ((3112, 3137), 'numpy.amin', 'np.amin', (['corner_pos[:, 2]'], {}), '(corner_pos[:, 2])\n', (3119, 3137), True, 'import numpy as np\n'), ((4855, 4896), 'itertools.product', 'iter.product', (['self_cp_bbox', 'other_cp_bbox'], {}), '(self_cp_bbox, other_cp_bbox)\n', (4867, 4896), True, 'import itertools as iter\n'), ((5045, 5116), 'bricks_modeling.file_IO.util.to_ldr_format', 'to_ldr_format', (['self.color', 'self.trans_matrix', 
'f"""{self.template.id}.dat"""'], {}), "(self.color, self.trans_matrix, f'{self.template.id}.dat')\n", (5058, 5116), False, 'from bricks_modeling.file_IO.util import to_ldr_format\n'), ((5185, 5227), 'numpy.dot', 'np.dot', (['rot_mat', 'self.trans_matrix[:3, :3]'], {}), '(rot_mat, self.trans_matrix[:3, :3])\n', (5191, 5227), True, 'import numpy as np\n'), ((5590, 5617), 'numpy.identity', 'np.identity', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (5601, 5617), True, 'import numpy as np\n'), ((6533, 6548), 'bricks_modeling.database.ldraw_colors.color_phraser', 'color_phraser', ([], {}), '()\n', (6546, 6548), False, 'from bricks_modeling.database.ldraw_colors import color_phraser\n'), ((6733, 6773), 'open3d.io.read_triangle_mesh', 'o3d.io.read_triangle_mesh', (['obj_file_path'], {}), '(obj_file_path)\n', (6758, 6773), True, 'import open3d as o3d\n'), ((1351, 1375), 'numpy.array', 'np.array', (["bbox['Origin']"], {}), "(bbox['Origin'])\n", (1359, 1375), True, 'import numpy as np\n'), ((2017, 2067), 'os.path.join', 'os.path.join', (['config.col_folder', 'f"""{brick_id}.col"""'], {}), "(config.col_folder, f'{brick_id}.col')\n", (2029, 2067), False, 'import os\n'), ((3325, 3339), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (3336, 3339), True, 'import numpy as np\n'), ((4649, 4715), 'util.cuboid_collision.cub_collision_detect', 'cuboid_col.cub_collision_detect', (['self_brick_bbox', 'other_brick_bbox'], {}), '(self_brick_bbox, other_brick_bbox)\n', (4680, 4715), True, 'import util.cuboid_collision as cuboid_col\n'), ((4913, 4954), 'util.cuboid_collision.cub_collision_detect', 'cuboid_col.cub_collision_detect', (['bb1', 'bb2'], {}), '(bb1, bb2)\n', (4944, 4954), True, 'import util.cuboid_collision as cuboid_col\n'), ((5331, 5360), 'numpy.reshape', 'np.reshape', (['trans_vec', '(3, 1)'], {}), '(trans_vec, (3, 1))\n', (5341, 5360), True, 'import numpy as np\n'), ((5845, 5907), 'util.geometry_util.vec_local2world', 'geo_util.vec_local2world', 
(['self.trans_matrix[:3, :3]', 'cp.orient'], {}), '(self.trans_matrix[:3, :3], cp.orient)\n', (5869, 5907), True, 'import util.geometry_util as geo_util\n'), ((5973, 6038), 'util.geometry_util.vec_local2world', 'geo_util.vec_local2world', (['self.trans_matrix[:3, :3]', 'cp.bi_orient'], {}), '(self.trans_matrix[:3, :3], cp.bi_orient)\n', (5997, 6038), True, 'import util.geometry_util as geo_util\n'), ((6103, 6194), 'util.geometry_util.point_local2world', 'geo_util.point_local2world', (['self.trans_matrix[:3, :3]', 'self.trans_matrix[:3, 3]', 'cp.pos'], {}), '(self.trans_matrix[:3, :3], self.trans_matrix[:3,\n 3], cp.pos)\n', (6129, 6194), True, 'import util.geometry_util as geo_util\n'), ((1093, 1123), 'numpy.tile', 'np.tile', (['cuboid_center', '(4, 1)'], {}), '(cuboid_center, (4, 1))\n', (1100, 1123), True, 'import numpy as np\n'), ((1196, 1226), 'numpy.tile', 'np.tile', (['cuboid_center', '(2, 1)'], {}), '(cuboid_center, (2, 1))\n', (1203, 1226), True, 'import numpy as np\n'), ((2099, 2149), 'os.path.join', 'os.path.join', (['config.col_folder', 'f"""{brick_id}.col"""'], {}), "(config.col_folder, f'{brick_id}.col')\n", (2111, 2149), False, 'import os\n'), ((2345, 2366), 'numpy.array', 'np.array', (['line[11:14]'], {}), '(line[11:14])\n', (2353, 2366), True, 'import numpy as np\n'), ((6269, 6346), 'bricks_modeling.connections.connpoint.CPoint', 'CPoint', (['conn_point_position', 'conn_point_orient', 'conn_point_bi_orient', 'cp.type'], {}), '(conn_point_position, conn_point_orient, conn_point_bi_orient, cp.type)\n', (6275, 6346), False, 'from bricks_modeling.connections.connpoint import CPoint\n'), ((6602, 6627), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6617, 6627), False, 'import os\n'), ((7040, 7088), 'trimesh.visual.color.hex_to_rgba', 'trimesh.visual.color.hex_to_rgba', (['self.color[3:]'], {}), '(self.color[3:])\n', (7072, 7088), False, 'import trimesh\n'), ((2408, 2429), 'numpy.array', 'np.array', (['line[14:17]'], {}), 
'(line[14:17])\n', (2416, 2429), True, 'import numpy as np\n'), ((3564, 3610), 'numpy.max', 'np.max', (['(self.trans_matrix - other.trans_matrix)'], {}), '(self.trans_matrix - other.trans_matrix)\n', (3570, 3610), True, 'import numpy as np\n'), ((3633, 3679), 'numpy.min', 'np.min', (['(self.trans_matrix - other.trans_matrix)'], {}), '(self.trans_matrix - other.trans_matrix)\n', (3639, 3679), True, 'import numpy as np\n'), ((4412, 4446), 'bricks_modeling.connections.conn_type.compute_conn_type', 'compute_conn_type', (['p_self', 'p_other'], {}), '(p_self, p_other)\n', (4429, 4446), False, 'from bricks_modeling.connections.conn_type import compute_conn_type\n'), ((2277, 2297), 'numpy.array', 'np.array', (['line[2:11]'], {}), '(line[2:11])\n', (2285, 2297), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
"""
for class_archivo in archivos:
f = open(os.path.abspath(os.path.join(path,class_archivo)),'r')
lineas = f.read().splitlines()
#print(lineas,"\n")
f.close()
"""
def leer_predicciones(archivos, base_dir=None):
    """Load YOLO-style prediction files into DataFrames.

    :param archivos: file names to read
    :param base_dir: directory containing the files; defaults to the
        module-level ``path`` (set under ``__main__``) for backward
        compatibility.  The original silently depended on that global and
        raised NameError when the module was imported.
    :return: list of DataFrames, one per file, with named columns.
    """
    if base_dir is None:
        base_dir = path  # module-level global, defined in the __main__ block
    lista_archivos = []
    for file_archivos in archivos:
        data = pd.read_csv(os.path.abspath(os.path.join(base_dir, file_archivos)), sep=" ", header=None)
        data.columns = ["imagen", "prob", "xmin_pred", "ymin_pred", "xmax_pred", "ymax_pred"]
        lista_archivos.append(data)
    return lista_archivos
def read_data_cfg(datacfg):
    """Parse a darknet-style .data config file into a dict.

    Lines look like ``key = value``; blank lines are skipped.  The keys
    'gpus' and 'num_workers' get defaults the file may override.
    """
    options = {'gpus': '0,1,2,3', 'num_workers': '10'}
    with open(datacfg, 'r') as fp:
        for raw_line in fp.readlines():
            stripped = raw_line.strip()
            if stripped == '':
                continue
            key, value = stripped.split('=')
            options[key.strip()] = value.strip()
    return options
def leer_target_cord(lineas_archivo_valid):
    """Load the ground-truth coordinates matching a list of image paths.

    Each image path like .../imagenes/xxx.jpg maps to a label file
    .../labels/xxx.txt whose rows are: class xmin ymin xmax ymax.
    :return: one DataFrame with all ground-truth rows concatenated.
    """
    frames = []
    for image_path in lineas_archivo_valid:
        label_path = image_path.replace("imagenes", "labels").replace(".jpg", ".txt")
        stem = os.path.basename(label_path).split('.')[0]
        name_frame = pd.DataFrame(columns=['imagen'])
        name_frame.loc[0] = [stem]
        coords = pd.read_csv(label_path, sep=" ", header=None)
        merged = pd.concat([name_frame, coords], axis=1, ignore_index=True)
        merged.columns = ["imagen", "class", "xmin", "ymin", "xmax", "ymax"]
        frames.append(merged)
    return pd.concat(frames)
def IOU(df):
    """Append an intersection-over-union column computed per row.

    Each row must carry ground-truth (xmin..ymax) and predicted
    (xmin_pred..ymax_pred) corner coordinates.
    Fixes: removed the dead local ``copy`` and replaced quadratic
    ``.loc[idx]`` appends with a single DataFrame construction.
    :return: df merged (on 'imagen') with a new 'iou' column.
    """
    rows = []
    for index, row in df.iterrows():
        # Corners of the intersection rectangle.
        xmin_inter = max(row["xmin"], row["xmin_pred"])
        ymin_inter = max(row["ymin"], row["ymin_pred"])
        xmax_inter = min(row["xmax"], row["xmax_pred"])
        ymax_inter = min(row["ymax"], row["ymax_pred"])
        # Intersection area (inclusive pixel counting, hence the +1).
        inter_area = max(0, xmax_inter - xmin_inter + 1) * max(0, ymax_inter - ymin_inter + 1)
        # Areas of the ground-truth and predicted boxes.
        actual_area = (row["xmax"] - row["xmin"] + 1) * (row["ymax"] - row["ymin"] + 1)
        pred_area = (row["xmax_pred"] - row["xmin_pred"] + 1) * (row["ymax_pred"] - row["ymin_pred"] + 1)
        # Intersection over union.
        iou = inter_area / float(actual_area + pred_area - inter_area)
        rows.append({"imagen": row["imagen"], "iou": iou})
    df_iou = pd.DataFrame(rows, columns=["imagen", "iou"])
    return pd.merge(df, df_iou, on="imagen")
def precision_recall(iou):
    """Append running 'Precision' and 'Recall' columns to *iou* in place.

    A detection counts as a true positive when its IoU exceeds 0.5.
    NOTE(review): FN is initialised from the pre-computed 'TP/FP'
    column and held constant throughout, mirroring the original
    evaluation protocol -- confirm this is the intended definition.

    Returns:
        The same DataFrame with the two columns added.
    """
    Precision = []
    Recall = []
    TP = FP = 0
    FN = len(iou[iou['TP/FP'] == 'TP'])
    for index, row in iou.iterrows():
        if row['iou'] > 0.5:
            TP = TP + 1
        else:
            FP = FP + 1
        try:
            AP = TP / (TP + FP)
            Rec = TP / (TP + FN)
        except ZeroDivisionError:
            # Bug fix: the original wrote `AP = Recall = 0.0`, which
            # rebound the Recall *list* to a float and crashed on the
            # subsequent Recall.append; it also left Rec unbound.
            AP = Rec = 0.0
        Precision.append(AP)
        Recall.append(Rec)
    iou['Precision'] = Precision
    iou['Recall'] = Recall
    return iou
def Map(iou):
    """Print and return the 11-point interpolated mean average precision.

    For each recall level in {0.0, 0.1, ..., 1.0} the maximum precision
    among rows whose 'Recall' is at least that level is taken; the mAP
    is the mean of those 11 values.

    Returns:
        The mAP as a float (new, backward-compatible return value;
        the original returned None).
    """
    prec_at_rec = []
    for recall_level in np.linspace(0.0, 1.0, 11):
        x = iou[iou['Recall'] >= recall_level]['Precision']
        # Fix: the original used a bare `except:` around max(); an empty
        # selection simply contributes zero precision at this level.
        prec = max(x) if len(x) else 0.0
        print("AP para Recall:", recall_level, " ", prec)
        prec_at_rec.append(prec)
    avg_prec = np.mean(prec_at_rec)
    #print('11 point precision is ', prec_at_rec)
    print('mAP is ', avg_prec)
    return avg_prec
if __name__ == "__main__":
    # Evaluation driver: read predictions and ground truth, compute IoU,
    # precision/recall and mAP, then export the per-detection table.
    path = "./results"
    datacfg = "./cfg/camion.data"
    archivos_pred = os.listdir(path)
    lista_archivos = leer_predicciones(archivos_pred)
    predicciones = pd.concat(lista_archivos)
    options = read_data_cfg(datacfg)
    path_archivo_valid = options['valid']
    # Bug fix: the validation file handle was previously opened with
    # open() and never closed; use a context manager instead.
    with open(path_archivo_valid, "r") as f:
        lineas_archivo_valid = f.read().splitlines()
    cord_target = leer_target_cord(lineas_archivo_valid)
    union = pd.merge(predicciones, cord_target, on='imagen')
    iou = IOU(union)
    # Drop the raw coordinates; only image id, prob, class and iou remain.
    iou = iou.drop(["xmin", "ymin", "xmax", "ymax", "xmin_pred", "ymin_pred",
                    "xmax_pred", "ymax_pred"], axis=1)
    # (Removed the unused `eval_table = pd.DataFrame()` variable.)
    iou['TP/FP'] = iou['iou'].apply(lambda x: 'TP' if x >= 0.5 else 'FP')
    iou = precision_recall(iou)
    # Interpolated precision: max precision observed at each recall level.
    iou['IP'] = iou.groupby('Recall')['Precision'].transform('max')
    Map(iou)
    iou.to_excel("Resultado_Test.xlsx")
| [
"numpy.mean",
"os.listdir",
"pandas.read_csv",
"pandas.merge",
"os.path.join",
"numpy.linspace",
"os.path.basename",
"pandas.DataFrame",
"pandas.concat"
] | [((1558, 1575), 'pandas.concat', 'pd.concat', (['target'], {}), '(target)\n', (1567, 1575), True, 'import pandas as pd\n'), ((1625, 1664), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['imagen', 'iou']"}), "(columns=['imagen', 'iou'])\n", (1637, 1664), True, 'import pandas as pd\n'), ((2529, 2562), 'pandas.merge', 'pd.merge', (['df', 'df_iou'], {'on': '"""imagen"""'}), "(df, df_iou, on='imagen')\n", (2537, 2562), True, 'import pandas as pd\n'), ((3162, 3187), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(11)'], {}), '(0.0, 1.0, 11)\n', (3173, 3187), True, 'import numpy as np\n'), ((3434, 3454), 'numpy.mean', 'np.mean', (['prec_at_rec'], {}), '(prec_at_rec)\n', (3441, 3454), True, 'import numpy as np\n'), ((3641, 3657), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3651, 3657), False, 'import os\n'), ((3731, 3756), 'pandas.concat', 'pd.concat', (['lista_archivos'], {}), '(lista_archivos)\n', (3740, 3756), True, 'import pandas as pd\n'), ((3999, 4047), 'pandas.merge', 'pd.merge', (['predicciones', 'cord_target'], {'on': '"""imagen"""'}), "(predicciones, cord_target, on='imagen')\n", (4007, 4047), True, 'import pandas as pd\n'), ((4197, 4211), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4209, 4211), True, 'import pandas as pd\n'), ((1247, 1279), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['imagen']"}), "(columns=['imagen'])\n", (1259, 1279), True, 'import pandas as pd\n'), ((1333, 1373), 'pandas.read_csv', 'pd.read_csv', (['linea'], {'sep': '""" """', 'header': 'None'}), "(linea, sep=' ', header=None)\n", (1344, 1373), True, 'import pandas as pd\n'), ((1389, 1446), 'pandas.concat', 'pd.concat', (['[data_nombre, data]'], {'axis': '(1)', 'ignore_index': '(True)'}), '([data_nombre, data], axis=1, ignore_index=True)\n', (1398, 1446), True, 'import pandas as pd\n'), ((364, 397), 'os.path.join', 'os.path.join', (['path', 'file_archivos'], {}), '(path, file_archivos)\n', (376, 397), False, 'import os\n'), ((1187, 1210), 
'os.path.basename', 'os.path.basename', (['linea'], {}), '(linea)\n', (1203, 1210), False, 'import os\n')] |
"""
<NAME>17
PanCancer Classifier
scripts/initialize/process_sample_freeze.py
Takes in sample freeze data that was determined by TCGA PanCancer Atlas
consortium along with raw RNAseq and mutation data. The script will process
the datasets and subset each according to the frozen samples. The frozen
samples were previously determined and include all samples to consider in
downstream analyses.
Usage: Run once in command line
python scripts/initialize/process_sample_freeze.py
Output:
RNAseq and mutation data subset by sample freeze
"""
import os
import numpy as np
import pandas as pd
# Input Files
rna_file = os.path.join('data', 'raw', 'pancan_normalized_rnaseq.tsv')
mut_file = os.path.join('data', 'raw', 'mc3.v0.2.8.PUBLIC.maf.gz')
sample_freeze_file = os.path.join('data', 'raw',
'sampleset_freeze_version4_modify.csv')
# Output Files
rna_out_file = os.path.join('data', 'pancan_rnaseq_freeze.tsv.gz')
mut_out_file = os.path.join('data', 'pancan_mutation_freeze.tsv.gz')
freeze_out_file = os.path.join('data', 'sample_freeze.tsv')
burden_out_file = os.path.join('data', 'mutation_burden_freeze.tsv')
# Load Data
rnaseq_df = pd.read_table(rna_file, index_col=0)
mutation_df = pd.read_table(mut_file)
sample_freeze_df = pd.read_csv(sample_freeze_file)
# Process RNAseq file
rnaseq_df.index = rnaseq_df.index.map(lambda x: x.split('|')[0])
rnaseq_df.columns = rnaseq_df.columns.str.slice(start=0, stop=15)
rnaseq_df = rnaseq_df.drop('?').fillna(0).sort_index(axis=1)
# Gene is listed twice in RNAseq data, drop both occurrences
rnaseq_df.drop('SLC35E2', axis=0, inplace=True)
rnaseq_df = rnaseq_df.T
# Determine consistent sample freeze in RNAseq
freeze_barcodes = set(sample_freeze_df.SAMPLE_BARCODE)
freeze_barcodes = freeze_barcodes.intersection(set(rnaseq_df.index))
# Process Mutation File
mutation_df = mutation_df.assign(PATIENT_BARCODE=mutation_df
.Tumor_Sample_Barcode
.str.slice(start=0, stop=12))
mutation_df = mutation_df.assign(SAMPLE_BARCODE=mutation_df
.Tumor_Sample_Barcode
.str.slice(start=0, stop=15))
# Determine consistent sample freeze between RNAseq and mutation
mut_samples = set(mutation_df.SAMPLE_BARCODE.unique())
freeze_barcodes = freeze_barcodes.intersection(mut_samples)
freeze_barcodes = sorted(freeze_barcodes)
# Subset rnaseq data to only barcodes and remove duplicate rows
rnaseq_df = rnaseq_df.loc[freeze_barcodes, :]
rnaseq_df = rnaseq_df[~rnaseq_df.index.duplicated()]
rnaseq_df.to_csv(rna_out_file, sep='\t', compression='gzip')
# Filter mutation types and generate binary matrix
mutations = {
'Frame_Shift_Del',
'Frame_Shift_Ins',
'In_Frame_Del',
'In_Frame_Ins',
'Missense_Mutation',
'Nonsense_Mutation',
'Nonstop_Mutation',
'RNA',
'Splice_Site',
'Translation_Start_Site',
}
# Process synapse mutations
mut_pivot = (mutation_df.query("Variant_Classification in @mutations")
.groupby(['SAMPLE_BARCODE', 'Chromosome',
'Hugo_Symbol'])
.apply(len).reset_index()
.rename(columns={0: 'mutation'}))
mut_pivot = (mut_pivot.pivot_table(index='SAMPLE_BARCODE',
columns='Hugo_Symbol', values='mutation',
fill_value=0)
.astype(bool).astype(int))
# 12 Samples don't have any deleterious mutations
# This command will introduce NAs for these 12 samples, fill them with zeros
mut_pivot = mut_pivot.loc[freeze_barcodes, :]
mut_pivot = mut_pivot.fillna(0)
mut_pivot = mut_pivot.astype(int)
mut_pivot.to_csv(mut_out_file, sep='\t', compression='gzip')
# Generate a mutation burden variable (log10 total deleterious mutations)
burden_df = mutation_df[mutation_df['Variant_Classification'].isin(mutations)]
burden_df = burden_df.groupby('SAMPLE_BARCODE').apply(len)
burden_df = np.log10(burden_df)
burden_df = burden_df.loc[freeze_barcodes]
burden_df = burden_df.fillna(0)
burden_df = pd.DataFrame(burden_df, columns=['log10_mut'])
burden_df.to_csv(burden_out_file, sep='\t')
# Write out finalized and subset sample freeze file
sample_freeze_df = sample_freeze_df[sample_freeze_df.SAMPLE_BARCODE
.isin(freeze_barcodes)]
sample_freeze_df.to_csv(freeze_out_file, sep='\t')
| [
"numpy.log10",
"pandas.read_csv",
"os.path.join",
"pandas.read_table",
"pandas.DataFrame"
] | [((626, 685), 'os.path.join', 'os.path.join', (['"""data"""', '"""raw"""', '"""pancan_normalized_rnaseq.tsv"""'], {}), "('data', 'raw', 'pancan_normalized_rnaseq.tsv')\n", (638, 685), False, 'import os\n'), ((697, 752), 'os.path.join', 'os.path.join', (['"""data"""', '"""raw"""', '"""mc3.v0.2.8.PUBLIC.maf.gz"""'], {}), "('data', 'raw', 'mc3.v0.2.8.PUBLIC.maf.gz')\n", (709, 752), False, 'import os\n'), ((774, 841), 'os.path.join', 'os.path.join', (['"""data"""', '"""raw"""', '"""sampleset_freeze_version4_modify.csv"""'], {}), "('data', 'raw', 'sampleset_freeze_version4_modify.csv')\n", (786, 841), False, 'import os\n'), ((907, 958), 'os.path.join', 'os.path.join', (['"""data"""', '"""pancan_rnaseq_freeze.tsv.gz"""'], {}), "('data', 'pancan_rnaseq_freeze.tsv.gz')\n", (919, 958), False, 'import os\n'), ((974, 1027), 'os.path.join', 'os.path.join', (['"""data"""', '"""pancan_mutation_freeze.tsv.gz"""'], {}), "('data', 'pancan_mutation_freeze.tsv.gz')\n", (986, 1027), False, 'import os\n'), ((1046, 1087), 'os.path.join', 'os.path.join', (['"""data"""', '"""sample_freeze.tsv"""'], {}), "('data', 'sample_freeze.tsv')\n", (1058, 1087), False, 'import os\n'), ((1106, 1156), 'os.path.join', 'os.path.join', (['"""data"""', '"""mutation_burden_freeze.tsv"""'], {}), "('data', 'mutation_burden_freeze.tsv')\n", (1118, 1156), False, 'import os\n'), ((1182, 1218), 'pandas.read_table', 'pd.read_table', (['rna_file'], {'index_col': '(0)'}), '(rna_file, index_col=0)\n', (1195, 1218), True, 'import pandas as pd\n'), ((1233, 1256), 'pandas.read_table', 'pd.read_table', (['mut_file'], {}), '(mut_file)\n', (1246, 1256), True, 'import pandas as pd\n'), ((1276, 1307), 'pandas.read_csv', 'pd.read_csv', (['sample_freeze_file'], {}), '(sample_freeze_file)\n', (1287, 1307), True, 'import pandas as pd\n'), ((4032, 4051), 'numpy.log10', 'np.log10', (['burden_df'], {}), '(burden_df)\n', (4040, 4051), True, 'import numpy as np\n'), ((4139, 4185), 'pandas.DataFrame', 'pd.DataFrame', 
(['burden_df'], {'columns': "['log10_mut']"}), "(burden_df, columns=['log10_mut'])\n", (4151, 4185), True, 'import pandas as pd\n')] |
import pickle
import numpy as np
import os
import gzip
def save_emb(keys, values, filename, number_format=".6f"):
    """Write (key, vector) pairs to *filename* as gzip-compressed text.

    Each line is ``<key> <v0> <v1> ...`` with vector components rendered
    using *number_format* (default: fixed-point with 6 decimals).

    Args:
        keys: sequence of item identifiers.
        values: sequence of float sequences, same length as *keys*.
        filename: output path for the gzip file.
        number_format: format spec applied to each component.
    """
    assert len(keys) == len(values)
    number_format = f"{{n:{number_format}}}"
    written = 0
    # Bug fixes: 'with' guarantees the file is closed even on error, and
    # the final message no longer crashes (NameError on `i`) for empty input.
    with gzip.open(filename, "wb") as out:
        for i, (k, v) in enumerate(zip(keys, values)):
            if i and not i % 1000:
                print(f"{i} items written ...", end="\r")
            line = f"{k} " + " ".join(number_format.format(n=n) for n in v) + "\n"
            out.write(line.encode("ascii"))
            written += 1
    print(f"DONE. {written} items written to {filename}.")
def save_emb_gz(keys, values, filename, number_format=".4e"):
    """Write (key, vector) pairs to *filename* as gzip-compressed text.

    Same format as save_emb, but components default to scientific
    notation with 4 significant decimals.

    Args:
        keys: sequence of item identifiers.
        values: sequence of float sequences, same length as *keys*.
        filename: output path for the gzip file.
        number_format: format spec applied to each component.
    """
    assert len(keys) == len(values)
    number_format = f"{{n:{number_format}}}"
    written = 0
    # Bug fixes: 'with' guarantees the file is closed even on error, and
    # the final message no longer crashes (NameError on `i`) for empty input.
    with gzip.open(filename, "wb") as out:
        for i, (k, v) in enumerate(zip(keys, values)):
            if i and not i % 1000:
                print(f"{i} items written ...", end="\r")
            line = f"{k} " + " ".join(number_format.format(n=n) for n in v) + "\n"
            out.write(line.encode("ascii"))
            written += 1
    print(f"DONE. {written} items written to {filename}.")
def load_emb(filename, n_items=None, encoding="utf-8"):
    """Load a plain-text embedding file (one ``<word> <v0> <v1> ...`` per line).

    Args:
        filename: path to the uncompressed embedding file.
        n_items: optional cap on the number of items to load.
        encoding: text encoding of the file.

    Returns:
        (word2ind, ind2word, embeddings) where embeddings is a 2-D
        np.ndarray with one row per word.
    """
    word2ind = {}
    ind2word = []
    embeddings = []
    # Bug fix: the `encoding` parameter was accepted but ignored --
    # the file was always opened as utf-8.
    with open(filename, "r", encoding=encoding) as f:
        for line in f:
            if n_items and len(ind2word) >= n_items:
                break
            if not len(ind2word) % 1000:
                print(f"{len(ind2word)} items loaded ...", end="\r")
            word, *emb_str = line.strip().split()
            vector = np.asarray([float(s) for s in emb_str])
            word2ind[word] = len(word2ind)
            ind2word.append(word)
            embeddings.append(vector)
    print(f"DONE. {len(ind2word)} items loaded from {filename}.")
    return word2ind, ind2word, np.stack(embeddings)
def load_emb_gz(filename, n_items=None):
    """Load embeddings from a gzip-compressed text file.

    Each line holds an ascii word followed by its float vector
    components.  Loading stops early once *n_items* entries have been
    read (when given).

    Returns:
        (word2ind, ind2word, embeddings): lookup dict, index list, and
        a stacked 2-D array of vectors.
    """
    word2ind = {}
    ind2word = []
    vectors = []
    fh = gzip.open(filename, "rb")
    for raw in fh:
        if n_items and len(ind2word) >= n_items:
            break
        if not len(ind2word) % 1000:
            print(f"{len(ind2word)} items loaded ...", end="\r")
        token, *components = raw.decode("ascii").strip().split()
        word2ind[token] = len(word2ind)
        ind2word.append(token)
        vectors.append(np.asarray([float(c) for c in components]))
    fh.close()
    print(f"DONE. {len(ind2word)} items loaded from (unknown).")
    return word2ind, ind2word, np.stack(vectors)
def load_emb_pickled(filename):
    """Load embeddings saved as a gzip'd .npy array plus pickled metadata.

    Expects two files: ``<filename>.npy.gz`` (the feature matrix) and
    ``<filename>.meta.pkl`` (a metadata dict with a "classes" entry).
    The metadata dict is augmented with integer class indices
    ("class_idx") and the unique class names ("class_names").

    Returns:
        (meta, embeddings)
    """
    print(f"Loading features from (unknown).npy.gz")
    with gzip.open(filename + ".npy.gz", "rb") as fh:
        features = np.load(fh)
    print(f"Loading metadata from (unknown).meta.pkl")
    with open(filename + ".meta.pkl", "rb") as fh:
        meta = pickle.load(fh)
    class_names, class_idx = np.unique(meta["classes"], return_inverse=True)
    print(f"{len(class_names)} categories found.")
    meta["class_idx"] = class_idx
    meta["class_names"] = class_names
    return meta, features
def make_categories(filenames, sep=None):
    """
    Extracts categories from a list of file paths, assuming
    the files are sorted in folders corresponding to their categories, e.g.:
    ["path/to/data/category1/image1.png",
    "path/to/data/category1/image2.png",
    "path/to/data/category2/image1.png"]
    Returns a np.array of category indices (or None when the paths
    contain no directory component).
    """
    sep = os.path.sep if sep is None else sep
    # The parent directory of each file names its category.
    if len(filenames[0].split(sep)) < 2:
        print("No categories found.")
        return None
    folders = [name.split(sep)[-2] for name in filenames]
    uniq, indices = np.unique(folders, return_inverse=True)
    print(f"{len(uniq)} categories found.")
    return indices
def split_dataset(data_length, ratio):
    # Randomly partition the indices 0..data_length-1 into len(ratio)
    # splits whose sizes are proportional to the entries of `ratio`.
    ind = np.arange(data_length)
    np.random.shuffle(ind)
    ratio = np.asarray(ratio)
    # Rescale so the ratio entries sum to data_length (float sizes).
    ratio = ratio / ratio.sum() * data_length
    # Cumulative boundaries of the splits, starting at 0.
    splits = [0]
    for n in ratio:
        splits.append(splits[-1] + n)
    # NOTE(review): this slices with the float `ratio` values instead of
    # the cumulative `splits` boundaries, and `ratio[i + 1]` goes out of
    # bounds on the last iteration -- `splits` looks like the intended
    # index source.  No `return` statement is visible here either; the
    # function may be truncated in this view.  TODO confirm against the
    # full source before relying on this function.
    out = [ind[ratio[i]:ratio[i + 1]] for i in range(len(ratio))]
| [
"numpy.unique",
"gzip.open",
"numpy.asarray",
"pickle.load",
"numpy.stack",
"numpy.load",
"numpy.arange",
"numpy.random.shuffle"
] | [((208, 233), 'gzip.open', 'gzip.open', (['filename', '"""wb"""'], {}), "(filename, 'wb')\n", (217, 233), False, 'import gzip\n'), ((717, 742), 'gzip.open', 'gzip.open', (['filename', '"""wb"""'], {}), "(filename, 'wb')\n", (726, 742), False, 'import gzip\n'), ((1901, 1926), 'gzip.open', 'gzip.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (1910, 1926), False, 'import gzip\n'), ((2798, 2845), 'numpy.unique', 'np.unique', (["meta['classes']"], {'return_inverse': '(True)'}), "(meta['classes'], return_inverse=True)\n", (2807, 2845), True, 'import numpy as np\n'), ((3568, 3615), 'numpy.unique', 'np.unique', (['filenames_split'], {'return_inverse': '(True)'}), '(filenames_split, return_inverse=True)\n', (3577, 3615), True, 'import numpy as np\n'), ((3726, 3748), 'numpy.arange', 'np.arange', (['data_length'], {}), '(data_length)\n', (3735, 3748), True, 'import numpy as np\n'), ((3753, 3775), 'numpy.random.shuffle', 'np.random.shuffle', (['ind'], {}), '(ind)\n', (3770, 3775), True, 'import numpy as np\n'), ((3788, 3805), 'numpy.asarray', 'np.asarray', (['ratio'], {}), '(ratio)\n', (3798, 3805), True, 'import numpy as np\n'), ((1773, 1793), 'numpy.stack', 'np.stack', (['embeddings'], {}), '(embeddings)\n', (1781, 1793), True, 'import numpy as np\n'), ((2448, 2468), 'numpy.stack', 'np.stack', (['embeddings'], {}), '(embeddings)\n', (2456, 2468), True, 'import numpy as np\n'), ((2566, 2603), 'gzip.open', 'gzip.open', (["(filename + '.npy.gz')", '"""rb"""'], {}), "(filename + '.npy.gz', 'rb')\n", (2575, 2603), False, 'import gzip\n'), ((2631, 2641), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (2638, 2641), True, 'import numpy as np\n'), ((2763, 2777), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2774, 2777), False, 'import pickle\n')] |
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Defines the interface of the Display module of the racecar_core library.
"""
import abc
import math
from typing import Any, List, Optional, Tuple

import numpy as np
from nptyping import NDArray

import racecar_utils as rc_utils
class Display(abc.ABC):
    """
    Allows the user to print images to the screen.
    """

    # The radii of the dots used to indicate points
    __BIG_DOT_RADIUS = 8
    __SMALL_DOT_RADIUS = 4
    __LIDAR_CAR_RADIUS = 2

    @abc.abstractmethod
    def create_window(self) -> None:
        """
        Creates an empty window into which images will be displayed.

        Note:
            It is not necessary to call create_window before any of the other display
            methods (show_color_image, show_depth_image, etc.). These methods will
            automatically create a new window if one was not already created.

        Example::

            # Creates a window
            rc.display.create_window()

            # Display an image in this window
            image = rc.camera.get_color_image()
            rc.display.show_color_image(image)
        """
        pass

    @abc.abstractmethod
    def show_color_image(self, image: NDArray) -> None:
        """
        Displays a color image in a window.

        Args:
            image: The color image to display to the screen.

        Example::

            image = rc.camera.get_color_image()

            # Show the image captured by the camera
            rc.display.show_color_image(image)
        """
        pass

    def show_depth_image(
        self,
        image: NDArray[(Any, Any), np.float32],
        max_depth: int = 1000,
        points: Optional[List[Tuple[int, int]]] = None,
    ) -> None:
        """
        Displays a depth image in grayscale in a window.

        Args:
            image: The depth image to display to the screen.
            max_depth: The farthest depth to show in the image in cm. Anything past
                this depth is shown as black.
            points: A list of points in (pixel row, pixel column) format to show on
                the image as colored dots.

        Example::

            depth_image = rc.camera.get_depth_image()

            # Show the depth_image captured by the camera.
            rc.display.show_depth_image(depth_image)

            # Show anything that is at most 500 cm away, and show a black cross at
            # row 3, column 5
            rc.display.show_depth_image(depth_image, 500, [(3, 5)])
        """
        # Bug fix: avoid a mutable default argument; None means "no points".
        if points is None:
            points = []

        assert max_depth > 0, "max_depth must be positive."
        for point in points:
            assert (
                0 <= point[0] < image.shape[0] and 0 <= point[1] < image.shape[1]
            ), "The point {} is not a valid pixel row and column within image.".format(
                point
            )

        color_image = rc_utils.colormap_depth_image(image, max_depth)

        # Draw a dot at each point in points (blue center on a green ring)
        for point in points:
            rc_utils.draw_circle(
                color_image,
                point,
                rc_utils.ColorBGR.green.value,
                radius=self.__BIG_DOT_RADIUS,
            )
            rc_utils.draw_circle(
                color_image,
                point,
                rc_utils.ColorBGR.blue.value,
                radius=self.__SMALL_DOT_RADIUS,
            )

        self.show_color_image(color_image)

    def show_lidar(
        self,
        samples: NDArray[Any, np.float32],
        radius: int = 128,
        max_range: int = 1000,
        highlighted_samples: Optional[List[Tuple[float, float]]] = None,
    ) -> None:
        """
        Displays a set of LIDAR samples.

        Args:
            samples: A complete LIDAR scan.
            radius: Half of the width or height (in pixels) of the generated image.
            max_range: The farthest depth to show in the image in cm. Anything past
                this depth is shown as black.
            highlighted_samples: A list of samples in (angle, distance) format to show
                as light blue dots. Angle must be in degrees from straight ahead
                (clockwise), and distance must be in cm.

        Note:
            Each sample in samples is shown as a red pixel. Each sample in
            highlighted_samples is shown as a blue pixel. The car is shown as a green
            dot at the center of the visualization.

        Warning:
            samples must be a complete LIDAR scan. This function assumes that each
            sample is equal angle appart, and that samples spans the entire 360 degrees.
            If this is not the case, the visualization will be inaccurate.

        Example::

            lidar_samples = rc.lidar.get_samples()

            # Visualize the LIDAR scan, highlighting whatever is 100 cm
            # directly in front of the car
            rc.display.show_lidar(lidar_samples, highlighted_samples=[(0, 100)])
        """
        # Bug fix: avoid a mutable default argument; None means "no highlights".
        if highlighted_samples is None:
            highlighted_samples = []

        assert radius > 0, "radius must be positive."
        assert max_range > 0, "max_range must be positive."

        # Create a square black image with the requested radius
        image = np.zeros((2 * radius, 2 * radius, 3), np.uint8, "C")
        num_samples: int = len(samples)

        # Draw a red pixel for each non-zero sample less than max_range
        for i in range(num_samples):
            if 0 < samples[i] < max_range:
                angle: float = 2 * math.pi * i / num_samples
                length: float = radius * samples[i] / max_range
                r: int = int(radius - length * math.cos(angle))
                c: int = int(radius + length * math.sin(angle))
                image[r][c][2] = 255

        # Draw a green dot to denote the car
        rc_utils.draw_circle(
            image,
            (radius, radius),
            rc_utils.ColorBGR.green.value,
            self.__LIDAR_CAR_RADIUS,
        )

        # Draw a light blue pixel for each point in highlighted_samples
        for (angle, distance) in highlighted_samples:
            if 0 < distance < max_range:
                angle_rad = angle * math.pi / 180
                length: float = radius * distance / max_range
                r: int = int(radius - length * math.cos(angle_rad))
                c: int = int(radius + length * math.sin(angle_rad))
                image[r][c][0] = 255
                image[r][c][1] = 255
                image[r][c][2] = 0

        self.show_color_image(image)
| [
"racecar_utils.draw_circle",
"math.cos",
"numpy.zeros",
"math.sin",
"racecar_utils.colormap_depth_image"
] | [((2877, 2924), 'racecar_utils.colormap_depth_image', 'rc_utils.colormap_depth_image', (['image', 'max_depth'], {}), '(image, max_depth)\n', (2906, 2924), True, 'import racecar_utils as rc_utils\n'), ((5268, 5320), 'numpy.zeros', 'np.zeros', (['(2 * radius, 2 * radius, 3)', 'np.uint8', '"""C"""'], {}), "((2 * radius, 2 * radius, 3), np.uint8, 'C')\n", (5276, 5320), True, 'import numpy as np\n'), ((5858, 5963), 'racecar_utils.draw_circle', 'rc_utils.draw_circle', (['image', '(radius, radius)', 'rc_utils.ColorBGR.green.value', 'self.__LIDAR_CAR_RADIUS'], {}), '(image, (radius, radius), rc_utils.ColorBGR.green.value,\n self.__LIDAR_CAR_RADIUS)\n', (5878, 5963), True, 'import racecar_utils as rc_utils\n'), ((3012, 3117), 'racecar_utils.draw_circle', 'rc_utils.draw_circle', (['color_image', 'point', 'rc_utils.ColorBGR.green.value'], {'radius': 'self.__BIG_DOT_RADIUS'}), '(color_image, point, rc_utils.ColorBGR.green.value,\n radius=self.__BIG_DOT_RADIUS)\n', (3032, 3117), True, 'import racecar_utils as rc_utils\n'), ((3205, 3311), 'racecar_utils.draw_circle', 'rc_utils.draw_circle', (['color_image', 'point', 'rc_utils.ColorBGR.blue.value'], {'radius': 'self.__SMALL_DOT_RADIUS'}), '(color_image, point, rc_utils.ColorBGR.blue.value,\n radius=self.__SMALL_DOT_RADIUS)\n', (3225, 3311), True, 'import racecar_utils as rc_utils\n'), ((5686, 5701), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (5694, 5701), False, 'import math\n'), ((5750, 5765), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (5758, 5765), False, 'import math\n'), ((6346, 6365), 'math.cos', 'math.cos', (['angle_rad'], {}), '(angle_rad)\n', (6354, 6365), False, 'import math\n'), ((6414, 6433), 'math.sin', 'math.sin', (['angle_rad'], {}), '(angle_rad)\n', (6422, 6433), False, 'import math\n')] |
#Importing Packages
import cv2
import numpy as np
from deskew import deskewer
"""Function for pre-processing the TItle Deed Scans"""
def clean_image(imagepath, greyscale = True, rescaling = True, enhance = True, blur = True,
                binarization = "simple", skew = True):
    """Pre-process a title-deed scan for OCR.

    Args:
        imagepath: path of the image file to read.
        greyscale: convert the image to greyscale (recommended; the
            later thresholding steps expect a single-channel image).
        rescaling: upscale by 2.5x with cubic interpolation.
        enhance: apply morphological open/close to remove noise.
        blur: apply an edge-preserving bilateral filter.
        binarization: one of "simple", "otsu" or "adaptive".
        skew: deskew the image using the deskewer helper.

    Returns:
        The processed image as a numpy array.
    """
    image = cv2.imread(imagepath)
    # Bug fix: the greyscale flag was previously ignored and the
    # conversion always applied unconditionally.
    if greyscale:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Resize images for better OCR results
    if rescaling:
        image = cv2.resize(image, None, fx=2.5, fy=2.5, interpolation=cv2.INTER_CUBIC)
    # Deskew image using deskewer function
    if skew:
        image, angle = deskewer(image)
    # Choose binarization method
    if binarization == "otsu":
        image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    elif binarization == "adaptive":
        # Bug fix: adaptiveThreshold returns the thresholded image directly
        # (not a (retval, image) tuple like cv2.threshold), so the previous
        # trailing [1] sliced out a single pixel row instead of the image.
        image = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
    elif binarization == "simple":
        image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)[1]
    # Noise removal
    if enhance:
        kernel = np.ones((1, 1), np.uint8)
        image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
        image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel)
    if blur:
        image = cv2.bilateralFilter(image, 9, 75, 75)
    return image
| [
"numpy.ones",
"cv2.bilateralFilter",
"cv2.threshold",
"cv2.morphologyEx",
"cv2.adaptiveThreshold",
"cv2.cvtColor",
"cv2.resize",
"cv2.imread",
"deskew.deskewer"
] | [((311, 332), 'cv2.imread', 'cv2.imread', (['imagepath'], {}), '(imagepath)\n', (321, 332), False, 'import cv2\n'), ((373, 412), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (385, 412), False, 'import cv2\n'), ((496, 566), 'cv2.resize', 'cv2.resize', (['image', 'None'], {'fx': '(2.5)', 'fy': '(2.5)', 'interpolation': 'cv2.INTER_CUBIC'}), '(image, None, fx=2.5, fy=2.5, interpolation=cv2.INTER_CUBIC)\n', (506, 566), False, 'import cv2\n'), ((661, 676), 'deskew.deskewer', 'deskewer', (['image'], {}), '(image)\n', (669, 676), False, 'from deskew import deskewer\n'), ((1148, 1173), 'numpy.ones', 'np.ones', (['(1, 1)', 'np.uint8'], {}), '((1, 1), np.uint8)\n', (1155, 1173), True, 'import numpy as np\n'), ((1189, 1236), 'cv2.morphologyEx', 'cv2.morphologyEx', (['image', 'cv2.MORPH_OPEN', 'kernel'], {}), '(image, cv2.MORPH_OPEN, kernel)\n', (1205, 1236), False, 'import cv2\n'), ((1253, 1301), 'cv2.morphologyEx', 'cv2.morphologyEx', (['image', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(image, cv2.MORPH_CLOSE, kernel)\n', (1269, 1301), False, 'import cv2\n'), ((1341, 1378), 'cv2.bilateralFilter', 'cv2.bilateralFilter', (['image', '(9)', '(75)', '(75)'], {}), '(image, 9, 75, 75)\n', (1360, 1378), False, 'import cv2\n'), ((759, 824), 'cv2.threshold', 'cv2.threshold', (['image', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (772, 824), False, 'import cv2\n'), ((883, 983), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['image', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY_INV', '(11)', '(2)'], {}), '(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY_INV, 11, 2)\n', (904, 983), False, 'import cv2\n'), ((1035, 1084), 'cv2.threshold', 'cv2.threshold', (['image', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(image, 127, 255, cv2.THRESH_BINARY)\n', (1048, 1084), False, 'import cv2\n')] |
import os
from collections import defaultdict
import numpy as np
import tensorflow as tf
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import shutil
import glob
def _tabulate_events(dpath):
    """Collect scalar summaries from every run directory under *dpath*.

    Every run directory must contain a 'train' event log and expose the
    same scalar tags; events are zipped step-wise across runs.

    Returns:
        dict mapping tag -> list of per-step value lists (one value per run).
    """
    accumulators = [EventAccumulator(os.path.join(dpath, run, 'train')).Reload()
                    for run in os.listdir(dpath)]
    tags = accumulators[0].Tags()['scalars']
    # Sanity check: all runs must log exactly the same scalar tags.
    for acc in accumulators:
        assert set(acc.Tags()['scalars']) == set(tags)
    table = defaultdict(list)
    for tag in tags:
        for step_events in zip(*[acc.Scalars(tag) for acc in accumulators]):
            # All runs must be aligned on the same global step.
            assert len(set(e.step for e in step_events)) == 1
            table[tag].append([e.value for e in step_events])
    return table
def _write_combined_events(dpath, d_combined, dname='combined'):
    """Average the tabulated scalars across runs and write new event files.

    For every tag, the per-step mean over runs is emitted as a scalar
    summary into the *dpath*/*dname* directory.
    """
    writer = tf.summary.FileWriter(os.path.join(dpath, dname))
    tags, values = zip(*d_combined.items())
    # Average over the last axis, i.e. across runs at each step.
    per_step_means = np.array(values).mean(axis=-1)
    for tag, means in zip(tags, per_step_means):
        for step, mean in enumerate(means):
            summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=mean)])
            writer.add_summary(summary, global_step=step)
    writer.flush()
def _combine_dirs(dpath, chop, sep='_'):
    """Group per-seed run directories under shared experiment directories.

    Directory names are assumed to end with *chop* seed-specific,
    *sep*-separated components; the leading components identify the
    experiment.  Each matching seed directory is moved into a directory
    named after its experiment (created on demand).
    """
    exp_names = set(sep.join(name.split(sep)[:-chop]) for name in os.listdir(dpath))
    for exp_name in exp_names:
        exp_dir = os.path.join(dpath, exp_name)
        if not os.path.isdir(exp_dir):
            os.mkdir(exp_dir)
        # Move every sibling whose name starts with the experiment prefix.
        for seed_dir in glob.glob(exp_dir + '*'):
            if seed_dir != exp_dir:
                shutil.move(seed_dir, exp_dir)
def combine_events(args):
    """Merge per-seed runs and write averaged TensorBoard event files.

    Unless ``--combined`` was passed, seed directories are first grouped
    into experiment directories; the averaged scalars are then written
    inside each experiment directory.
    """
    if not args.combined:
        _combine_dirs(args.dpath, args.chop)
    for exp_dir in glob.glob(os.path.join(args.dpath, '*')):
        scalars = _tabulate_events(exp_dir)
        _write_combined_events(exp_dir, scalars)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('dpath', help='Directory path to runs.')
parser.add_argument('-c', '--combined', action='store_true')
parser.add_argument('--chop', help='Number of directory name parameters to ignore.', type=int, default=2)
args = parser.parse_args()
combine_events(args)
| [
"os.listdir",
"argparse.ArgumentParser",
"shutil.move",
"os.path.join",
"numpy.array",
"os.path.isdir",
"collections.defaultdict",
"os.mkdir",
"tensorflow.Summary.Value",
"tensorflow.summary.FileWriter",
"glob.glob"
] | [((503, 520), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (514, 520), False, 'from collections import defaultdict\n'), ((829, 855), 'os.path.join', 'os.path.join', (['dpath', 'dname'], {}), '(dpath, dname)\n', (841, 855), False, 'import os\n'), ((869, 897), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['fpath'], {}), '(fpath)\n', (890, 897), True, 'import tensorflow as tf\n'), ((1986, 2011), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2009, 2011), False, 'import argparse\n'), ((1421, 1450), 'os.path.join', 'os.path.join', (['dpath', 'exp_name'], {}), '(dpath, exp_name)\n', (1433, 1450), False, 'import os\n'), ((1556, 1585), 'glob.glob', 'glob.glob', (["(dpath_prefix + '*')"], {}), "(dpath_prefix + '*')\n", (1565, 1585), False, 'import glob\n'), ((1812, 1841), 'os.path.join', 'os.path.join', (['args.dpath', '"""*"""'], {}), "(args.dpath, '*')\n", (1824, 1841), False, 'import os\n'), ((334, 351), 'os.listdir', 'os.listdir', (['dpath'], {}), '(dpath)\n', (344, 351), False, 'import os\n'), ((964, 980), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (972, 980), True, 'import numpy as np\n'), ((1466, 1493), 'os.path.isdir', 'os.path.isdir', (['dpath_prefix'], {}), '(dpath_prefix)\n', (1479, 1493), False, 'import os\n'), ((1507, 1529), 'os.mkdir', 'os.mkdir', (['dpath_prefix'], {}), '(dpath_prefix)\n', (1515, 1529), False, 'import os\n'), ((1355, 1372), 'os.listdir', 'os.listdir', (['dpath'], {}), '(dpath)\n', (1365, 1372), False, 'import os\n'), ((1646, 1683), 'shutil.move', 'shutil.move', (['dpath_seed', 'dpath_prefix'], {}), '(dpath_seed, dpath_prefix)\n', (1657, 1683), False, 'import shutil\n'), ((275, 310), 'os.path.join', 'os.path.join', (['dpath', 'dname', '"""train"""'], {}), "(dpath, dname, 'train')\n", (287, 310), False, 'import os\n'), ((1125, 1169), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'tag', 'simple_value': 'mean'}), '(tag=tag, simple_value=mean)\n', 
(1141, 1169), True, 'import tensorflow as tf\n')] |
import sys
import cv2
import numpy as np
#####################################
# #
# Contour analysis and shape matching ######
#####################################
# Extract reference contour from the image
def get_ref_contour(img):
    """Extract the reference shape's contour from a color image.

    The image is converted to grayscale, binarized at 127, and all
    contours are found.  The first contour whose area lies between 5%
    and 80% of the total image area is returned -- this filters out the
    outer image-boundary contour.  Returns None implicitly when no
    contour qualifies.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 127, 255, 0)
    # See the OpenCV structural-analysis docs for findContours modes:
    # http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html
    contours, hierarchy = cv2.findContours(thresh, 1, 2)
    img_area = img.shape[0] * img.shape[1]
    for candidate in contours:
        ratio = cv2.contourArea(candidate) / float(img_area)
        if 0.05 < ratio < 0.8:
            return candidate
# Extract all the contours from the image
def get_all_contours(img):
    """Return every contour found in a thresholded grayscale copy of *img*."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 127, 255, 0)
    found, hierarchy = cv2.findContours(binary, 1, 2)
    return found
'''
if __name__=='__main__':
# Boomerang reference image
img1 = cv2.imread(sys.argv[1])
# Input image containing all the different shapes
img2 = cv2.imread(sys.argv[2])
# Extract the reference contour
ref_contour = get_ref_contour(img1)
# Extract all the contours from the input image
input_contours = get_all_contours(img2)
closest_contour = input_contours[0]
min_dist = sys.maxint
# Finding the closest contour
for contour in input_contours:
# Matching the shapes and taking the closest one
ret = cv2.matchShapes(ref_contour, contour, 1, 0.0)
if ret < min_dist:
min_dist = ret
closest_contour = contour
cv2.drawContours(img2, [closest_contour], -1, (0,0,0), 3)
cv2.imshow('Output', img2)
cv2.waitKey()
'''
##########################################
# Identifying the pizza with the slice   #
# taken out                              #
##########################################
# Input is a color image
def get_contours(img):
    """Threshold `img` and return the list of detected contours."""
    monochrome = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(monochrome, 127, 255, 0)
    # Mode 2 / method 1 are the integer codes accepted by findContours.
    detected, _ = cv2.findContours(mask, 2, 1)
    return detected
'''
if __name__=='__main__':
img = cv2.imread(sys.argv[1])
# Iterate over the extracted contours
for contour in get_contours(img):
# Extract convex hull from the contour
hull = cv2.convexHull(contour, returnPoints=False)
# Extract convexity defects from the above hull
defects = cv2.convexityDefects(contour, hull)
if defects is None:
continue
# Draw lines and circles to show the defects
for i in range(defects.shape[0]):
start_defect, end_defect, far_defect, _ = defects[i,0]
start = tuple(contour[start_defect][0])
end = tuple(contour[end_defect][0])
far = tuple(contour[far_defect][0])
cv2.circle(img, far, 5, [128,0,0], -1)
cv2.drawContours(img, [contour], -1, (0,0,0), 3)
cv2.imshow('Convexity defects',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
'''
if __name__=='__main__':
img = cv2.imread(sys.argv[1])
# Iterate over the extracted contours
for contour in get_contours(img):
orig_contour = contour
epsilon = 0.01 * cv2.arcLength(contour, True)
contour = cv2.approxPolyDP(contour, epsilon, True)
# Extract convex hull and the convexity defects
hull = cv2.convexHull(contour, returnPoints=False)
defects = cv2.convexityDefects(contour,hull)
if defects is None:
continue
# Draw lines and circles to show the defects
for i in range(defects.shape[0]):
start_defect, end_defect, far_defect, _ = defects[i,0]
start = tuple(contour[start_defect][0])
end = tuple(contour[end_defect][0])
far = tuple(contour[far_defect][0])
cv2.circle(img, far, 7, [255,0,0], -1)
cv2.drawContours(img, [orig_contour], -1, (0,0,0), 3)
cv2.imshow('Convexity defects',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
#####################################
# #
# How to censor a shape ######
#####################################
# NOTE(review): disabled demo below — clusters contours by solidity
# (area / convex-hull area) with k-means, then blacks out the bounding
# box of the less-solid cluster.  `cv2.cv.BoxPoints` is the OpenCV 2 API;
# OpenCV 3+ renamed it to cv2.boxPoints.  Re-check if re-enabling.
'''
if __name__=='__main__':
    # Input image containing all the shapes
    img = cv2.imread(sys.argv[1])
    img_orig = np.copy(img)
    input_contours = get_all_contours(img)
    solidity_values = []
    # Compute solidity factors of all the contours
    for contour in input_contours:
        area_contour = cv2.contourArea(contour)
        convex_hull = cv2.convexHull(contour)
        area_hull = cv2.contourArea(convex_hull)
        solidity = float(area_contour)/area_hull
        solidity_values.append(solidity)
    # Clustering using KMeans
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10,
1.0)
    flags = cv2.KMEANS_RANDOM_CENTERS
    solidity_values =np.array(solidity_values).reshape((len(solidity_values),1)).astype('float32')
    compactness, labels, centers = cv2.kmeans(solidity_values, 2, criteria,10, flags)
    closest_class = np.argmin(centers)
    output_contours = []
    for i in solidity_values[labels==closest_class]:
        index = np.where(solidity_values==i)[0][0]
        output_contours.append(input_contours[index])
    cv2.drawContours(img, output_contours, -1, (0,0,0), 3)
    cv2.imshow('Output', img)
    # Censoring
    for contour in output_contours:
        rect = cv2.minAreaRect(contour)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(img_orig,[box],0,(0,0,0),-1)
    cv2.imshow('Censored', img_orig)
    cv2.waitKey()
'''
#####################################
# #
# What is Image Segmentation? ######
#####################################
# Draw rectangle based on the input selection
def draw_rectangle(event, x, y, flags, params):
    """Mouse callback: let the user drag a selection rectangle.

    On button-down the anchor corner is recorded; while dragging, the
    selected region is shown inverted with a green outline; on button-up
    the final rectangle (x, y, w, h) is handed to run_grabcut().

    NOTE(review): `img` is read from module scope but is not listed in the
    `global` statement — that only works because it is mutated in place,
    never rebound.  Dragging up/left (x < x_init or y < y_init) produces
    empty slices, so the selection must go down/right; confirm intended.
    """
    global x_init, y_init, drawing, top_left_pt, bottom_right_pt, img_orig
    # Detecting mouse button down event: remember the anchor corner
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        x_init, y_init = x, y
    # Detecting mouse movement: live preview while dragging
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            top_left_pt, bottom_right_pt = (x_init,y_init), (x,y)
            # Invert the selected region relative to the pristine copy
            img[y_init:y, x_init:x] = 255 - img_orig[y_init:y, x_init:x]
            cv2.rectangle(img, top_left_pt, bottom_right_pt, (0,255,0), 2)
    # Detecting mouse button up event: finalize the selection
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        top_left_pt, bottom_right_pt = (x_init,y_init), (x,y)
        img[y_init:y, x_init:x] = 255 - img[y_init:y, x_init:x]
        cv2.rectangle(img, top_left_pt, bottom_right_pt, (0,255,0), 2)
        # Rectangle as (x, y, width, height), the format cv2.grabCut expects
        rect_final = (x_init, y_init, x-x_init, y-y_init)
        # Run Grabcut on the region of interest
        run_grabcut(img_orig, rect_final)
# Grabcut algorithm
def run_grabcut(img_orig, rect_final):
    """Segment the rectangle `rect_final` out of `img_orig` with GrabCut
    and display the foreground-only result in an 'Output' window.

    `rect_final` is (x, y, width, height).
    """
    # Mask marking the region of interest: 1 inside the rectangle, 0 outside.
    mask = np.zeros(img_orig.shape[:2], np.uint8)
    x, y, w, h = rect_final
    mask[y:y + h, x:x + w] = 1
    # Background/foreground GMM models required by cv2.grabCut.
    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)
    # Run GrabCut for 5 iterations, initialized from the rectangle.
    cv2.grabCut(img_orig, mask, rect_final, bgd_model, fgd_model, 5,
                cv2.GC_INIT_WITH_RECT)
    # Mask values 2/0 mean (probable) background; keep everything else.
    foreground = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    img_orig = img_orig * foreground[:, :, np.newaxis]
    # Display the image
    cv2.imshow('Output', img_orig)
if __name__=='__main__':
    # Shared state consumed by the draw_rectangle mouse callback.
    drawing = False
    top_left_pt, bottom_right_pt = (-1,-1), (-1,-1)
    # Read the input image from the command line.
    # NOTE(review): relies on `sys` and `cv2` being imported earlier in the file.
    img_orig = cv2.imread(sys.argv[1])
    img = img_orig.copy()
    cv2.namedWindow('Input')
    cv2.setMouseCallback('Input', draw_rectangle)
    # Redraw loop: exits when the user presses Esc (key code 27).
    while True:
        cv2.imshow('Input', img)
        c = cv2.waitKey(1)
        if c == 27:
            break
    cv2.destroyAllWindows()
| [
"cv2.setMouseCallback",
"cv2.rectangle",
"cv2.threshold",
"numpy.where",
"cv2.grabCut",
"cv2.imshow",
"cv2.contourArea",
"numpy.zeros",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.findContours",
"cv2.imread",
"cv2.namedWindow"
] | [((337, 374), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (349, 374), False, 'import cv2\n'), ((394, 430), 'cv2.threshold', 'cv2.threshold', (['ref_gray', '(127)', '(255)', '(0)'], {}), '(ref_gray, 127, 255, 0)\n', (407, 430), False, 'import cv2\n'), ((780, 810), 'cv2.findContours', 'cv2.findContours', (['thresh', '(1)', '(2)'], {}), '(thresh, 1, 2)\n', (796, 810), False, 'import cv2\n'), ((1364, 1401), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1376, 1401), False, 'import cv2\n'), ((1421, 1457), 'cv2.threshold', 'cv2.threshold', (['ref_gray', '(127)', '(255)', '(0)'], {}), '(ref_gray, 127, 255, 0)\n', (1434, 1457), False, 'import cv2\n'), ((1485, 1515), 'cv2.findContours', 'cv2.findContours', (['thresh', '(1)', '(2)'], {}), '(thresh, 1, 2)\n', (1501, 1515), False, 'import cv2\n'), ((2694, 2731), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2706, 2731), False, 'import cv2\n'), ((2784, 2820), 'cv2.threshold', 'cv2.threshold', (['img_gray', '(127)', '(255)', '(0)'], {}), '(img_gray, 127, 255, 0)\n', (2797, 2820), False, 'import cv2\n'), ((2892, 2922), 'cv2.findContours', 'cv2.findContours', (['thresh', '(2)', '(1)'], {}), '(thresh, 2, 1)\n', (2908, 2922), False, 'import cv2\n'), ((7996, 8034), 'numpy.zeros', 'np.zeros', (['img_orig.shape[:2]', 'np.uint8'], {}), '(img_orig.shape[:2], np.uint8)\n', (8004, 8034), True, 'import numpy as np\n'), ((8240, 8269), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (8248, 8269), True, 'import numpy as np\n'), ((8285, 8314), 'numpy.zeros', 'np.zeros', (['(1, 65)', 'np.float64'], {}), '((1, 65), np.float64)\n', (8293, 8314), True, 'import numpy as np\n'), ((8348, 8438), 'cv2.grabCut', 'cv2.grabCut', (['img_orig', 'mask', 'rect_final', 'bgdModel', 'fgdModel', '(5)', 'cv2.GC_INIT_WITH_RECT'], {}), '(img_orig, mask, 
rect_final, bgdModel, fgdModel, 5, cv2.\n GC_INIT_WITH_RECT)\n', (8359, 8438), False, 'import cv2\n'), ((8644, 8674), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'img_orig'], {}), "('Output', img_orig)\n", (8654, 8674), False, 'import cv2\n'), ((8819, 8842), 'cv2.imread', 'cv2.imread', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (8829, 8842), False, 'import cv2\n'), ((8875, 8899), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Input"""'], {}), "('Input')\n", (8890, 8899), False, 'import cv2\n'), ((8905, 8950), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""Input"""', 'draw_rectangle'], {}), "('Input', draw_rectangle)\n", (8925, 8950), False, 'import cv2\n'), ((9075, 9098), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (9096, 9098), False, 'import cv2\n'), ((1129, 1153), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (1144, 1153), False, 'import cv2\n'), ((8977, 9001), 'cv2.imshow', 'cv2.imshow', (['"""Input"""', 'img'], {}), "('Input', img)\n", (8987, 9001), False, 'import cv2\n'), ((9015, 9029), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9026, 9029), False, 'import cv2\n'), ((8476, 8517), 'numpy.where', 'np.where', (['((mask == 2) | (mask == 0))', '(0)', '(1)'], {}), '((mask == 2) | (mask == 0), 0, 1)\n', (8484, 8517), True, 'import numpy as np\n'), ((7378, 7442), 'cv2.rectangle', 'cv2.rectangle', (['img', 'top_left_pt', 'bottom_right_pt', '(0, 255, 0)', '(2)'], {}), '(img, top_left_pt, bottom_right_pt, (0, 255, 0), 2)\n', (7391, 7442), False, 'import cv2\n'), ((7682, 7746), 'cv2.rectangle', 'cv2.rectangle', (['img', 'top_left_pt', 'bottom_right_pt', '(0, 255, 0)', '(2)'], {}), '(img, top_left_pt, bottom_right_pt, (0, 255, 0), 2)\n', (7695, 7746), False, 'import cv2\n')] |
import numpy as np
class Target:
    """Linear constant-velocity target for tracking simulations.

    The state is a 4x1 column vector [[x], [y], [vx], [vy]].  `next_state`
    propagates state and covariance through the linear model, `get_measurement`
    projects the state to the observed 2-D position, and `sample` draws points
    from the current Gaussian state estimate.
    """
    def __init__(self,
                 init_weight=1.0,
                 init_state=None,
                 init_cov=None,
                 process_noise=0.001,
                 step=3,
                 dt_1=1,
                 dt_2=1):
        """Initialise the target.

        :param init_weight: scalar weight associated with this target
        :param init_state: optional 4x1 initial state; defaults to zeros
        :param init_cov: optional 4x4 initial covariance; defaults to 0.01*I
        :param process_noise: scale of the optional positional noise
        :param step: initial velocity written into state[2] and state[3]
        :param dt_1: time delta coupling x and vx in the transition matrix
        :param dt_2: time delta coupling y and vy in the transition matrix

        Bug fix: the defaults for `init_state`/`init_cov` used to be
        np.array(...) objects evaluated once at definition time; since
        __init__ mutates the state in place (state[2][0] = step), every
        Target built with defaults shared — and mutated — the same array.
        They are now created fresh per instance.
        """
        if init_state is None:
            init_state = np.array([[0.0], [0.0], [0.0], [0.0]])
        if init_cov is None:
            init_cov = np.diag((0.01, 0.01, 0.01, 0.01))
        self.state = init_state
        self.state_cov = init_cov
        self.weight = init_weight
        # Placeholder; overwritten with R + H P H^T in get_measurement().
        self.measure_cov = init_cov
        self.dt_1 = dt_1
        self.dt_2 = dt_2
        self.all_states = [init_state]
        self.all_cov = [init_cov]
        # Seed both velocity components with the step size.
        self.state[2][0] = step
        self.state[3][0] = step
        # Constant-velocity state transition matrix.
        self.A = np.array([[1, 0, dt_1, 0],
                           [0, 1, 0, dt_2],
                           [0, 0, 1, 0],
                           [0, 0, 0, 1]])
        self.B = np.eye(init_state.shape[0])  # control matrix (U is zero)
        self.U = np.zeros((init_state.shape[0], 1))
        self.Q = np.eye(init_state.shape[0])  # process covariance
        self.H = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])  # observe position only
        self.R = np.eye(2)  # measurement covariance
        self.process_noise = process_noise

    def set_dir(self, dt_1, dt_2):
        """Rebuild the transition matrix with new time deltas."""
        self.A = np.array([[1, 0, dt_1, 0],
                           [0, 1, 0, dt_2],
                           [0, 0, 1, 0],
                           [0, 0, 0, 1]])

    def next_state(self, noise=False):
        """Advance one step: state <- A @ state (+ optional positional noise).

        Also propagates the covariance as P <- A P A^T + Q and records both
        in the history lists.
        """
        x = self.state
        next_state = np.dot(self.A, x) + np.dot(self.B, self.U)
        # Add small process noise to the position components only
        if noise:
            Qsim = np.diag([self.process_noise, self.process_noise]) ** 2
            next_state[0, 0] = next_state[0, 0] + np.random.randn() * Qsim[0, 0]
            next_state[1, 0] = next_state[1, 0] + np.random.randn() * Qsim[1, 1]
        self.state = next_state
        self.all_states.append(next_state)
        next_cov = np.dot(self.A, np.dot(self.state_cov, self.A.T)) + self.Q
        self.state_cov = next_cov
        self.all_cov.append(next_cov)

    def get_measurement(self):
        """Return the observed position H @ state (shape (2, 1)).

        Side effect: refreshes `measure_cov` with R + H P H^T.
        """
        obs = np.dot(self.H, self.state)
        self.measure_cov = self.R + np.dot(self.H,
                                              np.dot(self.state_cov, self.H.T))
        return obs

    def sample(self, N=1):
        """Draw N samples from N(state, state_cov); returns shape (N, 4)."""
        return np.random.multivariate_normal(self.state.flat, self.state_cov, size=N)
| [
"numpy.eye",
"numpy.random.multivariate_normal",
"numpy.diag",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.random.randn"
] | [((120, 158), 'numpy.array', 'np.array', (['[[0.0], [0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0], [0.0]])\n', (128, 158), True, 'import numpy as np\n'), ((186, 219), 'numpy.diag', 'np.diag', (['(0.01, 0.01, 0.01, 0.01)'], {}), '((0.01, 0.01, 0.01, 0.01))\n', (193, 219), True, 'import numpy as np\n'), ((741, 813), 'numpy.array', 'np.array', (['[[1, 0, dt_1, 0], [0, 1, 0, dt_2], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, dt_1, 0], [0, 1, 0, dt_2], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (749, 813), True, 'import numpy as np\n'), ((912, 939), 'numpy.eye', 'np.eye', (['init_state.shape[0]'], {}), '(init_state.shape[0])\n', (918, 939), True, 'import numpy as np\n'), ((957, 991), 'numpy.zeros', 'np.zeros', (['(init_state.shape[0], 1)'], {}), '((init_state.shape[0], 1))\n', (965, 991), True, 'import numpy as np\n'), ((1010, 1037), 'numpy.eye', 'np.eye', (['init_state.shape[0]'], {}), '(init_state.shape[0])\n', (1016, 1037), True, 'import numpy as np\n'), ((1056, 1094), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0]])\n', (1064, 1094), True, 'import numpy as np\n'), ((1112, 1121), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1118, 1121), True, 'import numpy as np\n'), ((1219, 1291), 'numpy.array', 'np.array', (['[[1, 0, dt_1, 0], [0, 1, 0, dt_2], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, dt_1, 0], [0, 1, 0, dt_2], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (1227, 1291), True, 'import numpy as np\n'), ((2060, 2086), 'numpy.dot', 'np.dot', (['self.H', 'self.state'], {}), '(self.H, self.state)\n', (2066, 2086), True, 'import numpy as np\n'), ((2278, 2348), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.state.flat', 'self.state_cov'], {'size': 'N'}), '(self.state.flat, self.state_cov, size=N)\n', (2307, 2348), True, 'import numpy as np\n'), ((1457, 1474), 'numpy.dot', 'np.dot', (['self.A', 'x'], {}), '(self.A, x)\n', (1463, 1474), True, 'import numpy as np\n'), ((1477, 1499), 'numpy.dot', 
'np.dot', (['self.B', 'self.U'], {}), '(self.B, self.U)\n', (1483, 1499), True, 'import numpy as np\n'), ((1572, 1621), 'numpy.diag', 'np.diag', (['[self.process_noise, self.process_noise]'], {}), '([self.process_noise, self.process_noise])\n', (1579, 1621), True, 'import numpy as np\n'), ((1899, 1931), 'numpy.dot', 'np.dot', (['self.state_cov', 'self.A.T'], {}), '(self.state_cov, self.A.T)\n', (1905, 1931), True, 'import numpy as np\n'), ((2181, 2213), 'numpy.dot', 'np.dot', (['self.state_cov', 'self.H.T'], {}), '(self.state_cov, self.H.T)\n', (2187, 2213), True, 'import numpy as np\n'), ((1677, 1694), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1692, 1694), True, 'import numpy as np\n'), ((1758, 1775), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1773, 1775), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sklearn
from sklearn import cluster
# Load the Old Faithful eruption data and cluster it with k-means (k=2).
faithful = pd.read_csv('faithful.csv', index_col=0)
faithful.columns = ['eruptions', 'waiting']

# Scatter plot of the raw observations.
plt.scatter(faithful.eruptions, faithful.waiting)
plt.title('Old Faithful Data Scatterplot')
plt.xlabel('Length of eruption (minutes)')
plt.ylabel('Time between eruptions (minutes)')

faith = np.array(faithful)
k = 2
kmeans = cluster.KMeans(n_clusters=k)
kmeans.fit(faith)
labels = kmeans.labels_
centroids = kmeans.cluster_centers_

# Overlay each cluster's points and mark its centroid with a large 'x'.
for cluster_id in range(k):
    members = faith[np.where(labels == cluster_id)]
    plt.plot(members[:, 0], members[:, 1], 'o', markersize=7)
    centroid_marks = plt.plot(centroids[cluster_id, 0], centroids[cluster_id, 1], 'kx')
    plt.setp(centroid_marks, ms=15.0, mew=4.0)
plt.show()
| [
"sklearn.cluster.KMeans",
"matplotlib.pyplot.setp",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((145, 185), 'pandas.read_csv', 'pd.read_csv', (['"""faithful.csv"""'], {'index_col': '(0)'}), "('faithful.csv', index_col=0)\n", (156, 185), True, 'import pandas as pd\n'), ((257, 306), 'matplotlib.pyplot.scatter', 'plt.scatter', (['faithful.eruptions', 'faithful.waiting'], {}), '(faithful.eruptions, faithful.waiting)\n', (268, 306), True, 'import matplotlib.pyplot as plt\n'), ((307, 349), 'matplotlib.pyplot.title', 'plt.title', (['"""Old Faithful Data Scatterplot"""'], {}), "('Old Faithful Data Scatterplot')\n", (316, 349), True, 'import matplotlib.pyplot as plt\n'), ((350, 392), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Length of eruption (minutes)"""'], {}), "('Length of eruption (minutes)')\n", (360, 392), True, 'import matplotlib.pyplot as plt\n'), ((393, 439), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time between eruptions (minutes)"""'], {}), "('Time between eruptions (minutes)')\n", (403, 439), True, 'import matplotlib.pyplot as plt\n'), ((462, 480), 'numpy.array', 'np.array', (['faithful'], {}), '(faithful)\n', (470, 480), True, 'import numpy as np\n'), ((497, 525), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (511, 525), False, 'from sklearn import cluster\n'), ((987, 997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (995, 997), True, 'import matplotlib.pyplot as plt\n'), ((760, 807), 'matplotlib.pyplot.plot', 'plt.plot', (['ds[:, 0]', 'ds[:, 1]', '"""o"""'], {'markersize': '(7)'}), "(ds[:, 0], ds[:, 1], 'o', markersize=7)\n", (768, 807), True, 'import matplotlib.pyplot as plt\n'), ((845, 893), 'matplotlib.pyplot.plot', 'plt.plot', (['centroids[i, 0]', 'centroids[i, 1]', '"""kx"""'], {}), "(centroids[i, 0], centroids[i, 1], 'kx')\n", (853, 893), True, 'import matplotlib.pyplot as plt\n'), ((933, 957), 'matplotlib.pyplot.setp', 'plt.setp', (['lines'], {'ms': '(15.0)'}), '(lines, ms=15.0)\n', (941, 957), True, 'import matplotlib.pyplot as plt\n'), ((962, 986), 
'matplotlib.pyplot.setp', 'plt.setp', (['lines'], {'mew': '(4.0)'}), '(lines, mew=4.0)\n', (970, 986), True, 'import matplotlib.pyplot as plt\n'), ((700, 721), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (708, 721), True, 'import numpy as np\n')] |
import numpy as np
class network():
    """
    A feed-forward neural network with the following methods:
    - feedforward(), for using the network on a given input
    - SGD(), for applying Stochastic Gradient Descent (i.e. training)
    - BP(), for applying backpropagation; intended to be called by SGD()
    - cost(), for the (derivative of the) quadratic cost of an output
      relative to the desired output
    - eval(), for evaluating the network's accuracy on a data set

    Depends on module-level helpers `sigmoid(x, derivative=False)` and
    `chunks(seq, size)` and on the `random` module, all expected to be
    defined/imported elsewhere in this file.
    """
    def __init__(self, l_sizes: list, w_mu: float = 0.0,
                 w_sigma: float = 1.0, b_sigma: float = 1.0):
        """
        Build a network from the layer sizes `l_sizes` (the first entry is
        the input layer and carries no weights or biases).

        `w_mu`, `w_sigma` and `b_sigma` shape the initial parameter
        distributions: weights ~ sqrt(w_sigma) * N(0, 1) + w_mu and biases
        ~ sqrt(b_sigma) * N(0, 1); the defaults reproduce N(0, 1).
        Bug fix: these three names were previously referenced without being
        defined anywhere (neither parameter nor global), so constructing a
        network raised NameError.

        Note that there is a good argument for initializing the biases at
        zero, following the Stanford CS231N notes:
        http://cs231n.github.io/neural-networks-2/
        (not mentioned in the assignment; its effect is not (yet) explored)
        """
        self.n_layers = len(l_sizes)
        self.layer_sizes = l_sizes
        # Biases: one column vector per non-input layer (intercepts).
        self.biases = [np.sqrt(b_sigma)*np.random.randn(x, 1)
                       for x in l_sizes[1:]]
        # Weights: one (next_layer, previous_layer) matrix per layer pair.
        self.weights = [np.sqrt(w_sigma)*np.random.randn(y, x) + w_mu
                        for x, y in np.array((l_sizes[:-1], l_sizes[1:])).T]

    def feedforward(self, x, save_var = False):
        """
        Returns the prediction of the network given the input, x.
        Assumes x is a np.array of shape (n,) or (n, 1), where n equals
        the size of the first layer (i.e. l_sizes[0]).

        With save_var=True, also returns the per-layer pre-activations
        (x_list) and activations (l_activation) needed by BP().
        """
        # Used for debugging and for backpropagation (BP)
        if save_var == True:
            xs = x
            l_activation = [x]  # all the layer activations (after sigmoid)
            x_list = []  # one vector per layer (before sigmoid)
            # The calculation is split up so intermediates are saved
            for l in range(self.n_layers-1):
                x = np.dot(self.weights[l], xs) + self.biases[l]
                x_list.append(x)
                xs = sigmoid(x)
                l_activation.append(xs)
            return x_list, xs, l_activation
        # Transforming input in case of dim (n,); does not affect (n, 1)
        x = x.reshape(-1, 1)
        # Note this could be optimized using matrix multiplication
        # -1 since x is the input layer
        for l in range(self.n_layers-1):
            x = sigmoid(np.dot(self.weights[l], x) + self.biases[l])
        return x

    def SGD(self, train_data, epochs, batch_size, learning_rate,
            test_data=None, save_performance = False):
        """
        Stochastic Gradient Descent (SGD)

        Loops through the number of epochs, splitting the training data into
        evenly sized chunks of size `batch_size`, then applies
        backpropagation (BP) to each chunk.

        If `test_data` is given, the network is evaluated after each epoch;
        with `save_performance` the (test, train) accuracies are appended to
        self.performance.
        """
        # Copying the data as to not reorder the original data,
        # keeping the same name for readability.
        train_data = train_data[:]
        # Save a list for performance to be saved in
        if save_performance:
            if not test_data:
                raise Exception("Performance can't be saved if no test data is given")
            self.performance = []
        for epoch in range(epochs):
            print(f"\n Epoch: {(epoch+1)}/{epochs}", end="")
            random.shuffle(train_data)  # Using a Fisher Yates Shuffle
            batches = chunks(train_data, batch_size)
            # Note: a more efficient approach would treat each batch as a
            # matrix and use matrix multiplication instead of a loop
            for batch in batches:
                # Apply backpropagation using gradient descent for each batch
                self.BP(batch, learning_rate)
            if test_data:
                n_correct, n = self.eval(test_data)
                print(f", Obtained Accuracy: {np.round(n_correct/n, 2)}" +
                      f" \t ({n_correct}/{n})", end="")
                if save_performance:
                    n_correct_train, n_t = self.eval(train_data, train_data = True)
                    self.performance.append((n_correct/n, n_correct_train/n_t))
        print("\n Process complete")

    def BP(self, batch, learning_rate):
        r"""
        Backpropagation (BP)

        Loops through each training sample in the batch, accumulates the
        gradient of the cost w.r.t. every weight and bias, then updates the
        network parameters with the batch-averaged gradient scaled by the
        learning rate.

        A batch is a list of tuples (pixels, answer), where pixels is the
        input activation vector and answer is a one-hot vector of length 10
        indicating the digit (assumes the MNIST data).

        The cost minimized is (in LaTeX):
        \frac{1}{2} \sum_n \frac{|f(x) - a|^2}{2}
        where n is the number of observations, f(x) the network output and
        a the actual result.
        """
        n_biases = [np.zeros(bias.shape) for bias in self.biases]
        n_weights = [np.zeros(weight.shape) for weight in self.weights]
        # looping over each sample, accumulating the gradient
        for pixels, answer in batch:
            dn_biases = [np.zeros(b.shape) for b in self.biases]
            dn_weights = [np.zeros(w.shape) for w in self.weights]
            # feedforward - saving the per-layer intermediates
            x_list, activation, activations = self.feedforward(pixels, save_var=True)
            # output-layer error: cost gradient times sigmoid derivative
            delta = self.cost(activations[-1],
                              answer) * sigmoid(x_list[-1], derivative=True)
            dn_biases[-1] = delta
            dn_weights[-1] = np.dot(delta, activations[-2].transpose())
            # Propagate the error backwards through the hidden layers
            for l in range(2, self.n_layers):
                x = x_list[-l]
                s_deriv = sigmoid(x, derivative=True)
                delta = s_deriv * np.dot(self.weights[-l+1].T, delta)
                dn_biases[-l] = delta
                dn_weights[-l] = np.dot(delta, activations[-l-1].T)
            for l in range(self.n_layers-1):
                n_biases[l] += dn_biases[l]
                n_weights[l] += dn_weights[l]
        # update weights and biases - averaged and scaled by the learning rate
        for l in range(self.n_layers-1):
            self.weights[l] = self.weights[l] - (learning_rate / len(batch)) * n_weights[l]
            self.biases[l] = self.biases[l] - (learning_rate / len(batch)) * n_biases[l]

    def cost(self, output, actual, derivative = True):
        """
        Quadratic cost between the network output and the actual value.

        With derivative=True (the default, and the only form used in
        practice) returns the partial derivative (output - actual); the
        original function serves only a conceptual purpose.
        """
        if derivative == False:
            return 1/2 * (output - actual)*(output - actual)
        return(output - actual)

    def eval(self, data, train_data = False):
        """
        Evaluates the network on a data set, returning a tuple of
        (number of correct predictions, total number of predictions).

        Assumes the MNIST database or data with a similar structure; with
        train_data=True the answers are one-hot vectors and are reduced
        with argmax, otherwise they are plain labels.
        """
        # 2-column matrix: (predicted digit, actual digit) per observation
        if train_data:
            predictions = np.array([(np.argmax(self.feedforward(pixels)), np.argmax(answer))
                                      for pixels, answer in data])
        else:
            predictions = np.array([(np.argmax(self.feedforward(pixels)), answer)
                                      for pixels, answer in data])
        n_correct = sum(predictions[:, 0] == predictions[:, 1])
        return (n_correct, len(predictions))
| [
"numpy.sqrt",
"numpy.argmax",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.random.randn",
"numpy.round"
] | [((5522, 5542), 'numpy.zeros', 'np.zeros', (['bias.shape'], {}), '(bias.shape)\n', (5530, 5542), True, 'import numpy as np\n'), ((5589, 5611), 'numpy.zeros', 'np.zeros', (['weight.shape'], {}), '(weight.shape)\n', (5597, 5611), True, 'import numpy as np\n'), ((1353, 1369), 'numpy.sqrt', 'np.sqrt', (['b_sigma'], {}), '(b_sigma)\n', (1360, 1369), True, 'import numpy as np\n'), ((1370, 1391), 'numpy.random.randn', 'np.random.randn', (['x', '(1)'], {}), '(x, 1)\n', (1385, 1391), True, 'import numpy as np\n'), ((5789, 5806), 'numpy.zeros', 'np.zeros', (['b.shape'], {}), '(b.shape)\n', (5797, 5806), True, 'import numpy as np\n'), ((5855, 5872), 'numpy.zeros', 'np.zeros', (['w.shape'], {}), '(w.shape)\n', (5863, 5872), True, 'import numpy as np\n'), ((6740, 6776), 'numpy.dot', 'np.dot', (['delta', 'activations[-l - 1].T'], {}), '(delta, activations[-l - 1].T)\n', (6746, 6776), True, 'import numpy as np\n'), ((1533, 1549), 'numpy.sqrt', 'np.sqrt', (['w_sigma'], {}), '(w_sigma)\n', (1540, 1549), True, 'import numpy as np\n'), ((1550, 1571), 'numpy.random.randn', 'np.random.randn', (['y', 'x'], {}), '(y, x)\n', (1565, 1571), True, 'import numpy as np\n'), ((1615, 1652), 'numpy.array', 'np.array', (['(l_sizes[:-1], l_sizes[1:])'], {}), '((l_sizes[:-1], l_sizes[1:]))\n', (1623, 1652), True, 'import numpy as np\n'), ((2360, 2387), 'numpy.dot', 'np.dot', (['self.weights[l]', 'xs'], {}), '(self.weights[l], xs)\n', (2366, 2387), True, 'import numpy as np\n'), ((2837, 2863), 'numpy.dot', 'np.dot', (['self.weights[l]', 'x'], {}), '(self.weights[l], x)\n', (2843, 2863), True, 'import numpy as np\n'), ((6586, 6623), 'numpy.dot', 'np.dot', (['self.weights[-l + 1].T', 'delta'], {}), '(self.weights[-l + 1].T, delta)\n', (6592, 6623), True, 'import numpy as np\n'), ((8696, 8713), 'numpy.argmax', 'np.argmax', (['answer'], {}), '(answer)\n', (8705, 8713), True, 'import numpy as np\n'), ((4606, 4632), 'numpy.round', 'np.round', (['(n_correct / n)', '(2)'], {}), '(n_correct / n, 2)\n', 
(4614, 4632), True, 'import numpy as np\n')] |
import os
from typing import Dict
import numpy as np
import pandas as pd
import scipy
import torch
import generate_data
import rnn
import taylor_expansion
import utils
def linear_approximation(X: np.array, t: float) -> np.array:
    """Piecewise-linear interpolation of the sequence X at time t in [0, 1].

    X is treated as samples at equally spaced times 0, 1/n, ..., 1 with
    n = len(X) - 1; the value at t is interpolated between the two
    surrounding samples.
    """
    assert 0 <= t <= 1
    n = len(X) - 1
    if t == 1:
        return X[n]
    left = int(n * t)
    frac = n * t - left  # fractional position inside the segment
    return X[left] + frac * (X[left + 1] - X[left])
def f(t: float, h: np.array, X: np.array, Wh: np.array, Wi: np.array, b: np.array, non_linearity: str) -> np.array:
    """Evolution function of the RNN cell given to the scipy solver.

    Computes phi(Wh @ h + Wi @ X(t) + b), where X(t) is the linear
    interpolation of the driving path and phi is the activation selected
    by `non_linearity` ('sigmoid' or 'tanh').

    :raises ValueError: if `non_linearity` is not a supported activation.
        (Previously an unknown value fell through and returned None, which
        made the ODE solver fail with an unrelated error.)
    """
    z = Wh @ h + Wi @ linear_approximation(X, t) + b
    if non_linearity == 'sigmoid':
        return 1 / (1 + np.exp(-z))
    if non_linearity == 'tanh':
        return np.tanh(z)
    raise ValueError(f"Unsupported non_linearity: {non_linearity!r}")
def approximation(model: torch.nn.Module, N: int, X: torch.Tensor) -> np.array:
    """Computes the distance between the solution of the CDE with scipy solver and the Taylor expansion truncated at N.

    :param model: RNN model
    :param N: truncation order of the Taylor expansion
    :param X: driving path, of shape (batch_size, length, channels)
    :return: numpy array of the distance between the two solutions.
    """
    # Random initial hidden state; its last `input_channels` entries hold the
    # current value of the driving path.
    # NOTE(review): despite the docstring, the caller passes a single path of
    # shape (length, channels), so X[0,:-1] is the first time point minus its
    # last channel (presumably the appended time channel) — confirm upstream.
    hidden_state = torch.randn(model.hidden_channels + model.input_channels)
    hidden_state[-model.input_channels:] = X[0,:-1]
    # Frozen numpy copies of the RNN-cell parameters for the ODE right-hand side.
    Wh = model.weight_hh.detach().numpy()
    Wi = model.weight_ih.detach().numpy()
    b = model.bias.detach().numpy()
    # Reference solution: integrate the CDE over [0, 1] with a stiff solver at
    # very tight tolerances, keeping only the terminal value y(1).
    ode_result = scipy.integrate.solve_ivp(lambda t,h: f(t, h, X[:,:-1].detach().numpy(), Wh, Wi, b, model.non_linearity),
                                          (0, 1),
                                          method='LSODA',
                                          y0=hidden_state[:-model.input_channels].detach().numpy(),
                                          rtol=10**-12,
                                          atol=10**-14).y[:,-1]
    # Taylor/signature approximations for all truncation orders up to N.
    _, euler_coeff_sparse = taylor_expansion.model_approximation(model, N, X, hidden_state, is_sparse=True)
    # Euclidean distance between each truncated expansion and the reference.
    return np.linalg.norm(euler_coeff_sparse[:,:-2].detach().numpy() - np.expand_dims(ode_result, axis=0), axis=1)
def compute_taylor_convergence(experiment_dir: str, config: Dict):
    """Compares the solution of a CDE obtained with a classical solver to its Taylor approximation with signatures for
    various truncations, when the tensor field of the CDE are random RNNs, with either tanh or sigmoid activations, and
    the driving path is a 2d spiral. Saves the results in a dataframe.

    :param experiment_dir: directory where the experiment is saved
    :param config: configuration values
    :return: None
    """
    X, _ = generate_data.generate_spirals(1, config['length'])
    input_channels = X.shape[2]  # dimension d
    # Normalize by total variation and append a time channel.
    Xtime = utils.add_time(X / utils.total_variation(X))[0]
    n_classes = 2
    # Accumulate plain dicts and build the DataFrame once at the end:
    # DataFrame.append was O(n) per call and has been removed in pandas 2.0.
    records = []
    for k in range(config['n_realisations']):
        if k % 10 == 0:
            print('Realisation: {}/{}'.format(k, config['n_realisations']))
        for activation in ['sigmoid', 'tanh']:
            model = rnn.RNNModel(input_channels, config['hidden_channels'], output_channels=n_classes,
                                 non_linearity=activation)
            # L2 norm of the concatenated recurrent and input weights.
            weight = torch.norm(torch.cat([model.weight_hh, model.weight_ih])).detach().numpy()
            result = approximation(model, config['order'], Xtime)
            for n in range(config['order']):
                records.append({'Step N': n + 1, 'Error': result[n],
                                'Weight L2 norm': weight, 'Activation': activation})
    df = pd.DataFrame(records, columns=['Step N', 'Error', 'Activation', 'Weight L2 norm'])
    df.to_csv(os.path.join(experiment_dir, 'taylor_convergence.csv'))
| [
"rnn.RNNModel",
"taylor_expansion.model_approximation",
"os.path.join",
"utils.total_variation",
"generate_data.generate_spirals",
"numpy.expand_dims",
"pandas.DataFrame",
"torch.randn",
"torch.cat"
] | [((1322, 1379), 'torch.randn', 'torch.randn', (['(model.hidden_channels + model.input_channels)'], {}), '(model.hidden_channels + model.input_channels)\n', (1333, 1379), False, 'import torch\n'), ((2040, 2119), 'taylor_expansion.model_approximation', 'taylor_expansion.model_approximation', (['model', 'N', 'X', 'hidden_state'], {'is_sparse': '(True)'}), '(model, N, X, hidden_state, is_sparse=True)\n', (2076, 2119), False, 'import taylor_expansion\n'), ((2759, 2810), 'generate_data.generate_spirals', 'generate_data.generate_spirals', (['(1)', "config['length']"], {}), "(1, config['length'])\n", (2789, 2810), False, 'import generate_data\n'), ((2945, 3018), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Step N', 'Error', 'Activation', 'Weight L2 norm']"}), "(columns=['Step N', 'Error', 'Activation', 'Weight L2 norm'])\n", (2957, 3018), True, 'import pandas as pd\n'), ((3770, 3824), 'os.path.join', 'os.path.join', (['experiment_dir', '"""taylor_convergence.csv"""'], {}), "(experiment_dir, 'taylor_convergence.csv')\n", (3782, 3824), False, 'import os\n'), ((2191, 2225), 'numpy.expand_dims', 'np.expand_dims', (['ode_result'], {'axis': '(0)'}), '(ode_result, axis=0)\n', (2205, 2225), True, 'import numpy as np\n'), ((3232, 3345), 'rnn.RNNModel', 'rnn.RNNModel', (['input_channels', "config['hidden_channels']"], {'output_channels': 'n_classes', 'non_linearity': 'activation'}), "(input_channels, config['hidden_channels'], output_channels=\n n_classes, non_linearity=activation)\n", (3244, 3345), False, 'import rnn\n'), ((2888, 2912), 'utils.total_variation', 'utils.total_variation', (['X'], {}), '(X)\n', (2909, 2912), False, 'import utils\n'), ((3406, 3451), 'torch.cat', 'torch.cat', (['[model.weight_hh, model.weight_ih]'], {}), '([model.weight_hh, model.weight_ih])\n', (3415, 3451), False, 'import torch\n')] |
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# torch.manual_seed(1) # reproducible
# np.random.seed(1)
# Hyper Parameters
BATCH_SIZE = 64
LR_G = 0.0001 # learning rate for generator
LR_D = 0.0001 # learning rate for discriminator
N_IDEAS = 5 # think of this as number of ideas for generating an art work(Generator)
ART_COMPONENTS = 15 # it could be total point G can drew in the canvas
PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS) for _ in range(BATCH_SIZE)])
def artist_works(): # painting from the famous artist (real target)
#a = np.random.uniform(1, 2, size=BATCH_SIZE)[:, np.newaxis]
r = 0.02 * np.random.randn(1, ART_COMPONENTS)
paintings = np.sin(PAINT_POINTS * np.pi) + r
paintings = torch.from_numpy(paintings).float()
return paintings
G = nn.Sequential( # Generator
nn.Linear(N_IDEAS, 128), # random ideas (could from normal distribution)
nn.ReLU(),
nn.Linear(128, ART_COMPONENTS), # making a painting from these random ideas
)
D = nn.Sequential( # Discriminator
nn.Linear(ART_COMPONENTS, 128), # receive art work either from the famous artist or a newbie like G
nn.ReLU(),
nn.Linear(128, 1),
nn.Sigmoid(), # tell the probability that the art work is made by artist
)
opt_D = torch.optim.Adam(D.parameters(), lr=LR_D)
opt_G = torch.optim.Adam(G.parameters(), lr=LR_G)
plt.ion() # something about continuous plotting
D_loss_history = []
G_loss_history = []
for step in range(10000):
artist_paintings = artist_works() # real painting from artist
G_ideas = torch.randn(BATCH_SIZE, N_IDEAS) # random ideas
G_paintings = G(G_ideas) # fake painting from G (random ideas)
prob_artist0 = D(artist_paintings) # D try to increase this prob
prob_artist1 = D(G_paintings) # D try to reduce this prob
D_loss = - torch.mean(torch.log(prob_artist0) + torch.log(1. - prob_artist1))
G_loss = torch.mean(torch.log(1. - prob_artist1))
D_loss_history.append(D_loss)
G_loss_history.append(G_loss)
opt_D.zero_grad()
D_loss.backward(retain_graph=True) # reusing computational graph
opt_D.step()
opt_G.zero_grad()
G_loss.backward()
opt_G.step()
if step % 50 == 0: # plotting
plt.cla()
plt.plot(PAINT_POINTS[0], G_paintings.data.numpy()[0], c='#4AD631', lw=3, label='Generated painting',)
plt.plot(PAINT_POINTS[0], np.sin(PAINT_POINTS[0] * np.pi), c='#74BCFF', lw=3, label='upper bound')
plt.text(-1, 0.75, 'D accuracy=%.2f (0.5 for D to converge)' % prob_artist0.data.numpy().mean(), fontdict={'size': 13})
plt.text(-1, 0.5, 'D score= %.2f (-1.38 for G to converge)' % -D_loss.data.numpy(), fontdict={'size': 13})
plt.ylim((-1, 1));plt.legend(loc='upper right', fontsize=10);plt.draw();plt.pause(0.01)
plt.ioff()
plt.show() | [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"matplotlib.pyplot.draw",
"torch.log",
"numpy.sin",
"matplotlib.pyplot.ioff",
"torch.from_numpy",
"numpy.linspace",
"torch.nn.Linear",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.pause",
"numpy.random.r... | [((1523, 1532), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1530, 1532), True, 'import matplotlib.pyplot as plt\n'), ((3066, 3076), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (3074, 3076), True, 'import matplotlib.pyplot as plt\n'), ((3078, 3088), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3086, 3088), True, 'import matplotlib.pyplot as plt\n'), ((935, 958), 'torch.nn.Linear', 'nn.Linear', (['N_IDEAS', '(128)'], {}), '(N_IDEAS, 128)\n', (944, 958), True, 'import torch.nn as nn\n'), ((1020, 1029), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1027, 1029), True, 'import torch.nn as nn\n'), ((1036, 1066), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'ART_COMPONENTS'], {}), '(128, ART_COMPONENTS)\n', (1045, 1066), True, 'import torch.nn as nn\n'), ((1175, 1205), 'torch.nn.Linear', 'nn.Linear', (['ART_COMPONENTS', '(128)'], {}), '(ART_COMPONENTS, 128)\n', (1184, 1205), True, 'import torch.nn as nn\n'), ((1280, 1289), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1287, 1289), True, 'import torch.nn as nn\n'), ((1296, 1313), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (1305, 1313), True, 'import torch.nn as nn\n'), ((1320, 1332), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1330, 1332), True, 'import torch.nn as nn\n'), ((1736, 1768), 'torch.randn', 'torch.randn', (['BATCH_SIZE', 'N_IDEAS'], {}), '(BATCH_SIZE, N_IDEAS)\n', (1747, 1768), False, 'import torch\n'), ((495, 529), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'ART_COMPONENTS'], {}), '(-1, 1, ART_COMPONENTS)\n', (506, 529), True, 'import numpy as np\n'), ((717, 751), 'numpy.random.randn', 'np.random.randn', (['(1)', 'ART_COMPONENTS'], {}), '(1, ART_COMPONENTS)\n', (732, 751), True, 'import numpy as np\n'), ((769, 797), 'numpy.sin', 'np.sin', (['(PAINT_POINTS * np.pi)'], {}), '(PAINT_POINTS * np.pi)\n', (775, 797), True, 'import numpy as np\n'), ((2144, 2173), 'torch.log', 'torch.log', (['(1.0 - prob_artist1)'], {}), 
'(1.0 - prob_artist1)\n', (2153, 2173), False, 'import torch\n'), ((2491, 2500), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (2498, 2500), True, 'import matplotlib.pyplot as plt\n'), ((2975, 2992), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1, 1)'], {}), '((-1, 1))\n', (2983, 2992), True, 'import matplotlib.pyplot as plt\n'), ((2993, 3035), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'fontsize': '(10)'}), "(loc='upper right', fontsize=10)\n", (3003, 3035), True, 'import matplotlib.pyplot as plt\n'), ((3036, 3046), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3044, 3046), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3062), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (3056, 3062), True, 'import matplotlib.pyplot as plt\n'), ((819, 846), 'torch.from_numpy', 'torch.from_numpy', (['paintings'], {}), '(paintings)\n', (835, 846), False, 'import torch\n'), ((2648, 2679), 'numpy.sin', 'np.sin', (['(PAINT_POINTS[0] * np.pi)'], {}), '(PAINT_POINTS[0] * np.pi)\n', (2654, 2679), True, 'import numpy as np\n'), ((2063, 2086), 'torch.log', 'torch.log', (['prob_artist0'], {}), '(prob_artist0)\n', (2072, 2086), False, 'import torch\n'), ((2089, 2118), 'torch.log', 'torch.log', (['(1.0 - prob_artist1)'], {}), '(1.0 - prob_artist1)\n', (2098, 2118), False, 'import torch\n')] |
import os
import os.path
import sys
import torch
import torch.utils.data as data
import cv2
import random
import numpy as np
cv2.setNumThreads(0)
class WiderFaceDetection(data.Dataset):
def __init__(self, txt_path, own_txt_path, pattern, preproc=None):
self.preproc = preproc
self.imgs_path = []
self.words = []
self.pattern = pattern
f = open(txt_path,'r')
lines = f.readlines()
isFirst = True
labels = []
# for own dataset
f_own = open(own_txt_path)
lines_own = f_own.readlines()
isFirst_own = True
labels_own = []
for line in lines_own:
line = line.rstrip('\n')
if line.startswith('/opt'):
if isFirst_own:
isFirst_own = False
else:
labels_copy_own = labels_own.copy()
self.words.append(labels_copy_own)
labels_own.clear()
self.imgs_path.append(line)
else:
line = line.split(' ')[1:]
label = [float(x) for x in line]
labels_own.append(label)
self.words.append(labels_own)
def __len__(self):
return len(self.imgs_path)
def __getitem__(self, index):
img = cv2.imread(self.imgs_path[index])
height, width, _ = img.shape
labels = self.words[index]
# for lable with visible part
annotations = np.zeros((0, 21))
if len(labels) == 0:
return annotations
for idx, label in enumerate(labels):
'''
for label with visible
'''
annotation = np.zeros((1, 21))
# bbox
annotation[0, 0] = label[0] # x1
annotation[0, 1] = label[1] # y1
annotation[0, 2] = label[2] # x2
annotation[0, 3] = label[3] # y2
# landmarks
annotation[0, 4] = label[4] # l0_x
annotation[0, 5] = label[5] # l0_y
annotation[0, 6] = label[6] # l1_x
annotation[0, 7] = label[7] # l1_y
annotation[0, 8] = label[12] # l2_x
annotation[0, 9] = label[13] # l2_y
annotation[0, 10] = label[8] # l3_x
annotation[0, 11] = label[9] # l3_y
annotation[0, 12] = label[10] # l4_x
annotation[0, 13] = label[11] # l4_y
annotation[0, 14] = 1
# angle
annotation[0, 15] = label[14]
# visible
annotation[0, 16] = label[15]
annotation[0, 17] = label[16]
annotation[0, 18] = label[19]
annotation[0, 19] = label[17]
annotation[0, 20] = label[18]
annotations = np.append(annotations, annotation, axis=0)
target = np.array(annotations)
if self.pattern == "train":
if len(labels) >= 2:
pass
else:
rand = np.random.rand()
if rand < 0.5:
rand_idx = random.randint(12880, len(self.imgs_path)-1)
rand_img = cv2.imread(self.imgs_path[rand_idx])
height_new, width_new, _ = rand_img.shape
if height < height_new and width < width_new:
pass
else:
label_rand = self.words[rand_idx]
annotations_rand = np.zeros((0, 21))
for label in label_rand:
annotation_rand = np.zeros((1, 21))
# bbox
annotation_rand[0, 0] = label[0] # x1
annotation_rand[0, 1] = label[1] # y1
annotation_rand[0, 2] = label[2] # x2
annotation_rand[0, 3] = label[3] # y2
# landmarks
annotation_rand[0, 4] = label[4] # l0_x
annotation_rand[0, 5] = label[5] # l0_y
annotation_rand[0, 6] = label[6] # l1_x
annotation_rand[0, 7] = label[7] # l1_y
annotation_rand[0, 8] = label[12] # l2_x
annotation_rand[0, 9] = label[13] # l2_y
annotation_rand[0, 10] = label[8] # l3_x
annotation_rand[0, 11] = label[9] # l3_y
annotation_rand[0, 12] = label[10] # l4_x
annotation_rand[0, 13] = label[11] # l4_y
annotation_rand[0, 14] = 1
# angle
annotation_rand[0, 15] = label[14]
# visible
annotation_rand[0, 16] = label[15]
annotation_rand[0, 17] = label[16]
annotation_rand[0, 18] = label[19]
annotation_rand[0, 19] = label[17]
annotation_rand[0, 20] = label[18]
annotations_rand = np.append(annotations_rand, annotation_rand, axis=0)
for i in range(250):
resize_ratio = random.uniform(0.4, 0.6)
height_rand, width_rand = int(height_new * resize_ratio), int(width_new * resize_ratio)
top_left_x = random.randint(0, width)
top_left_y = random.randint(0, height)
img_box = np.array([top_left_x, top_left_y, top_left_x + width_rand, top_left_y + height_rand])
face_box = annotations[0,:4]
iou = bb_intersection_over_union(img_box, face_box)
if top_left_x + width_rand > width or top_left_y + height_rand > height or iou > 0.02:
continue
if i < 249:
annotations_rand[:, :14] = annotations_rand[:, :14]*resize_ratio + np.tile(np.array([top_left_x, top_left_y]),7).reshape(-1, 14)
rand_img_resize = cv2.resize(rand_img, (width_rand, height_rand))
img[top_left_y:top_left_y+height_rand, top_left_x:top_left_x+width_rand, :] = rand_img_resize
target = np.vstack((target, np.array(annotations_rand)))
break
if self.preproc is not None:
img, target = self.preproc(img, target, self.imgs_path[index])
return torch.from_numpy(img), target
def bb_intersection_over_union(boxA, boxB):
boxA = [int(x) for x in boxA]
boxB = [int(x) for x in boxB]
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
targets = []
imgs = []
for _, sample in enumerate(batch):
for _, tup in enumerate(sample):
if torch.is_tensor(tup):
imgs.append(tup)
elif isinstance(tup, type(np.empty(0))):
annos = torch.from_numpy(tup).float()
targets.append(annos)
return (torch.stack(imgs, 0), targets) | [
"random.uniform",
"cv2.setNumThreads",
"numpy.random.rand",
"torch.stack",
"torch.from_numpy",
"numpy.append",
"numpy.array",
"numpy.zeros",
"torch.is_tensor",
"numpy.empty",
"cv2.resize",
"cv2.imread",
"random.randint"
] | [((127, 147), 'cv2.setNumThreads', 'cv2.setNumThreads', (['(0)'], {}), '(0)\n', (144, 147), False, 'import cv2\n'), ((1323, 1356), 'cv2.imread', 'cv2.imread', (['self.imgs_path[index]'], {}), '(self.imgs_path[index])\n', (1333, 1356), False, 'import cv2\n'), ((1492, 1509), 'numpy.zeros', 'np.zeros', (['(0, 21)'], {}), '((0, 21))\n', (1500, 1509), True, 'import numpy as np\n'), ((2906, 2927), 'numpy.array', 'np.array', (['annotations'], {}), '(annotations)\n', (2914, 2927), True, 'import numpy as np\n'), ((8219, 8239), 'torch.stack', 'torch.stack', (['imgs', '(0)'], {}), '(imgs, 0)\n', (8230, 8239), False, 'import torch\n'), ((1720, 1737), 'numpy.zeros', 'np.zeros', (['(1, 21)'], {}), '((1, 21))\n', (1728, 1737), True, 'import numpy as np\n'), ((2846, 2888), 'numpy.append', 'np.append', (['annotations', 'annotation'], {'axis': '(0)'}), '(annotations, annotation, axis=0)\n', (2855, 2888), True, 'import numpy as np\n'), ((6876, 6897), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (6892, 6897), False, 'import torch\n'), ((8006, 8026), 'torch.is_tensor', 'torch.is_tensor', (['tup'], {}), '(tup)\n', (8021, 8026), False, 'import torch\n'), ((3059, 3075), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3073, 3075), True, 'import numpy as np\n'), ((3214, 3250), 'cv2.imread', 'cv2.imread', (['self.imgs_path[rand_idx]'], {}), '(self.imgs_path[rand_idx])\n', (3224, 3250), False, 'import cv2\n'), ((3535, 3552), 'numpy.zeros', 'np.zeros', (['(0, 21)'], {}), '((0, 21))\n', (3543, 3552), True, 'import numpy as np\n'), ((8099, 8110), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (8107, 8110), True, 'import numpy as np\n'), ((3648, 3665), 'numpy.zeros', 'np.zeros', (['(1, 21)'], {}), '((1, 21))\n', (3656, 3665), True, 'import numpy as np\n'), ((5303, 5355), 'numpy.append', 'np.append', (['annotations_rand', 'annotation_rand'], {'axis': '(0)'}), '(annotations_rand, annotation_rand, axis=0)\n', (5312, 5355), True, 'import numpy as np\n'), ((5494, 
5518), 'random.uniform', 'random.uniform', (['(0.4)', '(0.6)'], {}), '(0.4, 0.6)\n', (5508, 5518), False, 'import random\n'), ((5676, 5700), 'random.randint', 'random.randint', (['(0)', 'width'], {}), '(0, width)\n', (5690, 5700), False, 'import random\n'), ((5742, 5767), 'random.randint', 'random.randint', (['(0)', 'height'], {}), '(0, height)\n', (5756, 5767), False, 'import random\n'), ((5807, 5896), 'numpy.array', 'np.array', (['[top_left_x, top_left_y, top_left_x + width_rand, top_left_y + height_rand]'], {}), '([top_left_x, top_left_y, top_left_x + width_rand, top_left_y +\n height_rand])\n', (5815, 5896), True, 'import numpy as np\n'), ((8138, 8159), 'torch.from_numpy', 'torch.from_numpy', (['tup'], {}), '(tup)\n', (8154, 8159), False, 'import torch\n'), ((6437, 6484), 'cv2.resize', 'cv2.resize', (['rand_img', '(width_rand, height_rand)'], {}), '(rand_img, (width_rand, height_rand))\n', (6447, 6484), False, 'import cv2\n'), ((6671, 6697), 'numpy.array', 'np.array', (['annotations_rand'], {}), '(annotations_rand)\n', (6679, 6697), True, 'import numpy as np\n'), ((6333, 6367), 'numpy.array', 'np.array', (['[top_left_x, top_left_y]'], {}), '([top_left_x, top_left_y])\n', (6341, 6367), True, 'import numpy as np\n')] |
import numpy as np
import open3d as o3d
from .line_mesh import LineMesh
EXTRINSICS = None
MAX_POLYS = 10
ORANGE = (255 / 255, 188 / 255, 0)
GREEN = (0, 255 / 255, 0)
def flatten(l): return [item for sublist in l for item in sublist]
def update_points(pcd, pc):
pcd.points = o3d.utility.Vector3dVector(pc)
def set_line(line_set, points, lines, colors):
line_set.points = o3d.utility.Vector3dVector(points)
line_set.lines = o3d.utility.Vector2iVector(lines)
line_set.colors = o3d.utility.Vector3dVector(colors)
def construct_grid(size=10, n=10, color=[0.5, 0.5, 0.5], plane='xy', plane_offset=-1, translate=[0, 0, 0]):
grid_ls = o3d.geometry.LineSet()
my_grid = make_grid(size=size, n=n, color=color, plane=plane, plane_offset=plane_offset, translate=translate)
set_line(grid_ls, *my_grid)
return grid_ls
def make_grid(size=10, n=10, color=[0.5, 0.5, 0.5], plane='xy', plane_offset=-1, translate=[0, 0, 0]):
"""draw a grid as a line set"""
# lineset = o3d.geometry.LineSet()
s = size / float(n)
s2 = 0.5 * size
points = []
for i in range(0, n + 1):
x = -s2 + i * s
points.append([x, -s2, plane_offset])
points.append([x, s2, plane_offset])
for i in range(0, n + 1):
z = -s2 + i * s
points.append([-s2, z, plane_offset])
points.append([s2, z, plane_offset])
points = np.array(points)
if plane == 'xz':
points[:, [2, 1]] = points[:, [1, 2]]
points = points + translate
n_points = points.shape[0]
lines = [[i, i + 1] for i in range(0, n_points - 1, 2)]
colors = [list(color)] * (n_points - 1)
return points, lines, colors
def clear_polys(all_polys, vis):
for line_mesh in all_polys:
line_mesh.remove_line(vis)
return []
def handle_shapes(vis, planes, obstacles, all_polys, line_radius=0.15):
all_polys = clear_polys(all_polys, vis)
for plane, _ in planes:
points = np.array(plane.exterior)
line_mesh = LineMesh(points, colors=GREEN, radius=line_radius)
line_mesh.add_line(vis)
all_polys.append(line_mesh)
for plane, _ in obstacles:
points = np.array(plane.exterior)
line_mesh = LineMesh(points, colors=ORANGE, radius=line_radius)
line_mesh.add_line(vis)
all_polys.append(line_mesh)
return all_polys
def create_lines(planes, obstacles, line_radius=0.15, rotate_func=None):
all_polys = []
for plane, _ in planes:
points = np.array(plane.exterior)
if rotate_func:
points = rotate_func(points)
line_mesh = LineMesh(points, colors=GREEN, radius=line_radius)
all_polys.append(line_mesh)
for plane, _ in obstacles:
points = np.array(plane.exterior)
if rotate_func:
points = rotate_func(points)
line_mesh = LineMesh(points, colors=ORANGE, radius=line_radius)
all_polys.append(line_mesh)
return all_polys
def get_extrinsics(vis):
ctr = vis.get_view_control()
camera_params = ctr.convert_to_pinhole_camera_parameters()
return camera_params.extrinsic
def set_initial_view(vis, extrinsics=[EXTRINSICS]):
ctr = vis.get_view_control()
camera_params = ctr.convert_to_pinhole_camera_parameters()
camera_params.extrinsic = extrinsics
ctr.convert_from_pinhole_camera_parameters(camera_params)
| [
"numpy.array",
"open3d.utility.Vector2iVector",
"open3d.geometry.LineSet",
"open3d.utility.Vector3dVector"
] | [((284, 314), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pc'], {}), '(pc)\n', (310, 314), True, 'import open3d as o3d\n'), ((386, 420), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (412, 420), True, 'import open3d as o3d\n'), ((442, 475), 'open3d.utility.Vector2iVector', 'o3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (468, 475), True, 'import open3d as o3d\n'), ((498, 532), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (524, 532), True, 'import open3d as o3d\n'), ((657, 679), 'open3d.geometry.LineSet', 'o3d.geometry.LineSet', ([], {}), '()\n', (677, 679), True, 'import open3d as o3d\n'), ((1391, 1407), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1399, 1407), True, 'import numpy as np\n'), ((1957, 1981), 'numpy.array', 'np.array', (['plane.exterior'], {}), '(plane.exterior)\n', (1965, 1981), True, 'import numpy as np\n'), ((2170, 2194), 'numpy.array', 'np.array', (['plane.exterior'], {}), '(plane.exterior)\n', (2178, 2194), True, 'import numpy as np\n'), ((2496, 2520), 'numpy.array', 'np.array', (['plane.exterior'], {}), '(plane.exterior)\n', (2504, 2520), True, 'import numpy as np\n'), ((2742, 2766), 'numpy.array', 'np.array', (['plane.exterior'], {}), '(plane.exterior)\n', (2750, 2766), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
N = 500
imsized = 10
def cMap1():
v = 10
k = 256
vals = np.ones((k, 4))
vals[:, 0] = np.array([(i % v)/v for i in range(k)])
vals[:, 1] = np.array([((i + 5) % v)/v for i in range(k)])
vals[:, 2] = np.array([((i + 7) % v)/v for i in range(k)])
newcmp = ListedColormap(vals)
return newcmp
def cMap2():
colors = [(234/255, 230/255, 202/255),
(114/255, 0, 0),
(234/255, 230/255, 202/255),
(114/255, 0, 0),
(234/255, 230/255, 202/255),
(114/255, 0, 0),
(30/255, 23/255, 20/255),
(234/255, 230/255, 202/255),
(114/255, 0, 0),
(30/255, 23/255, 20/255),
(234/255, 230/255, 202/255),
(30/255, 23/255, 20/255),
(114/255, 0, 0)] # R -> G -> B
cmap = LinearSegmentedColormap.from_list('my_list', colors, N=40)
return cmap
def display(mesh):
cmap = 'twilight_r'
#cmap = cMap2()
plt.figure(num = None, figsize=(imsized, imsized), dpi=300)
plt.axis('off')
#plot = plt.imshow(mesh, cmap = cmap, interpolation='lanczos' )
plot = plt.imshow(mesh, cmap = cmap, interpolation='lanczos')
####
filenameImage = f'test{N}_{cmap}.png'
plt.savefig(filenameImage, bbox_inches = 'tight')
####
plt.show()
plt.close()
if __name__ == '__main__':
mesh = np.load('ArrNP500_300_0.7_0.1_(-1-0.5j)_(-1+0.2j)_(-0.5-0.5j)_(-0.5+0.3j).npy')
display(mesh) | [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"numpy.ones",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"numpy.load",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.show"
] | [((258, 273), 'numpy.ones', 'np.ones', (['(k, 4)'], {}), '((k, 4))\n', (265, 273), True, 'import numpy as np\n'), ((470, 490), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['vals'], {}), '(vals)\n', (484, 490), False, 'from matplotlib.colors import ListedColormap, LinearSegmentedColormap\n'), ((1041, 1099), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""my_list"""', 'colors'], {'N': '(40)'}), "('my_list', colors, N=40)\n", (1074, 1099), False, 'from matplotlib.colors import ListedColormap, LinearSegmentedColormap\n'), ((1186, 1243), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'num': 'None', 'figsize': '(imsized, imsized)', 'dpi': '(300)'}), '(num=None, figsize=(imsized, imsized), dpi=300)\n', (1196, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1251, 1266), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1259, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1347, 1399), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mesh'], {'cmap': 'cmap', 'interpolation': '"""lanczos"""'}), "(mesh, cmap=cmap, interpolation='lanczos')\n", (1357, 1399), True, 'import matplotlib.pyplot as plt\n'), ((1459, 1506), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filenameImage'], {'bbox_inches': '"""tight"""'}), "(filenameImage, bbox_inches='tight')\n", (1470, 1506), True, 'import matplotlib.pyplot as plt\n'), ((1524, 1534), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1532, 1534), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1550), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1548, 1550), True, 'import matplotlib.pyplot as plt\n'), ((1593, 1672), 'numpy.load', 'np.load', (['"""ArrNP500_300_0.7_0.1_(-1-0.5j)_(-1+0.2j)_(-0.5-0.5j)_(-0.5+0.3j).npy"""'], {}), "('ArrNP500_300_0.7_0.1_(-1-0.5j)_(-1+0.2j)_(-0.5-0.5j)_(-0.5+0.3j).npy')\n", (1600, 1672), True, 'import numpy as np\n')] |
"""
Divergence metric between two scores based on size of subgraph isomorphism. If
two DAGs are the exact same, the subgraph isomorphism will be of maximum size
and node divergence and edge divergence will be zero.
"""
import sys
import os
import json
import argparse
import numpy as np
import networkx as nx
def get_flow(f):
return json.load(f)["flow"]
def simplify_flow(flow):
if isinstance(flow, str):
flow = json.loads(flow)
s_flow = {}
for node in flow:
s_node = dict(**node)
s_node["wires"] = s_node.get("wires", [])
if len(s_node["wires"]) > 0 and isinstance(s_node["wires"][0], list):
s_node["wires"] = sum(s_node["wires"], [])
s_flow[s_node["id"]] = s_node
return s_flow
def num_nodes(flow):
return len(flow.keys())
def num_edges(flow):
return sum(len(v["wires"]) for v in flow.values())
def has_edge(flow, k1, k2):
return k2 in flow[k1]["wires"]
def edge_to_string(k1, k2):
return " --> ".join([k1, k2])
def string_to_edge(s):
return tuple(s.split(" --> "))
def get_node_similarity(node1, node2):
if node1["type"] != node2["type"]:
return 0
__skip_compares__ = set(
[
"id",
"x",
"y",
"z",
"wires",
"type",
"endpointUrl", # for bot-intent type nodes
"name", # for ui_ nodes
"group", # for ui_ nodes
"tab", # for all nodes
"label", # for tab nodes
]
)
num = 0
den = 0
inc = 0
for x in node1.keys():
if x in __skip_compares__:
continue
den += 1
inc = 1
val1 = node1.get(x, None)
val2 = node2.get(x, None)
if (val1 is None) ^ (val2 is None):
inc = 0
elif not isinstance(val1, type(val1)) and not isinstance(val1, type(val2)):
inc = 0
elif val1 != val2:
inc = 0
num += inc
if den == 0 or num == den:
return 1
else:
return num / den
def mapping_weight(node1, node2):
# only makes sense to compare nodes of the same type
# can add additional conditions here if needed
try:
mnode1 = {k: v for k, v in node1.items() if k != "wires"}
mnode2 = {k: v for k, v in node2.items() if k != "wires"}
ans = get_node_similarity(mnode1, mnode2)
except Exception as e:
print("Comparison Exception:", e)
print(
"comparing",
json.dumps(node1, indent=2),
"\nand\n",
json.dumps(node2, indent=2),
)
ans = 0
return ans
def get_nodemap(flow1, flow2):
nodemap = []
for k1, v1 in flow1.items():
for k2, v2 in flow2.items():
wt = mapping_weight(v1, v2)
if wt > 0:
nodemap.append((k1, k2, wt))
nodemap.sort(key=lambda x: (
len(flow1[x[0]]["wires"]) + len(flow2[x[1]]["wires"])))
return nodemap
def create_product_graph(nmap, flow1, flow2):
prodgraph = set()
for k1a, k2a, wta in nmap:
for k1b, k2b, wtb in nmap:
# assert one-to-one mapping
if k1a == k1b or k2a == k2b:
continue
# is there is an edge between the two nodes in flow1?
e_a = has_edge(flow1, k1a, k1b)
# is there is an edge between the corresponding two nodes in flow2?
e_b = has_edge(flow2, k2a, k2b)
if not (e_a ^ e_b):
# if (k1a, k1b) ⇔ (k2a, k2b), AND
# the mapped nodes are of the same type,
# add edge to product graph
ind1 = nmap.index((k1a, k2a, wta))
ind2 = nmap.index((k1b, k2b, wtb))
edge = (min(ind1, ind2), max(ind1, ind2))
prodgraph.add(edge)
return list(prodgraph)
def density(pgraph, nmap):
return (2 * len(pgraph)) / (len(nmap) * (len(nmap) - 1))
def check_clique(pgraph, clq):
for i in clq:
for j in clq:
if (i != j) and (i, j) not in pgraph:
return False
return True
def large_graph_corr(pgraph, nmap, flow1, flow2):
pg_arr = np.array(pgraph, dtype=np.uint64) + 1
# runtime error if vertex numbers has 0, so add 1 and subtract when finding subset
import cliquematch
G = cliquematch.Graph.from_edgelist(pg_arr, len(nmap))
exact = True
dens = density(pgraph, nmap)
if dens > 0.7:
# highly dense graphs => node mapping is not strict enough,
# (too many nodes of same type) so computing the exact value is SLOW
# hence approximate via heuristic (some form of penalty)
clique0 = G.get_max_clique(use_heuristic=True, use_dfs=False)
# note that the approximate clique is <= the exact clique
exact = False
else:
clique0 = G.get_max_clique(use_heuristic=True, use_dfs=True)
clique = max(
G.all_cliques(size=len(clique0)), key=setup_weighted_clique(nmap, flow1, flow2)
)
subset = [nmap[i - 1] for i in clique]
return subset, exact
def setup_weighted_clique(nmap, flow1, flow2):
def clique_wt(clq):
wts = [nmap[x - 1][2] for x in clq]
return sum(wts)
return clique_wt
def small_graph_corr(pgraph, nmap, flow1, flow2):
G = nx.Graph()
G.add_nodes_from(i + 1 for i in range(len(nmap)))
G.add_edges_from([(a + 1, b + 1) for a, b in pgraph])
clique = max(
nx.algorithms.clique.find_cliques(G),
key=setup_weighted_clique(nmap, flow1, flow2),
)
subset = [nmap[x - 1] for x in clique]
return subset, True
def find_correspondence(pgraph, nmap, flow1, flow2):
if len(pgraph) == 0 and len(nmap) == 0:
return [], True
elif len(pgraph) < 2000:
return small_graph_corr(pgraph, nmap, flow1, flow2)
else:
return large_graph_corr(pgraph, nmap, flow1, flow2)
def get_mapped_edges(subset, flow1, flow2):
mapped_edges = {}
for k1a, k2a, wta in subset:
for k1b, k2b, wtb in subset:
if k1a == k1b or k2a == k2b:
continue
# is there is an edge between the two nodes in flow1?
e_a = has_edge(flow1, k1a, k1b)
# is there is an edge between the corresponding two nodes in flow2?
e_b = has_edge(flow2, k2a, k2b)
if e_a and e_b:
# successfully mapped the edge
mapped_edges[edge_to_string(
k1a, k1b)] = edge_to_string(k2a, k2b)
return mapped_edges
def edge_similarity(edgemap, nodemap, flow1, flow2):
if num_edges(flow1) != 0 and num_edges(flow2) != 0:
return (len(edgemap) / num_edges(flow1)) * (len(edgemap) / num_edges(flow2))
else:
return 0
def node_similarity(subset, nodemap, flow1, flow2):
if num_nodes(flow1) != 0 and num_nodes(flow2) != 0:
score = sum(x[2] for x in subset)
answer = (score / num_nodes(flow1)) * (score / num_nodes(flow2))
return answer
else:
return 0
def get_divergence(full1, full2, edges_only=True):
flow1 = simplify_flow(full1)
flow2 = simplify_flow(full2)
nmap = get_nodemap(flow1, flow2)
pg = create_product_graph(nmap, flow1, flow2)
corr, exact = find_correspondence(pg, nmap, flow1, flow2)
emap = get_mapped_edges(corr, flow1, flow2)
# print(f"{num_nodes(flow1)} nodes, {num_edges(flow1)} edges in flow1")
# print(f"{num_nodes(flow2)} nodes, {num_edges(flow2)} edges in flow2")
# print(len(emap), "edges mapped")
ns = node_similarity(corr, nmap, flow1, flow2)
es = edge_similarity(emap, nmap, flow1, flow2)
if edges_only:
return 1 - es
else:
return (1 - ns, 1 - es, exact)
def node_divergence(flow1, flow2):
return get_divergence(flow1, flow2, False)[0]
def edge_divergence(flow1, flow2):
return get_divergence(flow1, flow2, True)[1]
def runner(file1, file2):
divergence = get_divergence(get_flow(file1), get_flow(file2), False)[0]
return divergence
| [
"json.loads",
"json.dumps",
"networkx.Graph",
"numpy.array",
"json.load",
"networkx.algorithms.clique.find_cliques"
] | [((5362, 5372), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (5370, 5372), True, 'import networkx as nx\n'), ((342, 354), 'json.load', 'json.load', (['f'], {}), '(f)\n', (351, 354), False, 'import json\n'), ((435, 451), 'json.loads', 'json.loads', (['flow'], {}), '(flow)\n', (445, 451), False, 'import json\n'), ((4233, 4266), 'numpy.array', 'np.array', (['pgraph'], {'dtype': 'np.uint64'}), '(pgraph, dtype=np.uint64)\n', (4241, 4266), True, 'import numpy as np\n'), ((5511, 5547), 'networkx.algorithms.clique.find_cliques', 'nx.algorithms.clique.find_cliques', (['G'], {}), '(G)\n', (5544, 5547), True, 'import networkx as nx\n'), ((2543, 2570), 'json.dumps', 'json.dumps', (['node1'], {'indent': '(2)'}), '(node1, indent=2)\n', (2553, 2570), False, 'import json\n'), ((2607, 2634), 'json.dumps', 'json.dumps', (['node2'], {'indent': '(2)'}), '(node2, indent=2)\n', (2617, 2634), False, 'import json\n')] |
import numpy as np
import mc3
def quad(p, x):
"""
Quadratic polynomial function.
Parameters
p: Polynomial constant, linear, and quadratic coefficients.
x: Array of dependent variables where to evaluate the polynomial.
Returns
y: Polinomial evaluated at x: y = p0 + p1*x + p2*x^2
"""
y = p[0] + p[1]*x + p[2]*x**2.0
return y
# For the sake of example, create a noisy synthetic dataset, in a real
# scenario you would get your dataset from your data analysis pipeline:
# Fixed seed so the synthetic data (and thus the MCMC run) is reproducible.
np.random.seed(3)
x = np.linspace(0, 10, 100)
p_true = [3.0, -2.4, 0.5]
y = quad(p_true, x)
# Heteroscedastic noise level: sigma grows with sqrt(|y|).
uncert = np.sqrt(np.abs(y))
data = y + np.random.normal(0, uncert)
# Initial guess for fitting parameters:
params = np.array([10.0, -2.0, 0.1])
pstep = np.array([0.03, 0.03, 0.05])
# Run the MCMC:
func = quad
# indparams passes x through to quad(); snooker sampler, 1e5 samples,
# 1000 burn-in iterations, parallelized over 7 CPUs.
mc3_results = mc3.sample(data, uncert, func, params, indparams=[x],
    pstep=pstep, sampler='snooker', nsamples=1e5, burnin=1000, ncpu=7)
# And now, some post processing:
import mc3.plots as mp
import mc3.utils as mu
# Output dict contains the entire sample (posterior), need to remove burn-in:
posterior, zchain, zmask = mu.burn(mc3_results)
bestp = mc3_results['bestp']
# Set parameter names:
pnames = ["constant", "linear", "quadratic"]
# Plot best-fitting model and binned data:
mp.modelfit(data, uncert, x, y, savefile="quad_bestfit.png")
# Plot trace plot:
mp.trace(posterior, zchain, pnames=pnames, savefile="quad_trace.png")
# Plot pairwise posteriors:
mp.pairwise(posterior, pnames=pnames, bestp=bestp, savefile="quad_pairwise.png")
# Plot marginal posterior histograms (with 68% highest-posterior-density credible regions):
mp.histogram(posterior, pnames=pnames, bestp=bestp, quantile=0.683,
    savefile="quad_hist.png")
| [
"numpy.random.normal",
"numpy.abs",
"mc3.utils.burn",
"numpy.array",
"numpy.linspace",
"mc3.plots.modelfit",
"mc3.sample",
"numpy.random.seed",
"mc3.plots.trace",
"mc3.plots.pairwise",
"mc3.plots.histogram"
] | [((524, 541), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (538, 541), True, 'import numpy as np\n'), ((546, 569), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (557, 569), True, 'import numpy as np\n'), ((733, 760), 'numpy.array', 'np.array', (['[10.0, -2.0, 0.1]'], {}), '([10.0, -2.0, 0.1])\n', (741, 760), True, 'import numpy as np\n'), ((770, 798), 'numpy.array', 'np.array', (['[0.03, 0.03, 0.05]'], {}), '([0.03, 0.03, 0.05])\n', (778, 798), True, 'import numpy as np\n'), ((842, 972), 'mc3.sample', 'mc3.sample', (['data', 'uncert', 'func', 'params'], {'indparams': '[x]', 'pstep': 'pstep', 'sampler': '"""snooker"""', 'nsamples': '(100000.0)', 'burnin': '(1000)', 'ncpu': '(7)'}), "(data, uncert, func, params, indparams=[x], pstep=pstep, sampler=\n 'snooker', nsamples=100000.0, burnin=1000, ncpu=7)\n", (852, 972), False, 'import mc3\n'), ((1154, 1174), 'mc3.utils.burn', 'mu.burn', (['mc3_results'], {}), '(mc3_results)\n', (1161, 1174), True, 'import mc3.utils as mu\n'), ((1316, 1376), 'mc3.plots.modelfit', 'mp.modelfit', (['data', 'uncert', 'x', 'y'], {'savefile': '"""quad_bestfit.png"""'}), "(data, uncert, x, y, savefile='quad_bestfit.png')\n", (1327, 1376), True, 'import mc3.plots as mp\n'), ((1397, 1466), 'mc3.plots.trace', 'mp.trace', (['posterior', 'zchain'], {'pnames': 'pnames', 'savefile': '"""quad_trace.png"""'}), "(posterior, zchain, pnames=pnames, savefile='quad_trace.png')\n", (1405, 1466), True, 'import mc3.plots as mp\n'), ((1496, 1581), 'mc3.plots.pairwise', 'mp.pairwise', (['posterior'], {'pnames': 'pnames', 'bestp': 'bestp', 'savefile': '"""quad_pairwise.png"""'}), "(posterior, pnames=pnames, bestp=bestp, savefile='quad_pairwise.png'\n )\n", (1507, 1581), True, 'import mc3.plots as mp\n'), ((1670, 1767), 'mc3.plots.histogram', 'mp.histogram', (['posterior'], {'pnames': 'pnames', 'bestp': 'bestp', 'quantile': '(0.683)', 'savefile': '"""quad_hist.png"""'}), "(posterior, pnames=pnames, 
bestp=bestp, quantile=0.683,\n savefile='quad_hist.png')\n", (1682, 1767), True, 'import mc3.plots as mp\n'), ((633, 642), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (639, 642), True, 'import numpy as np\n'), ((655, 682), 'numpy.random.normal', 'np.random.normal', (['(0)', 'uncert'], {}), '(0, uncert)\n', (671, 682), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from numpy.testing import assert_almost_equal as aae
from spectra import SticksSpectrum
def setup():
    """Module-level setup hook; nothing to prepare."""
def teardown():
    """Module-level teardown hook; nothing to clean up."""
def test_init():
    """The constructor stores the data arrays and keyword options verbatim."""
    xs = np.arange(10)
    ys = np.arange(10)
    spec = SticksSpectrum("Hello World", xs, ys, units="ms", style="IR", y_shift=-5, time=9)
    aae(spec.energies, xs)
    aae(spec.intensities, ys)
    assert spec.units == "ms"
    assert spec.style == "IR"
    assert spec.y_shift == -5
    assert spec.time == 9
def test_iter():
    """Iterating a spectrum yields (energy, intensity) pairs."""
    xs = np.arange(10)
    ys = np.arange(10)
    spec = SticksSpectrum("Hello World", xs, ys)
    for energy, intensity in spec:
        assert energy == intensity
def test_eq():
    """Equality requires matching name, data, and metadata."""
    xs = np.arange(10)
    ys = np.arange(10)
    base = SticksSpectrum("S1", xs, ys)
    same = SticksSpectrum("S1", xs, ys)
    other_style = SticksSpectrum("S1", xs, ys, style="MS")
    other_name = SticksSpectrum("S4", xs, ys)
    other_shift = SticksSpectrum("S5", xs, ys, y_shift=6)
    assert base == same
    assert base != other_style
    assert base != other_name
    assert base != other_shift
def test_len():
    """len() of a spectrum equals the number of data points."""
    xs = np.arange(10)
    ys = np.arange(10)
    for spec in (SticksSpectrum("S1", xs, ys), SticksSpectrum("S1", xs, ys)):
        assert len(spec) == len(xs)
def test_str():
    """str() reports the class and the spectrum name."""
    xs = np.arange(10)
    ys = np.arange(10)
    spec = SticksSpectrum("Hello World", xs, ys)
    assert str(spec) == "<SticksSpectrum: Hello World>"
def test_add_sub():
    """Exercise +/- between spectra and scalars, and check name accumulation."""
    energies1, intensities1 = np.arange(10), np.arange(10)
    energies2, intensities2 = np.arange(20), np.arange(20)
    s1 = SticksSpectrum("Hello World", energies1, intensities1)
    # Spectrum + spectrum of the same length must not raise.
    s1 + s1
    # Scalar arithmetic; each operation appends to the spectrum name (see asserts).
    s2 = 1 + s1
    s3 = s2 - 1
    s4 = 1 - s3
    s5 = s1 - s1
    s6 = s1 - s2
    # Different-length operands must also be accepted.
    s7 = SticksSpectrum("Hello Big World", energies2, intensities2)
    s1 + s7
    s1 - s7
    # Same length but shifted energy axis must also be accepted.
    s = s1.copy()
    s.energies += 1
    s + s1
    s - s1
    assert s1.name == "Hello World"
    assert s2.name == "Hello World + 1"
    assert s3.name == "Hello World + 1 – 1"
    assert s4.name == "1 – Hello World + 1 – 1"
    assert s5.name == "Hello World – Hello World"
    assert s6.name == "Hello World – Hello World + 1"
    # Scalar add/sub leaves the energy axis untouched; +1 then -1 restores intensities.
    aae(s1.energies, s2.energies)
    aae(s1.energies, s3.energies)
    aae(s1.energies, s4.energies)
    aae(s3.intensities, s1.intensities)
def test_abs():
    """abs() flips negative intensities to positive."""
    xs = np.arange(10)
    all_positive = np.arange(10)
    part_negative = np.arange(10)
    part_negative[5:] = -part_negative[5:]
    s_pos = SticksSpectrum("S1", xs, all_positive)
    s_neg = SticksSpectrum("S2", xs, part_negative)
    assert s_pos != s_neg
    assert any(s_pos.intensities != s_neg.intensities)
    aae(s_pos.intensities, abs(s_neg).intensities)
def test_mul():
    """Spectra support multiplication with each other."""
    xs = np.arange(10)
    ys = np.arange(10)
    spec = SticksSpectrum("S1", xs, ys)
    spec * spec
def test_div():
    """Division yields intensity ratios; 0/0 produces NaN."""
    xs = np.arange(10)
    ys = np.arange(10)
    spec = SticksSpectrum("S1", xs, ys)
    ratio = spec / spec
    aae(ratio.energies, range(10))
    aae(ratio.intensities, [np.nan] + [1] * 9)
def test_copy():
    """copy() returns an equal but distinct object."""
    xs = np.arange(1, 11)
    ys = np.arange(1, 11)
    original = SticksSpectrum("Hello World", xs, ys)
    duplicate = original.copy()
    assert original == duplicate
    assert id(original) != id(duplicate)
def test_domain():
    """domain reports the (lowest, highest) energy of the spectrum."""
    xs = np.arange(10)
    ys = np.arange(10)
    spec = SticksSpectrum("Hello World", xs, ys)
    assert spec.domain == (0, 9)
@pytest.mark.xfail(raises=NotImplementedError)
def test_smoothed():
    """smoothed() is not implemented for stick spectra."""
    xs = np.arange(10)
    ys = np.arange(10)
    SticksSpectrum("Hello World", xs, ys).smoothed()
def test_baseline_subtracted():
    """baseline_subtracted lowers every intensity by the baseline value."""
    xs = np.arange(1, 11)
    ys = np.arange(1, 11)
    spec = SticksSpectrum("Hello World", xs, ys)
    default_baseline = spec.baseline_subtracted()
    explicit_baseline = spec.baseline_subtracted(9)
    aae(spec.intensities - 1, default_baseline.intensities)
    aae(spec.intensities - 9, explicit_baseline.intensities)
@pytest.mark.xfail(raises=NotImplementedError)
def test_set_zero():
    """set_zero() is not implemented for stick spectra."""
    xs = np.arange(10)
    ys = np.arange(10)
    SticksSpectrum("Hello World", xs, ys).set_zero(99)
def test_sliced():
    """sliced() runs with its default arguments."""
    xs = np.arange(10)
    ys = np.arange(10)
    SticksSpectrum("Hello World", xs, ys).sliced()
def test_from_csvs(tmp_path):
    """Spectra can be read from a freshly written csv and a fixture file."""
    csv_path = f"{tmp_path}/test.csv"
    with open(csv_path, "w") as handle:
        handle.write("x,A,B\n0,2,4\n1,3,5")
    SticksSpectrum.from_csvs(csv_path)
    SticksSpectrum.from_csvs("tests/files/xrd.csv")
@pytest.mark.xfail(raises=NotImplementedError)
def test_norm():
    """norm() is not implemented for stick spectra."""
    xs = np.arange(10)
    ys = np.arange(10)
    SticksSpectrum("Hello World", xs, ys).norm()
def test_normed():
    """normed() runs with its default arguments."""
    xs = np.arange(10)
    ys = np.arange(10)
    SticksSpectrum("Hello World", xs, ys).normed()
@pytest.mark.xfail(raises=NotImplementedError)
def test_peaks():
    """peaks() is not implemented for stick spectra."""
    xs = np.arange(10)
    ys = np.arange(10)
    SticksSpectrum("Hello World", xs, ys).peaks()
def test_min_max():
    """Check builtin min/max and the min/max properties on a csv spectrum."""
    spec = SticksSpectrum.from_csvs("tests/files/spectrum1.csv")[0]
    assert min(spec) == (5, 0)
    assert max(spec) == (25, 0)
    assert spec.min == (16, -10)
    assert spec.max == (13, 21)
@pytest.mark.xfail(raises=NotImplementedError)
def test_correlation():
    """correlation() is not implemented for stick spectra."""
    xs = np.arange(10)
    ys = np.arange(10)
    spec = SticksSpectrum("Hello World", xs, ys)
    spec.correlation(spec)
def test_convert():
    """convert() accepts npoints and an optional energy range."""
    xs = np.arange(10)
    ys = np.arange(10)
    spec = SticksSpectrum("Hello World", xs, ys)
    spec.convert(2, npoints=100)
    spec.convert(2, npoints=100, energy_lim=(-5, 50))
| [
"spectra.SticksSpectrum.from_csvs",
"pytest.mark.xfail",
"numpy.testing.assert_almost_equal",
"spectra.SticksSpectrum",
"numpy.arange"
] | [((3586, 3631), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'NotImplementedError'}), '(raises=NotImplementedError)\n', (3603, 3631), False, 'import pytest\n'), ((4111, 4156), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'NotImplementedError'}), '(raises=NotImplementedError)\n', (4128, 4156), False, 'import pytest\n'), ((4713, 4758), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'NotImplementedError'}), '(raises=NotImplementedError)\n', (4730, 4758), False, 'import pytest\n'), ((5070, 5115), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'NotImplementedError'}), '(raises=NotImplementedError)\n', (5087, 5115), False, 'import pytest\n'), ((5481, 5526), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'NotImplementedError'}), '(raises=NotImplementedError)\n', (5498, 5526), False, 'import pytest\n'), ((258, 358), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {'units': '"""ms"""', 'style': '"""IR"""', 'y_shift': '(-5)', 'time': '(9)'}), "('Hello World', energies, intensities, units='ms', style='IR',\n y_shift=-5, time=9)\n", (272, 358), False, 'from spectra import SticksSpectrum\n'), ((359, 385), 'numpy.testing.assert_almost_equal', 'aae', (['s1.energies', 'energies'], {}), '(s1.energies, energies)\n', (362, 385), True, 'from numpy.testing import assert_almost_equal as aae\n'), ((390, 422), 'numpy.testing.assert_almost_equal', 'aae', (['s1.intensities', 'intensities'], {}), '(s1.intensities, intensities)\n', (393, 422), True, 'from numpy.testing import assert_almost_equal as aae\n'), ((616, 668), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (630, 668), False, 'from spectra import SticksSpectrum\n'), ((790, 833), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S1"""', 'energies', 'intensities'], {}), "('S1', energies, intensities)\n", (804, 833), False, 'from 
spectra import SticksSpectrum\n'), ((843, 886), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S1"""', 'energies', 'intensities'], {}), "('S1', energies, intensities)\n", (857, 886), False, 'from spectra import SticksSpectrum\n'), ((896, 951), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S1"""', 'energies', 'intensities'], {'style': '"""MS"""'}), "('S1', energies, intensities, style='MS')\n", (910, 951), False, 'from spectra import SticksSpectrum\n'), ((961, 1004), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S4"""', 'energies', 'intensities'], {}), "('S4', energies, intensities)\n", (975, 1004), False, 'from spectra import SticksSpectrum\n'), ((1014, 1068), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S5"""', 'energies', 'intensities'], {'y_shift': '(6)'}), "('S5', energies, intensities, y_shift=6)\n", (1028, 1068), False, 'from spectra import SticksSpectrum\n'), ((1234, 1277), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S1"""', 'energies', 'intensities'], {}), "('S1', energies, intensities)\n", (1248, 1277), False, 'from spectra import SticksSpectrum\n'), ((1287, 1330), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S1"""', 'energies', 'intensities'], {}), "('S1', energies, intensities)\n", (1301, 1330), False, 'from spectra import SticksSpectrum\n'), ((1488, 1540), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (1502, 1540), False, 'from spectra import SticksSpectrum\n'), ((1745, 1799), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies1', 'intensities1'], {}), "('Hello World', energies1, intensities1)\n", (1759, 1799), False, 'from spectra import SticksSpectrum\n'), ((1904, 1962), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello Big World"""', 'energies2', 'intensities2'], {}), "('Hello Big World', energies2, intensities2)\n", (1918, 1962), False, 'from spectra import SticksSpectrum\n'), ((2327, 2356), 
'numpy.testing.assert_almost_equal', 'aae', (['s1.energies', 's2.energies'], {}), '(s1.energies, s2.energies)\n', (2330, 2356), True, 'from numpy.testing import assert_almost_equal as aae\n'), ((2361, 2390), 'numpy.testing.assert_almost_equal', 'aae', (['s1.energies', 's3.energies'], {}), '(s1.energies, s3.energies)\n', (2364, 2390), True, 'from numpy.testing import assert_almost_equal as aae\n'), ((2395, 2424), 'numpy.testing.assert_almost_equal', 'aae', (['s1.energies', 's4.energies'], {}), '(s1.energies, s4.energies)\n', (2398, 2424), True, 'from numpy.testing import assert_almost_equal as aae\n'), ((2429, 2464), 'numpy.testing.assert_almost_equal', 'aae', (['s3.intensities', 's1.intensities'], {}), '(s3.intensities, s1.intensities)\n', (2432, 2464), True, 'from numpy.testing import assert_almost_equal as aae\n'), ((2620, 2664), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S1"""', 'energies', 'intensities1'], {}), "('S1', energies, intensities1)\n", (2634, 2664), False, 'from spectra import SticksSpectrum\n'), ((2674, 2718), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S2"""', 'energies', 'intensities2'], {}), "('S2', energies, intensities2)\n", (2688, 2718), False, 'from spectra import SticksSpectrum\n'), ((2918, 2961), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S1"""', 'energies', 'intensities'], {}), "('S1', energies, intensities)\n", (2932, 2961), False, 'from spectra import SticksSpectrum\n'), ((3059, 3102), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""S1"""', 'energies', 'intensities'], {}), "('S1', energies, intensities)\n", (3073, 3102), False, 'from spectra import SticksSpectrum\n'), ((3159, 3199), 'numpy.testing.assert_almost_equal', 'aae', (['div.intensities', '([np.nan] + [1] * 9)'], {}), '(div.intensities, [np.nan] + [1] * 9)\n', (3162, 3199), True, 'from numpy.testing import assert_almost_equal as aae\n'), ((3291, 3343), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), 
"('Hello World', energies, intensities)\n", (3305, 3343), False, 'from spectra import SticksSpectrum\n'), ((3498, 3550), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (3512, 3550), False, 'from spectra import SticksSpectrum\n'), ((3719, 3771), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (3733, 3771), False, 'from spectra import SticksSpectrum\n'), ((3897, 3949), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (3911, 3949), False, 'from spectra import SticksSpectrum\n'), ((4024, 4063), 'numpy.testing.assert_almost_equal', 'aae', (['(s1.intensities - 1)', 's2.intensities'], {}), '(s1.intensities - 1, s2.intensities)\n', (4027, 4063), True, 'from numpy.testing import assert_almost_equal as aae\n'), ((4068, 4107), 'numpy.testing.assert_almost_equal', 'aae', (['(s1.intensities - 9)', 's3.intensities'], {}), '(s1.intensities - 9, s3.intensities)\n', (4071, 4107), True, 'from numpy.testing import assert_almost_equal as aae\n'), ((4244, 4296), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (4258, 4296), False, 'from spectra import SticksSpectrum\n'), ((4405, 4457), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (4419, 4457), False, 'from spectra import SticksSpectrum\n'), ((4623, 4657), 'spectra.SticksSpectrum.from_csvs', 'SticksSpectrum.from_csvs', (['test_csv'], {}), '(test_csv)\n', (4647, 4657), False, 'from spectra import SticksSpectrum\n'), ((4662, 4709), 'spectra.SticksSpectrum.from_csvs', 'SticksSpectrum.from_csvs', (['"""tests/files/xrd.csv"""'], {}), "('tests/files/xrd.csv')\n", (4686, 
4709), False, 'from spectra import SticksSpectrum\n'), ((4842, 4894), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (4856, 4894), False, 'from spectra import SticksSpectrum\n'), ((4997, 5049), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (5011, 5049), False, 'from spectra import SticksSpectrum\n'), ((5200, 5252), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (5214, 5252), False, 'from spectra import SticksSpectrum\n'), ((5617, 5669), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (5631, 5669), False, 'from spectra import SticksSpectrum\n'), ((5782, 5834), 'spectra.SticksSpectrum', 'SticksSpectrum', (['"""Hello World"""', 'energies', 'intensities'], {}), "('Hello World', energies, intensities)\n", (5796, 5834), False, 'from spectra import SticksSpectrum\n'), ((220, 233), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (229, 233), True, 'import numpy as np\n'), ((235, 248), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (244, 248), True, 'import numpy as np\n'), ((578, 591), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (587, 591), True, 'import numpy as np\n'), ((593, 606), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (602, 606), True, 'import numpy as np\n'), ((752, 765), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (761, 765), True, 'import numpy as np\n'), ((767, 780), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (776, 780), True, 'import numpy as np\n'), ((1196, 1209), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1205, 1209), True, 'import numpy as np\n'), ((1211, 1224), 'numpy.arange', 'np.arange', (['(10)'], 
{}), '(10)\n', (1220, 1224), True, 'import numpy as np\n'), ((1450, 1463), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1459, 1463), True, 'import numpy as np\n'), ((1465, 1478), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1474, 1478), True, 'import numpy as np\n'), ((1648, 1661), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1657, 1661), True, 'import numpy as np\n'), ((1663, 1676), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1672, 1676), True, 'import numpy as np\n'), ((1707, 1720), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (1716, 1720), True, 'import numpy as np\n'), ((1722, 1735), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (1731, 1735), True, 'import numpy as np\n'), ((2526, 2539), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2535, 2539), True, 'import numpy as np\n'), ((2541, 2554), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2550, 2554), True, 'import numpy as np\n'), ((2556, 2569), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2565, 2569), True, 'import numpy as np\n'), ((2880, 2893), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2889, 2893), True, 'import numpy as np\n'), ((2895, 2908), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2904, 2908), True, 'import numpy as np\n'), ((3021, 3034), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3030, 3034), True, 'import numpy as np\n'), ((3036, 3049), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3045, 3049), True, 'import numpy as np\n'), ((3247, 3263), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (3256, 3263), True, 'import numpy as np\n'), ((3265, 3281), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (3274, 3281), True, 'import numpy as np\n'), ((3460, 3473), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3469, 3473), True, 'import numpy as np\n'), ((3475, 3488), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3484, 
3488), True, 'import numpy as np\n'), ((3681, 3694), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3690, 3694), True, 'import numpy as np\n'), ((3696, 3709), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3705, 3709), True, 'import numpy as np\n'), ((3853, 3869), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (3862, 3869), True, 'import numpy as np\n'), ((3871, 3887), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (3880, 3887), True, 'import numpy as np\n'), ((4206, 4219), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4215, 4219), True, 'import numpy as np\n'), ((4221, 4234), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4230, 4234), True, 'import numpy as np\n'), ((4367, 4380), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4376, 4380), True, 'import numpy as np\n'), ((4382, 4395), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4391, 4395), True, 'import numpy as np\n'), ((4804, 4817), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4813, 4817), True, 'import numpy as np\n'), ((4819, 4832), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4828, 4832), True, 'import numpy as np\n'), ((4959, 4972), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4968, 4972), True, 'import numpy as np\n'), ((4974, 4987), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4983, 4987), True, 'import numpy as np\n'), ((5162, 5175), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5171, 5175), True, 'import numpy as np\n'), ((5177, 5190), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5186, 5190), True, 'import numpy as np\n'), ((5300, 5353), 'spectra.SticksSpectrum.from_csvs', 'SticksSpectrum.from_csvs', (['"""tests/files/spectrum1.csv"""'], {}), "('tests/files/spectrum1.csv')\n", (5324, 5353), False, 'from spectra import SticksSpectrum\n'), ((5579, 5592), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5588, 5592), True, 'import numpy as 
np\n'), ((5594, 5607), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5603, 5607), True, 'import numpy as np\n'), ((5744, 5757), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5753, 5757), True, 'import numpy as np\n'), ((5759, 5772), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (5768, 5772), True, 'import numpy as np\n')] |
from enum import Enum
from typing import List
from typing import Optional
import numpy as np
import pandas as pd
from etna.transforms.base import PerSegmentWrapper
from etna.transforms.base import Transform
class ImputerMode(str, Enum):
    """Enum for different imputation strategy."""

    # Subclassing ``str`` lets plain strings compare equal to members,
    # so callers may pass either the enum or its string value.
    zero = "zero"  # replace missing values with 0
    mean = "mean"  # replace with the series mean computed at fit time
    running_mean = "running_mean"  # mean over a trailing window of values
    forward_fill = "forward_fill"  # propagate the last observed value
    seasonal = "seasonal"  # seasonal moving average over past periods
class _OneSegmentTimeSeriesImputerTransform(Transform):
    """One segment version of transform to fill NaNs in series of a given dataframe.

    - It is assumed that given series begins with first non NaN value.
    - This transform can't fill NaNs in the future, only on train data.
    - This transform can't fill NaNs if all values are NaNs. In this case exception is raised.
    """

    def __init__(self, in_column: str, strategy: str, window: int, seasonality: int, default_value: Optional[float]):
        """
        Create instance of _OneSegmentTimeSeriesImputerTransform.

        Parameters
        ----------
        in_column:
            name of processed column
        strategy:
            filling value in missing timestamps:

            - If "zero", then replace missing dates with zeros

            - If "mean", then replace missing dates using the mean in fit stage.

            - If "running_mean" then replace missing dates using mean of subset of data

            - If "forward_fill" then replace missing dates using last existing value

            - If "seasonal" then replace missing dates using seasonal moving average

        window:
            In case of moving average and seasonality.

            * If ``window=-1`` all previous dates are taken in account

            * Otherwise only window previous dates

        seasonality:
            the length of the seasonality
        default_value:
            value which will be used to impute the NaNs left after applying the imputer with the chosen strategy

        Raises
        ------
        ValueError:
            if incorrect strategy given
        """
        self.in_column = in_column
        # ImputerMode(...) raises ValueError for an unknown strategy string.
        self.strategy = ImputerMode(strategy)
        self.window = window
        self.seasonality = seasonality
        self.default_value = default_value
        # Filled during fit() for the "zero"/"mean" strategies; mean is a float.
        self.fill_value: Optional[float] = None
        # Timestamps that were NaN on the training data; set during fit().
        self.nan_timestamps: Optional[List[pd.Timestamp]] = None

    def fit(self, df: pd.DataFrame) -> "_OneSegmentTimeSeriesImputerTransform":
        """
        Fit preprocess params.

        Parameters
        ----------
        df: pd.DataFrame
            dataframe with series to fit preprocess params with

        Returns
        -------
        self: _OneSegmentTimeSeriesImputerTransform
            fitted preprocess

        Raises
        ------
        ValueError:
            if the series contains no non-NaN values
        """
        raw_series = df[self.in_column]
        if np.all(raw_series.isna()):
            raise ValueError("Series hasn't non NaN values which means it is empty and can't be filled.")
        # Drop the leading NaN head: the series is assumed to start at its
        # first valid value, and NaNs before it are not imputable anyway.
        series = raw_series[raw_series.first_valid_index() :]
        self.nan_timestamps = series[series.isna()].index
        if self.strategy == ImputerMode.zero:
            self.fill_value = 0
        elif self.strategy == ImputerMode.mean:
            self.fill_value = series.mean()
        return self

    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Transform given series.

        Parameters
        ----------
        df: pd.Dataframe
            transform ``in_column`` series of given dataframe

        Returns
        -------
        result: pd.DataFrame
            dataframe with in_column series with filled gaps
        """
        result_df = df.copy()
        cur_nans = result_df[result_df[self.in_column].isna()].index
        result_df[self.in_column] = self._fill(result_df[self.in_column])
        # restore nans not in self.nan_timestamps: only gaps seen during fit
        # may be imputed; everything else (e.g. future NaNs) stays NaN.
        restore_nans = cur_nans.difference(self.nan_timestamps)
        result_df.loc[restore_nans, self.in_column] = np.nan
        return result_df

    def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Inverse transform dataframe.

        Parameters
        ----------
        df: pd.Dataframe
            inverse transform ``in_column`` series of given dataframe

        Returns
        -------
        result: pd.DataFrame
            dataframe with in_column series with initial values
        """
        result_df = df.copy()
        # Put NaNs back at every timestamp that was imputed during fit.
        index = result_df.index.intersection(self.nan_timestamps)
        result_df.loc[index, self.in_column] = np.nan
        return result_df

    def _fill(self, df: pd.Series) -> pd.Series:
        """
        Create new Series taking all previous dates and adding missing dates.

        Fills missed values for new dates according to ``self.strategy``

        Parameters
        ----------
        df: pd.Series
            series to fill

        Returns
        -------
        result: pd.Series

        Raises
        ------
        ValueError:
            if the transform was not fitted before use
        """
        if self.nan_timestamps is None:
            raise ValueError("Trying to apply the unfitted transform! First fit the transform.")

        if self.strategy == ImputerMode.zero or self.strategy == ImputerMode.mean:
            df = df.fillna(value=self.fill_value)
        elif self.strategy == ImputerMode.forward_fill:
            df = df.fillna(method="ffill")
        elif self.strategy == ImputerMode.running_mean or self.strategy == ImputerMode.seasonal:
            # For running_mean, seasonality == 1 so this degenerates to a plain
            # trailing-window mean; window == -1 means "use the whole history".
            history = self.seasonality * self.window if self.window != -1 else len(df)
            timestamps = list(df.index)
            for timestamp in self.nan_timestamps:
                i = timestamps.index(timestamp)
                # Positions of the same phase in previous seasons, most recent first.
                indexes = np.arange(i - self.seasonality, i - self.seasonality - history, -self.seasonality)
                indexes = indexes[indexes >= 0]
                df.iloc[i] = np.nanmean(df.iloc[indexes])

        # Fix: compare against None explicitly so that default_value=0 is
        # honored (0 is falsy and was previously skipped).
        if self.default_value is not None:
            df = df.fillna(value=self.default_value)
        return df
class TimeSeriesImputerTransform(PerSegmentWrapper):
    """Transform to fill NaNs in series of a given dataframe.

    - It is assumed that given series begins with first non NaN value.
    - This transform can't fill NaNs in the future, only on train data.
    - This transform can't fill NaNs if all values are NaNs. In this case exception is raised.

    Warning
    -------
    This transform can suffer from look-ahead bias in 'mean' mode. For transforming data at some timestamp
    it uses information from the whole train part.
    """

    def __init__(
        self,
        in_column: str = "target",
        strategy: str = ImputerMode.zero,
        window: int = -1,
        seasonality: int = 1,
        default_value: Optional[float] = None,
    ):
        """
        Create instance of TimeSeriesImputerTransform.

        Parameters
        ----------
        in_column:
            name of processed column
        strategy:
            filling value in missing timestamps:

            - If "zero", then replace missing dates with zeros

            - If "mean", then replace missing dates using the mean in fit stage.

            - If "running_mean" then replace missing dates using mean of subset of data

            - If "forward_fill" then replace missing dates using last existing value

            - If "seasonal" then replace missing dates using seasonal moving average

        window:
            In case of moving average and seasonality.

            * If ``window=-1`` all previous dates are taken in account

            * Otherwise only window previous dates

        seasonality:
            the length of the seasonality
        default_value:
            value which will be used to impute the NaNs left after applying the imputer with the chosen strategy

        Raises
        ------
        ValueError:
            if incorrect strategy given
        """
        self.in_column = in_column
        self.strategy = strategy
        self.window = window
        self.seasonality = seasonality
        self.default_value = default_value
        # Delegate the per-segment work to the one-segment implementation.
        segment_transform = _OneSegmentTimeSeriesImputerTransform(
            in_column=in_column,
            strategy=strategy,
            window=window,
            seasonality=seasonality,
            default_value=default_value,
        )
        super().__init__(transform=segment_transform)
__all__ = ["TimeSeriesImputerTransform"]
| [
"numpy.nanmean",
"numpy.arange"
] | [((5652, 5739), 'numpy.arange', 'np.arange', (['(i - self.seasonality)', '(i - self.seasonality - history)', '(-self.seasonality)'], {}), '(i - self.seasonality, i - self.seasonality - history, -self.\n seasonality)\n', (5661, 5739), True, 'import numpy as np\n'), ((5812, 5840), 'numpy.nanmean', 'np.nanmean', (['df.iloc[indexes]'], {}), '(df.iloc[indexes])\n', (5822, 5840), True, 'import numpy as np\n')] |
# Copyright (C) 2009 <NAME>
# Copyright (C) 2010-2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import array
def casestagg():
    """Power-flow data for the 5-bus Stagg & El-Abiad test network.

    Returns
    -------
    tuple
        ``(baseMVA, bus, gen, branch, area, gencost)`` where ``area`` and
        ``gencost`` are empty arrays (no area / cost data for this case).
    """
    ## system MVA base
    baseMVA = 100.0

    ## bus data
    # columns: bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
    bus = array([
        [1, 3,  0,  0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
        [2, 1, 20, 10, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
        [3, 1, 45, 15, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
        [4, 1, 40,  5, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
        [5, 1, 60, 10, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],
    ])

    ## generator data
    # columns: bus Pg Qg Qmax Qmin Vg mBase status Pmax Pmin
    gen = array([
        [1,  0,  0, 300, -300, 1.06, 100, 1, 250, 10],
        [2, 40, 30, 300, -300, 1.06, 100, 1, 300, 10],
    ], dtype=float)

    ## branch data
    # columns: fbus tbus r x b rateA rateB rateC ratio angle status
    # the charging susceptance b is given as half-line values times 2
    branch = array([
        [1, 2, 0.02, 0.06, 0.030 * 2, 250, 250, 250, 0, 0, 1],
        [1, 3, 0.08, 0.24, 0.025 * 2, 250, 250, 250, 0, 0, 1],
        [2, 3, 0.06, 0.18, 0.020 * 2, 250, 250, 250, 0, 0, 1],
        [2, 4, 0.06, 0.18, 0.020 * 2, 250, 250, 250, 0, 0, 1],
        [2, 5, 0.04, 0.12, 0.015 * 2, 250, 250, 250, 0, 0, 1],
        [3, 4, 0.01, 0.03, 0.010 * 2, 250, 250, 250, 0, 0, 1],
        [4, 5, 0.08, 0.24, 0.025 * 2, 250, 250, 250, 0, 0, 1],
    ])

    ## no area or generator cost data for this case
    area = array([])
    gencost = array([])

    return baseMVA, bus, gen, branch, area, gencost
| [
"numpy.array"
] | [((820, 1083), 'numpy.array', 'array', (['[[1, 3, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9], [2, 1, 20, 10, 0, 0, 1, 1, \n 0, 345, 1, 1.1, 0.9], [3, 1, 45, 15, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9],\n [4, 1, 40, 5, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9], [5, 1, 60, 10, 0, 0, 1,\n 1, 0, 345, 1, 1.1, 0.9]]'], {}), '([[1, 3, 0, 0, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9], [2, 1, 20, 10, 0, 0, \n 1, 1, 0, 345, 1, 1.1, 0.9], [3, 1, 45, 15, 0, 0, 1, 1, 0, 345, 1, 1.1, \n 0.9], [4, 1, 40, 5, 0, 0, 1, 1, 0, 345, 1, 1.1, 0.9], [5, 1, 60, 10, 0,\n 0, 1, 1, 0, 345, 1, 1.1, 0.9]])\n', (825, 1083), False, 'from numpy import array\n'), ((1201, 1318), 'numpy.array', 'array', (['[[1, 0, 0, 300, -300, 1.06, 100, 1, 250, 10], [2, 40, 30, 300, -300, 1.06, \n 100, 1, 300, 10]]'], {'dtype': 'float'}), '([[1, 0, 0, 300, -300, 1.06, 100, 1, 250, 10], [2, 40, 30, 300, -300, \n 1.06, 100, 1, 300, 10]], dtype=float)\n', (1206, 1318), False, 'from numpy import array\n'), ((1429, 1840), 'numpy.array', 'array', (['[[1, 2, 0.02, 0.06, 0.03 * 2, 250, 250, 250, 0, 0, 1], [1, 3, 0.08, 0.24, \n 0.025 * 2, 250, 250, 250, 0, 0, 1], [2, 3, 0.06, 0.18, 0.02 * 2, 250, \n 250, 250, 0, 0, 1], [2, 4, 0.06, 0.18, 0.02 * 2, 250, 250, 250, 0, 0, 1\n ], [2, 5, 0.04, 0.12, 0.015 * 2, 250, 250, 250, 0, 0, 1], [3, 4, 0.01, \n 0.03, 0.01 * 2, 250, 250, 250, 0, 0, 1], [4, 5, 0.08, 0.24, 0.025 * 2, \n 250, 250, 250, 0, 0, 1]]'], {}), '([[1, 2, 0.02, 0.06, 0.03 * 2, 250, 250, 250, 0, 0, 1], [1, 3, 0.08, \n 0.24, 0.025 * 2, 250, 250, 250, 0, 0, 1], [2, 3, 0.06, 0.18, 0.02 * 2, \n 250, 250, 250, 0, 0, 1], [2, 4, 0.06, 0.18, 0.02 * 2, 250, 250, 250, 0,\n 0, 1], [2, 5, 0.04, 0.12, 0.015 * 2, 250, 250, 250, 0, 0, 1], [3, 4, \n 0.01, 0.03, 0.01 * 2, 250, 250, 250, 0, 0, 1], [4, 5, 0.08, 0.24, 0.025 *\n 2, 250, 250, 250, 0, 0, 1]])\n', (1434, 1840), False, 'from numpy import array\n'), ((1882, 1891), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1887, 1891), False, 'from numpy import array\n'), ((1906, 1915), 'numpy.array', 'array', 
(['[]'], {}), '([])\n', (1911, 1915), False, 'from numpy import array\n')] |
#!/usr/bin/env python
# coding: utf-8

# 6. naloga
# Sources:
# https://towardsdatascience.com/building-a-k-nearest-neighbors-k-nn-model-with-scikit-learn-51209555453a
# https://medium.com/@svanillasun/how-to-deal-with-cross-validation-based-on-knn-algorithm-compute-auc-based-on-naive-bayes-ff4b8284cff4

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
# Fix: KNeighborsClassifier was used below but never imported (NameError).
from sklearn.neighbors import KNeighborsClassifier

# Load the dataset and keep only the first 30 rows.
dataAll = pd.read_csv("data/reg/181.csv")
data = dataAll.head(30)

# Split into features (X1..X5) and target (Y).
X = data[['X1', 'X2', 'X3', 'X4', 'X5']]
y = data['Y']

# Create a new KNN model with k = 3 neighbours.
knn_cv = KNeighborsClassifier(n_neighbors=3)

# Evaluate the model with 5-fold cross-validation.
cv_scores = cross_val_score(knn_cv, X, y, cv=5)

# Print each cv score (accuracy) and their mean.
print(cv_scores)
print('cv_scores mean:{}'.format(np.mean(cv_scores)))
| [
"numpy.mean",
"pandas.read_csv",
"sklearn.model_selection.cross_val_score"
] | [((412, 443), 'pandas.read_csv', 'pd.read_csv', (['"""data/reg/181.csv"""'], {}), "('data/reg/181.csv')\n", (423, 443), True, 'import pandas as pd\n'), ((847, 882), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['knn_cv', 'X', 'y'], {'cv': '(5)'}), '(knn_cv, X, y, cv=5)\n', (862, 882), False, 'from sklearn.model_selection import cross_val_score\n'), ((982, 1000), 'numpy.mean', 'np.mean', (['cv_scores'], {}), '(cv_scores)\n', (989, 1000), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import math
import colorsys
from pycrazyswarm import *
import pycrazyswarm.cfsim.cffirmware as firm
from udp_multicast import UdpMulticastSender
SCALE = 8.0
#SHIFT = [-0.3, 0, 0] # shift by this amount after take-off
SHIFT = [0.0, 0, 0.75]
def firmVecToNp(vec):
return np.array([vec.x, vec.y, vec.z])
if __name__ == "__main__":
swarm = Crazyswarm()
timeHelper = swarm.timeHelper
allcfs = swarm.allcfs
sender = UdpMulticastSender()
# Use this to custom-map trajectory IDs to CF IDs
ids = [40]
trajIds = [1]
# ids = [cf.id for cf in allcfs.crazyflies]
# trajIds = ids
cfs = [allcfs.crazyfliesById[i] for i in ids]
root = '/home/whoenig/heterogeneous/crazyswarm/ros_ws/src/crazyswarm/scripts/figure8_pps/'
fnames = ['{0}/pp{1}.csv'.format(root, i) for i in trajIds] #range(1, len(ids) + 1)]
trajs = [piecewise.loadcsv(fname) for fname in fnames]
for traj in trajs:
firm.piecewise_stretchtime(traj, SCALE)
totalTime = 0
for traj in trajs:
totalTime = max(totalTime, firm.piecewise_duration(traj))
# for cf, traj in zip(cfs, trajs):
# cf.uploadTrajectory(traj)
hues = np.linspace(0,1.0,len(cfs))
for cf, hue, traj in zip(cfs, hues, trajs):
r,g,b = colorsys.hsv_to_rgb(hue, 0.9, 1.0)
cf.setParam("ring/solidRed", int(r * 255))
cf.setParam("ring/solidGreen", int(g * 255))
cf.setParam("ring/solidBlue", int(b * 255))
cf.uploadTrajectory(traj)
allcfs.takeoff(targetHeight=1.0, duration=2.0)
timeHelper.sleep(2.5)
for cf, traj in zip(cfs, trajs):
result = firm.piecewise_eval(traj, 0, 0.033)
pos = firmVecToNp(result.pos) + np.array(SHIFT)
# assert(math.fabs(pos[2] - 0.5) < 1e-3)
cf.hover(pos, 0, 2.0)
timeHelper.sleep(2.5)
sender.send("startTrajectory")
allcfs.startTrajectory()
timeHelper.sleep(totalTime + 1.0)
# allcfs.setParam("ring/headlightEnable", 1)
# for cf, traj in zip(cfs, trajs):
# result = firm.piecewise_eval(traj, totalTime, 0.033)
# pos = firmVecToNp(result.pos)
# cf.hover(pos, math.pi, 2.0)
# timeHelper.sleep(5.0)
# for cf, traj in zip(cfs, trajs):
# result = firm.piecewise_eval(traj, totalTime, 0.033)
# pos = firmVecToNp(result.pos)
# cf.hover(pos, 0, 2.0)
# timeHelper.sleep(2.5)
# allcfs.setParam("ring/headlightEnable", 0)
# timeHelper.sleep(0.5)
timeHelper.sleep(5.0)
allcfs.startTrajectoryReversed()
timeHelper.sleep(totalTime + 1.0)
for cf in cfs:
hover_pos = cf.initialPosition + np.array([0, 0, 1.0])
cf.hover(hover_pos, 0, 2.0)
timeHelper.sleep(2.5)
allcfs.land(targetHeight=0.02, duration=3.0)
timeHelper.sleep(3.5)
| [
"pycrazyswarm.cfsim.cffirmware.piecewise_eval",
"colorsys.hsv_to_rgb",
"numpy.array",
"pycrazyswarm.cfsim.cffirmware.piecewise_stretchtime",
"pycrazyswarm.cfsim.cffirmware.piecewise_duration",
"udp_multicast.UdpMulticastSender"
] | [((318, 349), 'numpy.array', 'np.array', (['[vec.x, vec.y, vec.z]'], {}), '([vec.x, vec.y, vec.z])\n', (326, 349), True, 'import numpy as np\n'), ((476, 496), 'udp_multicast.UdpMulticastSender', 'UdpMulticastSender', ([], {}), '()\n', (494, 496), False, 'from udp_multicast import UdpMulticastSender\n'), ((980, 1019), 'pycrazyswarm.cfsim.cffirmware.piecewise_stretchtime', 'firm.piecewise_stretchtime', (['traj', 'SCALE'], {}), '(traj, SCALE)\n', (1006, 1019), True, 'import pycrazyswarm.cfsim.cffirmware as firm\n'), ((1308, 1342), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['hue', '(0.9)', '(1.0)'], {}), '(hue, 0.9, 1.0)\n', (1327, 1342), False, 'import colorsys\n'), ((1666, 1701), 'pycrazyswarm.cfsim.cffirmware.piecewise_eval', 'firm.piecewise_eval', (['traj', '(0)', '(0.033)'], {}), '(traj, 0, 0.033)\n', (1685, 1701), True, 'import pycrazyswarm.cfsim.cffirmware as firm\n'), ((1097, 1126), 'pycrazyswarm.cfsim.cffirmware.piecewise_duration', 'firm.piecewise_duration', (['traj'], {}), '(traj)\n', (1120, 1126), True, 'import pycrazyswarm.cfsim.cffirmware as firm\n'), ((1742, 1757), 'numpy.array', 'np.array', (['SHIFT'], {}), '(SHIFT)\n', (1750, 1757), True, 'import numpy as np\n'), ((2673, 2694), 'numpy.array', 'np.array', (['[0, 0, 1.0]'], {}), '([0, 0, 1.0])\n', (2681, 2694), True, 'import numpy as np\n')] |
from sys import argv
from random import random, sample
from math import exp, pi, sqrt
from numpy.random import chisquare, normal
__extra_productions = 2
__number_non_terminals = 0
__terminal_ratios = 0.0
__productions_length = 1
__tree_depth = 0
def generate_grammar(number_non_terminals = None,
extra_productions = None,
terminal_ratios = None,
productions_length = None,
tree_depth = None):
global __extra_productions
global __number_non_terminals
global __terminal_ratios
global __productions_length
global __tree_depth
df = 1 + int(random() * 20)
if number_non_terminals is None:
number_non_terminals = 2 + int(chisquare(df, 1)[0])
__number_non_terminals = number_non_terminals
if extra_productions is None:
extra_productions = int(chisquare(1 + df // 2, 1)[0])
__extra_productions = extra_productions
if terminal_ratios is None:
terminal_ratios = normal(1, 0.2)
__terminal_ratios = terminal_ratios
if productions_length is None:
productions_length = 1 + int(normal(4, 2))
__productions_length = productions_length
if tree_depth is None:
tree_depth = 1 + int(chisquare(1 + df // 4, 1)[0])
__tree_depth = tree_depth
s = __number_non_terminals
t = s / __terminal_ratios
s = round(s)
t = round(t)
t = t if t > 0 else 1
prcount = s + __extra_productions
nts, ts = __gen_terminals(s, t)
prs = __gen_productions(prcount, nts, ts)
return nts, ts, prs
'''
Generates the Non-terminals and Terminal symbols to be used in the Grammar
Converts the integer value provided into a base 26 number, and this number is
directly mapped onto the arabic [A-Za-z] alphabet
'''
def __gen_terminals(nt, t):
nts = []
ts = []
def base_10_to_26(base10):
value = []
rem = base10
while True:
val = rem // 26
value.insert(0, rem % 26)
rem = val
if rem == 0:
break
return value
def recursive_add(depth, max, symbol, base, array):
if depth == max:
array.append(symbol)
return
for a in range(26):
recursive_add(depth + 1, max, symbol + chr(base + a), base, array)
nt_base26 = base_10_to_26(nt)
for col, val in enumerate(nt_base26):
for i in range(val):
recursive_add(col + 1, len(nt_base26), chr(65 + i), 65, nts)
t_base26 = base_10_to_26(t)
for col, val in enumerate(t_base26):
for i in range(val):
recursive_add(col + 1, len(t_base26), chr(97 + i), 97, ts)
return nts, ts
'''
Generates the productions for the CFG
First generates a graph, that will determine which productions go where in the context free
grammar. This graph has the origin, the start symbol on top, which branches out to all other nodes,
other non-terminal symbols, to create the CFG. This function creates the nodes of the graph and
assigns them to a certain level in the tree
'''
def __gen_productions(prcount, nts, ts):
global __tree_depth
if __tree_depth > len(nts) - 1:
__tree_depth = len(nts) - 1
levels = [[] for i in range(__tree_depth + 1)]
final_alloc = [[] for i in range(__tree_depth + 1)]
poss = []
# pr_start = 0
# nt_no = 0
intervals = [1 for i in range(__tree_depth + 1)]
for i in range(__tree_depth + 1, prcount):
index = int(__get_no(__tree_depth) * __tree_depth) + 1
intervals[index] = intervals[index] + 1
x = 0
for index, count in enumerate(intervals):
for i in range(x, x + count):
levels[index].append(i)
if (len(levels[index]) > 1) and not (index in poss):
poss.append(index)
x = x + count
intervals = [1 for i in range(__tree_depth + 1)]
for i in range(__tree_depth + 1, len(nts)):
index = poss[int(__get_no(__tree_depth) * len(poss))]
intervals[index] = intervals[index] + 1
if intervals[index] == len(levels[index]):
poss.remove(index)
x = 0
for index, count in enumerate(intervals):
for i in range(x, x + count):
final_alloc[index].append(i)
x = x + count
return __gen_production_tree(prcount, final_alloc, levels, nts, ts)
'''
Creates the different edges in the graph, or the connections between productions in the CFG
'''
def __gen_production_tree(prcount, final_alloc, levels, nts, ts):
adj_matrix = [[] for i in range(prcount)]
for i in range(prcount):
for i in range(prcount):
adj_matrix[i].append(0)
final_assignments = [-1] * prcount
for i in range(0, __tree_depth + 1):
for j in range(len(final_alloc[i])):
nt = final_alloc[i][j]
pr = levels[i][j]
final_assignments[pr] = nt
for i in range(__tree_depth, 0, -1):
for to_index, j in enumerate(final_alloc[i]):
others = int(__get_no(i / 2.0) * len(final_alloc[i - 1]))
for o in range(others + 1):
from_index = int(random() * len(final_alloc[i - 1]))
start = levels[i - 1][from_index]
end = levels[i][to_index]
adj_matrix[start][end] = 1
prs = [ "" for i in range(prcount) ]
for index in range(prcount):
a = final_assignments[index]
row = adj_matrix[index]
if a == -1:
continue
r = __gen_rule_from_tree(nts[a], nts, ts, row, final_assignments)
prs[index] = r
indices = []
for i in range(__tree_depth, 0, -1):
for start in levels[i]:
if not final_assignments[start] == -1:
continue
indices.append(start)
index = sample(final_alloc[i], 1)[0]
final_assignments[start] = index
others = int((1 - __get_no(__tree_depth * 3)) * __tree_depth) + 1
for o in range(others):
end = int(__get_no(__tree_depth) * prcount)
adj_matrix[start][end] = 1
for index in indices:
r = __gen_rule_from_tree(nts[final_assignments[index]], nts, ts,
adj_matrix[index], final_assignments)
prs[index] = r
prs.sort()
return prs
'''
Uses the adjacency matrix generated for the graph to generate the productions for hte CFG. The productions
for each of the CFG must have a rule relating it to the production corresponding to the adjacent node in the graph
'''
def __gen_rule_from_tree(pr, nts, ts, vertices, nodes):
non_terminals = []
indices = [i for i, j in enumerate(vertices) if j == 1]
[non_terminals.append(nts[nodes[i]]) for i in indices if nts[nodes[i]] not in non_terminals]
if len(non_terminals) == 0:
r = random()
if r < 1.25 - exp(1.6 / nts.index(pr)):
rule = ['#']
else:
df = __get_def(nts.index(pr))
rule = __gen_t_only(ts, df)
else:
r = random()
if r < 0.6:
df = __get_def(len(non_terminals) / 2.0)
rule = __gen_nt_only(pr, non_terminals, df)
else:
df = __get_def(len(nts) - nts.index(pr))
rule = __gen_mixed(pr, non_terminals, ts, df)
return [pr, rule]
'''
Generate a production consisting of only terminal symbols, given a list of terminal symbols
'''
def __gen_t_only(ts, df):
n = __get_prob(df)
if n > 50:
n = 50
terminals = []
for i in range(n):
terminals.append(ts[int(random() * len(ts))])
rule = []
for t in terminals:
rule = rule + [t]
return rule
'''
Generate a production consisting of only non-terminal symbols, given a list of non-terminal symbols
'''
def __gen_nt_only(pr, nts, df):
n = __get_prob(df)
if n > 50:
n = 50
nterminals = []
for i in range(len(nts), n):
nterminals.append(nts[int(random() * len(nts))])
rules = []
for nt in nts:
rules.append(nt)
for t in nterminals:
rules.append(t)
return rules
'''
Generate a production consisting of a mix of terminal and non-terminal symbols, given a list of terminal and
non-terminal symbols
'''
def __gen_mixed(pr, nts, ts, df):
x1 = __terminal_ratios * 1.0 / (__terminal_ratios + 1)
x2 = 1
rules = []
for nt in nts:
rules += [nt]
n = __get_prob(df)
if n > 50:
n = 50
for i in range(len(nts), n):
r = random()
if (r < x1):
r = int(random() * len(nts))
p = nts[r]
if n == 1:
if p == pr:
p = nts[r - 1]
rules += [p]
else:
rules += sample(ts, 1)
return rules
def __get_def(i):
return 1 + i * __productions_length / 4
def __get_no(d):
h1 = 1.0 / (1 + d)
h2 = 2 - h1
m = h2 - h1
r = random()
if m == 0:
x = r
else:
x = sqrt((2 * r / m) + (h1 / m) ** 2) - h1 / m
return x
def __get_prob(df):
x = int(normal(df, df / 5, 1)[0])
if x < -1:
x = -1
return 1 + x
def print_grammar(nts, ts, prs):
non_terminals = "Non-terminals:"
for nt in nts:
non_terminals += " " + nt
terminals = "Terminals:"
for t in ts:
terminals += " " + t
print(non_terminals)
print(terminals)
print("Context Free Grammar:")
for pr in prs:
production = ""
for p in pr[1]:
if p in nts:
production += " {}".format(p)
else:
production += " '{}'".format(p)
print("{} ->{}".format(pr[0], production)) | [
"numpy.random.normal",
"random.sample",
"numpy.random.chisquare",
"math.sqrt",
"random.random"
] | [((8236, 8244), 'random.random', 'random', ([], {}), '()\n', (8242, 8244), False, 'from random import random, sample\n'), ((958, 972), 'numpy.random.normal', 'normal', (['(1)', '(0.2)'], {}), '(1, 0.2)\n', (964, 972), False, 'from numpy.random import chisquare, normal\n'), ((6352, 6360), 'random.random', 'random', ([], {}), '()\n', (6358, 6360), False, 'from random import random, sample\n'), ((6520, 6528), 'random.random', 'random', ([], {}), '()\n', (6526, 6528), False, 'from random import random, sample\n'), ((7888, 7896), 'random.random', 'random', ([], {}), '()\n', (7894, 7896), False, 'from random import random, sample\n'), ((8072, 8085), 'random.sample', 'sample', (['ts', '(1)'], {}), '(ts, 1)\n', (8078, 8085), False, 'from random import random, sample\n'), ((8287, 8318), 'math.sqrt', 'sqrt', (['(2 * r / m + (h1 / m) ** 2)'], {}), '(2 * r / m + (h1 / m) ** 2)\n', (8291, 8318), False, 'from math import exp, pi, sqrt\n'), ((8373, 8394), 'numpy.random.normal', 'normal', (['df', '(df / 5)', '(1)'], {}), '(df, df / 5, 1)\n', (8379, 8394), False, 'from numpy.random import chisquare, normal\n'), ((615, 623), 'random.random', 'random', ([], {}), '()\n', (621, 623), False, 'from random import random, sample\n'), ((833, 858), 'numpy.random.chisquare', 'chisquare', (['(1 + df // 2)', '(1)'], {}), '(1 + df // 2, 1)\n', (842, 858), False, 'from numpy.random import chisquare, normal\n'), ((1078, 1090), 'numpy.random.normal', 'normal', (['(4)', '(2)'], {}), '(4, 2)\n', (1084, 1090), False, 'from numpy.random import chisquare, normal\n'), ((5398, 5423), 'random.sample', 'sample', (['final_alloc[i]', '(1)'], {}), '(final_alloc[i], 1)\n', (5404, 5423), False, 'from random import random, sample\n'), ((703, 719), 'numpy.random.chisquare', 'chisquare', (['df', '(1)'], {}), '(df, 1)\n', (712, 719), False, 'from numpy.random import chisquare, normal\n'), ((1187, 1212), 'numpy.random.chisquare', 'chisquare', (['(1 + df // 4)', '(1)'], {}), '(1 + df // 4, 1)\n', (1196, 1212), 
False, 'from numpy.random import chisquare, normal\n'), ((7928, 7936), 'random.random', 'random', ([], {}), '()\n', (7934, 7936), False, 'from random import random, sample\n'), ((4807, 4815), 'random.random', 'random', ([], {}), '()\n', (4813, 4815), False, 'from random import random, sample\n'), ((7011, 7019), 'random.random', 'random', ([], {}), '()\n', (7017, 7019), False, 'from random import random, sample\n'), ((7373, 7381), 'random.random', 'random', ([], {}), '()\n', (7379, 7381), False, 'from random import random, sample\n')] |
from argparse import ArgumentParser
from glob import glob
from importlib import import_module
import numpy as np
import tensorflow as tf
from common import load_labels, load_pickle_file
def run_prediction(args):
batch_size = args.batch_size
model_class = import_module("models.{}".format(args.model)).Model()
model = model_class.get_model()
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]
)
model.load_weights(args.checkpoint)
prediction_generator, _ = \
model_class.get_input_fn_and_steps_per_epoch('prediction', batch_size)
results = model.predict(prediction_generator, batch_size=None)
predicted_labels_id = np.argmax(results, axis=1)
id_to_labels, _ = load_labels()
predicted_labels = [id_to_labels[label_id] for label_id in predicted_labels_id]
test_filenames = list(sorted(list(load_pickle_file('test_filenames.pickle'))))
print("fname,label")
for filename, predicted_label in zip(test_filenames, predicted_labels):
print("{},{}".format(filename, predicted_label))
def main():
parser = ArgumentParser(description='DL-MAI project #2 (RNN) prediction script.')
available_models = [model_name.split("/")[1].split(".")[0] for model_name in glob("models/*.py")]
parser.add_argument('model', choices=available_models)
parser.add_argument('checkpoint', metavar="model.ckpt") # type=lambda x: is_valid_file(parser, x)
parser.add_argument('--batch-size', default=1024, type=int)
args = parser.parse_args()
run_prediction(args)
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"common.load_labels",
"numpy.argmax",
"common.load_pickle_file",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.optimizers.Adam",
"glob.glob"
] | [((820, 846), 'numpy.argmax', 'np.argmax', (['results'], {'axis': '(1)'}), '(results, axis=1)\n', (829, 846), True, 'import numpy as np\n'), ((870, 883), 'common.load_labels', 'load_labels', ([], {}), '()\n', (881, 883), False, 'from common import load_labels, load_pickle_file\n'), ((1239, 1311), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""DL-MAI project #2 (RNN) prediction script."""'}), "(description='DL-MAI project #2 (RNN) prediction script.')\n", (1253, 1311), False, 'from argparse import ArgumentParser\n'), ((397, 423), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (421, 423), True, 'import tensorflow as tf\n'), ((438, 501), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (483, 501), True, 'import tensorflow as tf\n'), ((1394, 1413), 'glob.glob', 'glob', (['"""models/*.py"""'], {}), "('models/*.py')\n", (1398, 1413), False, 'from glob import glob\n'), ((520, 564), 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), '()\n', (562, 564), True, 'import tensorflow as tf\n'), ((1007, 1048), 'common.load_pickle_file', 'load_pickle_file', (['"""test_filenames.pickle"""'], {}), "('test_filenames.pickle')\n", (1023, 1048), False, 'from common import load_labels, load_pickle_file\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 13:00:59 2021
@author: jackreid
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
from screeninfo import get_monitors
import dateutil
from datetime import datetime
#Set filepath of data
filepaths = ['/home/jackreid/Documents/School/Research/Space Enabled/Code/Decisions/Auxilary Files/SummaryGraphs/Rio de Janeiro/NightlightsRelativeAnomaly_2020.csv',
'/home/jackreid/Documents/School/Research/Space Enabled/Code/Decisions/Auxilary Files/SummaryGraphs/Rio de Janeiro/NightlightsRelativeAnomaly_Complete_NoIncrement.csv',
'/home/jackreid/Documents/School/Research/Space Enabled/Code/Decisions/Auxilary Files/SummaryGraphs/Rio de Janeiro/NightlightsMean_Complete.csv']
filepath = filepaths[1]
#Get screen resolution, used for sizing the graphs later on
for m in get_monitors():
print(str(m))
my_dpi = m.width/(m.width_mm*0.0393701)
#Extract data from the csv
datalist = []
with open(filepath, encoding='ISO-8859-15') as csvfile:
readCSV1 = csv.DictReader(csvfile, delimiter=',')
for row in readCSV1:
newrow = dict()
for entry in row.keys():
if row[entry]:
if entry not in ['Date_Name','Policy_Name']:
newrow[entry] = float(row[entry])
elif entry in ['Date_Name']:
newrow[entry] = enddate = dateutil.parser.parse(row[entry])
else:
newrow[entry] = np.nan
datalist.append(newrow)
#Convert data into a DataFrame for plotting purposes
df_data = pd.DataFrame(datalist)
#Sort the DataFrame by date
df_data = df_data[df_data['Date_Name'].notnull()].sort_values(by='Date_Name')
def main():
x = np.array(df_data['Date_Name'])
fig, ax = plt.subplots()
nightlight_places = []
for key in df_data.keys():
if key not in ['Date_Name']:
nightlight_places.append(key)
for place in nightlight_places:
y = np.array(df_data[place])
ymask = np.isfinite(y)
ax.plot(x[ymask], y[ymask], label=place)
ax.legend(loc='upper left', bbox_to_anchor=(1.05, 1),
ncol=2, borderaxespad=0)
fig.subplots_adjust(right=0.55)
fig.suptitle('Right-click to hide all\nMiddle-click to show all',
va='top', size='large')
leg = interactive_legend()
return fig, ax, leg
def interactive_legend(ax=None):
if ax is None:
ax = plt.gca()
if ax.legend_ is None:
ax.legend()
return InteractiveLegend(ax.get_legend())
class InteractiveLegend(object):
def __init__(self, legend):
self.legend = legend
self.fig = legend.axes.figure
self.lookup_artist, self.lookup_handle = self._build_lookups(legend)
self._setup_connections()
self.update()
def _setup_connections(self):
for artist in self.legend.texts + self.legend.legendHandles:
artist.set_picker(10) # 10 points tolerance
self.fig.canvas.mpl_connect('pick_event', self.on_pick)
self.fig.canvas.mpl_connect('button_press_event', self.on_click)
def _build_lookups(self, legend):
labels = [t.get_text() for t in legend.texts]
handles = legend.legendHandles
label2handle = dict(zip(labels, handles))
handle2text = dict(zip(handles, legend.texts))
lookup_artist = {}
lookup_handle = {}
for artist in legend.axes.get_children():
if artist.get_label() in labels:
handle = label2handle[artist.get_label()]
lookup_handle[artist] = handle
lookup_artist[handle] = artist
lookup_artist[handle2text[handle]] = artist
lookup_handle.update(zip(handles, handles))
lookup_handle.update(zip(legend.texts, handles))
return lookup_artist, lookup_handle
def on_pick(self, event):
handle = event.artist
if handle in self.lookup_artist:
artist = self.lookup_artist[handle]
artist.set_visible(not artist.get_visible())
self.update()
def on_click(self, event):
if event.button == 3:
visible = False
elif event.button == 2:
visible = True
else:
return
for artist in self.lookup_artist.values():
artist.set_visible(visible)
self.update()
def update(self):
for artist in self.lookup_artist.values():
handle = self.lookup_handle[artist]
if artist.get_visible():
handle.set_visible(True)
else:
handle.set_visible(False)
self.fig.canvas.draw()
def show(self):
plt.show()
if __name__ == '__main__':
fig, ax, leg = main()
plt.show() | [
"dateutil.parser.parse",
"csv.DictReader",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gca",
"numpy.array",
"numpy.isfinite",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"screeninfo.get_monitors"
] | [((906, 920), 'screeninfo.get_monitors', 'get_monitors', ([], {}), '()\n', (918, 920), False, 'from screeninfo import get_monitors\n'), ((1644, 1666), 'pandas.DataFrame', 'pd.DataFrame', (['datalist'], {}), '(datalist)\n', (1656, 1666), True, 'import pandas as pd\n'), ((1094, 1132), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (1108, 1132), False, 'import csv\n'), ((1795, 1825), 'numpy.array', 'np.array', (["df_data['Date_Name']"], {}), "(df_data['Date_Name'])\n", (1803, 1825), True, 'import numpy as np\n'), ((1840, 1854), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1852, 1854), True, 'import matplotlib.pyplot as plt\n'), ((4868, 4878), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4876, 4878), True, 'import matplotlib.pyplot as plt\n'), ((2053, 2077), 'numpy.array', 'np.array', (['df_data[place]'], {}), '(df_data[place])\n', (2061, 2077), True, 'import numpy as np\n'), ((2094, 2108), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (2105, 2108), True, 'import numpy as np\n'), ((2525, 2534), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2532, 2534), True, 'import matplotlib.pyplot as plt\n'), ((4799, 4809), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4807, 4809), True, 'import matplotlib.pyplot as plt\n'), ((1448, 1481), 'dateutil.parser.parse', 'dateutil.parser.parse', (['row[entry]'], {}), '(row[entry])\n', (1469, 1481), False, 'import dateutil\n')] |
# Training a Student network on CIFAR-10
# Teacher architecture: AlexNet, Student architecture: AlexNet half
from __future__ import print_function
import argparse
import torch
import os
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from alexnet import AlexNet
from alexnet_half import AlexNet_half
from torch.utils.data.sampler import SubsetRandomSampler
from tensorboardX import SummaryWriter
import numpy as np
# CUDA_VISIBLE_DEVICES=0 python KD_related_data.py --batch-size 2048 --test-batch-size 1000 --epochs 5000 --lr 0.001 --seed 108 --log-interval 10 --temp 20 --lambda_ 1
writer = SummaryWriter()
if not os.path.exists("models"):
os.makedirs("models")
def train(args, model, netS, device, train_loader, optimizer, epoch, temp, inc_classes):
model.eval()
netS.train()
loss_all_sum = 0
tot = 0
teacher_student_correct_sum = 0
for batch_idx, (data, target) in enumerate(train_loader):
data = torch.from_numpy(data.numpy()[np.isin(target,inc_classes)]).to(device)
if data.shape[0] == 0:
continue
tot += data.shape[0]
optimizer.zero_grad()
data = data*2 - 1
output_teacher_logits = model(data)
output_student_logits = netS(data)
output_teacher_logits_ht = output_teacher_logits / temp
output_student_logits_ht = output_student_logits / temp
sm_teacher_ht = F.softmax(output_teacher_logits_ht,dim=1)
sm_student_ht = F.softmax(output_student_logits_ht,dim=1)
sm_teacher = F.softmax(output_teacher_logits, dim=1)
sm_student = F.softmax(output_student_logits, dim=1)
loss_kd = nn.KLDivLoss(reduction='sum')(F.log_softmax(output_student_logits_ht, dim=1),F.softmax(output_teacher_logits_ht, dim=1))
pred_class_argmax_teacher = sm_teacher.max(1, keepdim=True)[1]
loss_ce = F.cross_entropy(output_student_logits, pred_class_argmax_teacher.view(data.shape[0]),reduction='sum')
loss_all = args.lambda_*temp*temp*loss_kd + (1-args.lambda_)*loss_ce
loss_all.backward()
loss_all_sum += loss_all
pred_class_argmax_student = sm_student.max(1, keepdim=True)[1]
pred_class_argmax_teacher = pred_class_argmax_teacher.view(sm_teacher.shape[0])
pred_class_argmax_student = pred_class_argmax_student.view(sm_teacher.shape[0])
teacher_student_correct = torch.sum(pred_class_argmax_student==pred_class_argmax_teacher)
teacher_student_correct_sum = teacher_student_correct_sum + (teacher_student_correct).cpu().data.numpy()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, int((batch_idx+1) * len(data)), int(len(train_loader.dataset)*0.8),
100. * (batch_idx+1) / len(train_loader), (loss_all/data.shape[0]).item()))
loss_all_mean = loss_all_sum / tot
teacher_student_acc = 100. * teacher_student_correct_sum / tot
print('Train set: Average loss: {:.4f}, Teacher-Student Accuracy: {}/{} ({:.0f}% )'.format(
loss_all_mean, teacher_student_correct_sum, tot, teacher_student_acc))
torch.save(netS.state_dict(), "models/"+str(epoch)+".pth")
return loss_all_mean, teacher_student_acc
def val(args, model, netS, device, test_loader, epoch, val_test, temp, inc_classes):
model.eval()
netS.eval()
loss_all_sum = 0
tot = 0
teacher_student_correct_sum = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
if data.shape[0] == 0:
continue
data = torch.from_numpy(data.numpy()[np.isin(target,inc_classes)]).to(device)
tot += data.shape[0]
data = data*2 - 1
output_teacher_logits = model(data)
output_student_logits = netS(data)
output_teacher_logits_ht = output_teacher_logits / temp
output_student_logits_ht = output_student_logits / temp
sm_teacher_ht = F.softmax(output_teacher_logits_ht,dim=1)
sm_student_ht = F.softmax(output_student_logits_ht,dim=1)
sm_teacher = F.softmax(output_teacher_logits, dim=1)
sm_student = F.softmax(output_student_logits, dim=1)
loss_kd = nn.KLDivLoss(reduction='sum')(F.log_softmax(output_student_logits_ht, dim=1),F.softmax(output_teacher_logits_ht, dim=1))
pred_class_argmax_teacher = sm_teacher.max(1, keepdim=True)[1]
loss_ce = F.cross_entropy(output_student_logits, pred_class_argmax_teacher.view(data.shape[0]),reduction='sum')
loss_all = args.lambda_*temp*temp*loss_kd + (1-args.lambda_)*loss_ce
loss_all_sum += loss_all
pred_class_argmax_student = sm_student.max(1, keepdim=True)[1]
pred_class_argmax_teacher = pred_class_argmax_teacher.view(sm_teacher.shape[0])
pred_class_argmax_student = pred_class_argmax_student.view(sm_teacher.shape[0])
teacher_student_correct = torch.sum(pred_class_argmax_student==pred_class_argmax_teacher)
teacher_student_correct_sum = teacher_student_correct_sum + (teacher_student_correct).cpu().data.numpy()
loss_all_mean = loss_all_sum / tot
teacher_student_acc = 100. * teacher_student_correct_sum / tot
print('{} set: Average loss: {:.4f}, Teacher-Student Accuracy: {}/{} ({:.0f}% )'.format(
val_test, loss_all_mean, teacher_student_correct_sum, tot, teacher_student_acc))
return loss_all_mean, teacher_student_acc
def test(args, model, netS, device, test_loader, epoch, val_test, temp):
model.eval()
netS.eval()
loss_all_sum = 0
tot = 0
student_correct_sum = 0
teacher_student_correct_sum = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
data, target = data.to(device), target.to(device)
tot += data.shape[0]
data = data*2 - 1
output_teacher_logits = model(data)
output_student_logits = netS(data)
output_teacher_logits_ht = output_teacher_logits / temp
output_student_logits_ht = output_student_logits / temp
sm_teacher_ht = F.softmax(output_teacher_logits_ht,dim=1)
sm_student_ht = F.softmax(output_student_logits_ht,dim=1)
sm_teacher = F.softmax(output_teacher_logits, dim=1)
sm_student = F.softmax(output_student_logits, dim=1)
loss_kd = nn.KLDivLoss(reduction='sum')(F.log_softmax(output_student_logits_ht, dim=1),F.softmax(output_teacher_logits_ht, dim=1))
pred_class_argmax_teacher = sm_teacher.max(1, keepdim=True)[1]
loss_ce = F.cross_entropy(output_student_logits, pred_class_argmax_teacher.view(data.shape[0]),reduction='sum')
loss_all = args.lambda_*temp*temp*loss_kd + (1-args.lambda_)*loss_ce
loss_all_sum += loss_all
pred_class_argmax_student = sm_student.max(1, keepdim=True)[1]
pred_class_argmax_teacher = pred_class_argmax_teacher.view(sm_teacher.shape[0])
pred_class_argmax_student = pred_class_argmax_student.view(sm_teacher.shape[0])
student_correct = torch.sum(pred_class_argmax_student==target)
student_correct_sum = student_correct_sum + (student_correct).cpu().data.numpy()
teacher_student_correct = torch.sum(pred_class_argmax_student==pred_class_argmax_teacher)
teacher_student_correct_sum = teacher_student_correct_sum + (teacher_student_correct).cpu().data.numpy()
loss_all_mean = loss_all_sum / tot
student_acc = 100. * student_correct_sum / tot
teacher_student_acc = 100. * teacher_student_correct_sum / tot
print('{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%), Teacher-Student Accuracy: {}/{} ({:.0f}% )'.format(
val_test, loss_all_mean, student_correct_sum, tot, student_acc, teacher_student_correct_sum, tot, teacher_student_acc))
return loss_all_mean, student_acc, teacher_student_acc
def main():
    """Knowledge-distillation training entry point.

    Distills a CIFAR10-trained AlexNet teacher into a half-width
    AlexNet_half student. Training/validation data come from CIFAR-100
    (only the classes in ``inc_classes`` are used — TODO confirm the
    filtering happens inside ``train``/``val``); the test set is CIFAR10.
    Training runs in two phases: at ``args.lr`` until validation
    teacher-student agreement stalls for 100 epochs, then again at
    ``args.lr * 0.1`` starting from the best phase-1 checkpoint.
    """
    # Training settings
    parser = argparse.ArgumentParser(description='CIFAR Classifier training')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=1000, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--temp', default=20, type=float, help='Temperature for KD')
    parser.add_argument('--lambda_', default=1, type=float, help='Weight of KD Loss during distillation')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # NOTE(review): `classes` is defined but never used below.
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    tfm = transforms.Compose([
        transforms.ToTensor()
    ])
    # Train and validation sets are both CIFAR-100 train splits; the
    # shuffled 80/20 index split below keeps them disjoint.
    train_dataset = datasets.CIFAR100(
        root='../../../../datasets', train=True,
        download=True, transform=tfm)
    val_dataset = datasets.CIFAR100(
        root='../../../../datasets', train=True,
        download=True, transform=tfm)
    num_train = len(train_dataset)
    indices = list(range(num_train))
    split = int(np.floor(0.2 * num_train))
    np.random.seed(args.seed)
    np.random.shuffle(indices)
    train_idx, val_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    val_sampler = SubsetRandomSampler(val_idx)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.test_batch_size, sampler=val_sampler,**kwargs
    )
    # Test set is CIFAR10 — the classes the teacher was trained on.
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('../../../../datasets', train=False, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                       ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    # Frozen teacher (full AlexNet, pretrained) and trainable student.
    model = AlexNet().to(device)
    model.eval()
    model.load_state_dict(torch.load("../CIFAR10_data/best_model.pth"))
    netS = AlexNet_half().to(device)
    #netS.load_state_dict(torch.load("./models/best_model.pth"))
    optimizer = optim.Adam(netS.parameters(), lr=args.lr)
    best_val_acc = 0
    cnt = 0  # epochs since last validation improvement (patience counter)
    temp = args.temp
    # Used classes of CIFAR-100
    inc_classes = [70, 47, 49, 37, 86, 53, 16, 94, 54, 25]
    # Phase 1: train at the base learning rate.
    for epoch in range(1, args.epochs + 1):
        train_loss_kd, train_teacher_student_acc = train(args, model, netS, device, train_loader, optimizer, epoch, temp, inc_classes)
        val_loss_kd, val_teacher_student_acc = val(args, model, netS, device, val_loader, epoch, 'Validation', temp, inc_classes)
        test_loss_kd, test_student_acc, test_teacher_student_acc = test(args, model, netS, device, test_loader, epoch, 'Test', temp)
        if val_teacher_student_acc > best_val_acc:
            print("Saving best model...")
            torch.save(netS.state_dict(), "models/best_model_lr1.pth")
            best_val_acc = val_teacher_student_acc
            cnt = 0
            train_st_acc_lr1 = train_teacher_student_acc
            val_st_acc_lr1 = val_teacher_student_acc
            test_st_acc_lr1 = test_teacher_student_acc
            test_acc_lr1 = test_student_acc
        else:
            cnt += 1
        writer.add_scalar("1_Train loss", train_loss_kd, epoch)
        writer.add_scalar("2_Validation loss", val_loss_kd, epoch)
        writer.add_scalar("3_Test loss", test_loss_kd, epoch)
        writer.add_scalar("7_Test accuracy", test_student_acc, epoch)
        writer.add_scalar("4_Train Teacher-Student accuracy", train_teacher_student_acc, epoch)
        writer.add_scalar("5_Validation Teacher-Student accuracy", val_teacher_student_acc, epoch)
        writer.add_scalar("6_Test Teacher-Student accuracy", test_teacher_student_acc, epoch)
        if cnt > 100:
            print('Model has converged with learning rate = {}!'.format(args.lr))
            break
    n_epochs_lr1 = epoch
    # Phase 2: reload the best phase-1 weights and fine-tune at lr * 0.1.
    optimizer = optim.Adam(netS.parameters(), lr=args.lr*0.1)
    netS.load_state_dict(torch.load("models/best_model_lr1.pth"))
    cnt = 0
    # Carry phase-1 bests forward in case phase 2 never improves.
    train_st_acc_lr2 = train_st_acc_lr1
    val_st_acc_lr2 = val_st_acc_lr1
    test_st_acc_lr2 = test_st_acc_lr1
    test_acc_lr2 = test_acc_lr1
    torch.save(netS.state_dict(), "models/best_model_lr2.pth")
    for epoch in range(1, args.epochs + 1):
        train_loss_kd, train_teacher_student_acc = train(args, model, netS, device, train_loader, optimizer, epoch + n_epochs_lr1, temp, inc_classes)
        val_loss_kd, val_teacher_student_acc = val(args, model, netS, device, val_loader, epoch + n_epochs_lr1, 'Validation', temp, inc_classes)
        test_loss_kd, test_student_acc, test_teacher_student_acc = test(args, model, netS, device, test_loader, epoch + n_epochs_lr1, 'Test', temp)
        if val_teacher_student_acc > best_val_acc:
            print("Saving best model...")
            torch.save(netS.state_dict(), "models/best_model_lr2.pth")
            best_val_acc = val_teacher_student_acc
            cnt = 0
            train_st_acc_lr2 = train_teacher_student_acc
            val_st_acc_lr2 = val_teacher_student_acc
            test_st_acc_lr2 = test_teacher_student_acc
            test_acc_lr2 = test_student_acc
        else:
            cnt += 1
        # Epoch axis continues from phase 1 so TensorBoard curves are continuous.
        writer.add_scalar("1_Train loss", train_loss_kd, epoch + n_epochs_lr1)
        writer.add_scalar("2_Validation loss", val_loss_kd, epoch + n_epochs_lr1)
        writer.add_scalar("3_Test loss", test_loss_kd, epoch + n_epochs_lr1)
        writer.add_scalar("7_Test accuracy", test_student_acc, epoch + n_epochs_lr1)
        writer.add_scalar("4_Train Teacher-Student accuracy", train_teacher_student_acc, epoch + n_epochs_lr1)
        writer.add_scalar("5_Validation Teacher-Student accuracy", val_teacher_student_acc, epoch + n_epochs_lr1)
        writer.add_scalar("6_Test Teacher-Student accuracy", test_teacher_student_acc, epoch + n_epochs_lr1)
        if cnt > 100:
            print('Model has converged with learning rate = {}!'.format(args.lr*0.1))
            break
    n_epochs_lr2 = epoch
    print('Number of epochs with lr = {} are {} and number of epochs with lr = {} are {}'.format(
        args.lr, n_epochs_lr1, args.lr*0.1, n_epochs_lr2))
    print('Accuracy with lr = {}: Train ST accuracy = {:.2f}%, Validation ST accuracy = {:.2f}%, Test ST accuracy = {:.2f}%, Test accuracy = {:.2f}%'.format(
        args.lr, train_st_acc_lr1, val_st_acc_lr1, test_st_acc_lr1, test_acc_lr1))
    print('Accuracy with lr = {}: Train ST accuracy = {:.2f}%, Validation ST accuracy = {:.2f}%, Test ST accuracy = {:.2f}%, Test accuracy = {:.2f}%'.format(
        args.lr*0.1, train_st_acc_lr2, val_st_acc_lr2, test_st_acc_lr2, test_acc_lr2))
    writer.close()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"torchvision.datasets.CIFAR100",
"alexnet_half.AlexNet_half",
"numpy.isin",
"torch.cuda.is_available",
"torch.sum",
"torch.nn.functional.softmax",
"os.path.exists",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"numpy.random.seed",
"alexnet.AlexNet",
"torchvision.transforms.ToTensor... | [((674, 689), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (687, 689), False, 'from tensorboardX import SummaryWriter\n'), ((697, 721), 'os.path.exists', 'os.path.exists', (['"""models"""'], {}), "('models')\n", (711, 721), False, 'import os\n'), ((727, 748), 'os.makedirs', 'os.makedirs', (['"""models"""'], {}), "('models')\n", (738, 748), False, 'import os\n'), ((8150, 8214), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CIFAR Classifier training"""'}), "(description='CIFAR Classifier training')\n", (8173, 8214), False, 'import argparse\n'), ((9513, 9541), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9530, 9541), False, 'import torch\n'), ((9555, 9598), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (9567, 9598), False, 'import torch\n'), ((9869, 9961), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', ([], {'root': '"""../../../../datasets"""', 'train': '(True)', 'download': '(True)', 'transform': 'tfm'}), "(root='../../../../datasets', train=True, download=True,\n transform=tfm)\n", (9886, 9961), False, 'from torchvision import datasets, transforms\n'), ((9993, 10085), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', ([], {'root': '"""../../../../datasets"""', 'train': '(True)', 'download': '(True)', 'transform': 'tfm'}), "(root='../../../../datasets', train=True, download=True,\n transform=tfm)\n", (10010, 10085), False, 'from torchvision import datasets, transforms\n'), ((10218, 10243), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (10232, 10243), True, 'import numpy as np\n'), ((10248, 10274), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (10265, 10274), True, 'import numpy as np\n'), ((10353, 10383), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_idx'], {}), 
'(train_idx)\n', (10372, 10383), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((10402, 10430), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['val_idx'], {}), '(val_idx)\n', (10421, 10430), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((10450, 10557), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'sampler': 'train_sampler'}), '(train_dataset, batch_size=args.batch_size,\n sampler=train_sampler, **kwargs)\n', (10477, 10557), False, 'import torch\n'), ((10585, 10693), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'args.test_batch_size', 'sampler': 'val_sampler'}), '(val_dataset, batch_size=args.test_batch_size,\n sampler=val_sampler, **kwargs)\n', (10612, 10693), False, 'import torch\n'), ((1466, 1508), 'torch.nn.functional.softmax', 'F.softmax', (['output_teacher_logits_ht'], {'dim': '(1)'}), '(output_teacher_logits_ht, dim=1)\n', (1475, 1508), True, 'import torch.nn.functional as F\n'), ((1532, 1574), 'torch.nn.functional.softmax', 'F.softmax', (['output_student_logits_ht'], {'dim': '(1)'}), '(output_student_logits_ht, dim=1)\n', (1541, 1574), True, 'import torch.nn.functional as F\n'), ((1595, 1634), 'torch.nn.functional.softmax', 'F.softmax', (['output_teacher_logits'], {'dim': '(1)'}), '(output_teacher_logits, dim=1)\n', (1604, 1634), True, 'import torch.nn.functional as F\n'), ((1656, 1695), 'torch.nn.functional.softmax', 'F.softmax', (['output_student_logits'], {'dim': '(1)'}), '(output_student_logits, dim=1)\n', (1665, 1695), True, 'import torch.nn.functional as F\n'), ((2445, 2510), 'torch.sum', 'torch.sum', (['(pred_class_argmax_student == pred_class_argmax_teacher)'], {}), '(pred_class_argmax_student == pred_class_argmax_teacher)\n', (2454, 2510), False, 'import torch\n'), ((3540, 3555), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3553, 3555), False, 
'import torch\n'), ((5822, 5837), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5835, 5837), False, 'import torch\n'), ((9483, 9508), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9506, 9508), False, 'import torch\n'), ((10187, 10212), 'numpy.floor', 'np.floor', (['(0.2 * num_train)'], {}), '(0.2 * num_train)\n', (10195, 10212), True, 'import numpy as np\n'), ((11105, 11149), 'torch.load', 'torch.load', (['"""../CIFAR10_data/best_model.pth"""'], {}), "('../CIFAR10_data/best_model.pth')\n", (11115, 11149), False, 'import torch\n'), ((13163, 13202), 'torch.load', 'torch.load', (['"""models/best_model_lr1.pth"""'], {}), "('models/best_model_lr1.pth')\n", (13173, 13202), False, 'import torch\n'), ((1714, 1743), 'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (1726, 1743), True, 'import torch.nn as nn\n'), ((1744, 1790), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output_student_logits_ht'], {'dim': '(1)'}), '(output_student_logits_ht, dim=1)\n', (1757, 1790), True, 'import torch.nn.functional as F\n'), ((1791, 1833), 'torch.nn.functional.softmax', 'F.softmax', (['output_teacher_logits_ht'], {'dim': '(1)'}), '(output_teacher_logits_ht, dim=1)\n', (1800, 1833), True, 'import torch.nn.functional as F\n'), ((4094, 4136), 'torch.nn.functional.softmax', 'F.softmax', (['output_teacher_logits_ht'], {'dim': '(1)'}), '(output_teacher_logits_ht, dim=1)\n', (4103, 4136), True, 'import torch.nn.functional as F\n'), ((4164, 4206), 'torch.nn.functional.softmax', 'F.softmax', (['output_student_logits_ht'], {'dim': '(1)'}), '(output_student_logits_ht, dim=1)\n', (4173, 4206), True, 'import torch.nn.functional as F\n'), ((4231, 4270), 'torch.nn.functional.softmax', 'F.softmax', (['output_teacher_logits'], {'dim': '(1)'}), '(output_teacher_logits, dim=1)\n', (4240, 4270), True, 'import torch.nn.functional as F\n'), ((4296, 4335), 'torch.nn.functional.softmax', 'F.softmax', 
(['output_student_logits'], {'dim': '(1)'}), '(output_student_logits, dim=1)\n', (4305, 4335), True, 'import torch.nn.functional as F\n'), ((5093, 5158), 'torch.sum', 'torch.sum', (['(pred_class_argmax_student == pred_class_argmax_teacher)'], {}), '(pred_class_argmax_student == pred_class_argmax_teacher)\n', (5102, 5158), False, 'import torch\n'), ((6288, 6330), 'torch.nn.functional.softmax', 'F.softmax', (['output_teacher_logits_ht'], {'dim': '(1)'}), '(output_teacher_logits_ht, dim=1)\n', (6297, 6330), True, 'import torch.nn.functional as F\n'), ((6358, 6400), 'torch.nn.functional.softmax', 'F.softmax', (['output_student_logits_ht'], {'dim': '(1)'}), '(output_student_logits_ht, dim=1)\n', (6367, 6400), True, 'import torch.nn.functional as F\n'), ((6425, 6464), 'torch.nn.functional.softmax', 'F.softmax', (['output_teacher_logits'], {'dim': '(1)'}), '(output_teacher_logits, dim=1)\n', (6434, 6464), True, 'import torch.nn.functional as F\n'), ((6490, 6529), 'torch.nn.functional.softmax', 'F.softmax', (['output_student_logits'], {'dim': '(1)'}), '(output_student_logits, dim=1)\n', (6499, 6529), True, 'import torch.nn.functional as F\n'), ((7279, 7325), 'torch.sum', 'torch.sum', (['(pred_class_argmax_student == target)'], {}), '(pred_class_argmax_student == target)\n', (7288, 7325), False, 'import torch\n'), ((7455, 7520), 'torch.sum', 'torch.sum', (['(pred_class_argmax_student == pred_class_argmax_teacher)'], {}), '(pred_class_argmax_student == pred_class_argmax_teacher)\n', (7464, 7520), False, 'import torch\n'), ((9820, 9841), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (9839, 9841), False, 'from torchvision import datasets, transforms\n'), ((11041, 11050), 'alexnet.AlexNet', 'AlexNet', ([], {}), '()\n', (11048, 11050), False, 'from alexnet import AlexNet\n'), ((11162, 11176), 'alexnet_half.AlexNet_half', 'AlexNet_half', ([], {}), '()\n', (11174, 11176), False, 'from alexnet_half import AlexNet_half\n'), ((4358, 4387), 
'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (4370, 4387), True, 'import torch.nn as nn\n'), ((4388, 4434), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output_student_logits_ht'], {'dim': '(1)'}), '(output_student_logits_ht, dim=1)\n', (4401, 4434), True, 'import torch.nn.functional as F\n'), ((4435, 4477), 'torch.nn.functional.softmax', 'F.softmax', (['output_teacher_logits_ht'], {'dim': '(1)'}), '(output_teacher_logits_ht, dim=1)\n', (4444, 4477), True, 'import torch.nn.functional as F\n'), ((6552, 6581), 'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (6564, 6581), True, 'import torch.nn as nn\n'), ((6582, 6628), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output_student_logits_ht'], {'dim': '(1)'}), '(output_student_logits_ht, dim=1)\n', (6595, 6628), True, 'import torch.nn.functional as F\n'), ((6629, 6671), 'torch.nn.functional.softmax', 'F.softmax', (['output_teacher_logits_ht'], {'dim': '(1)'}), '(output_teacher_logits_ht, dim=1)\n', (6638, 6671), True, 'import torch.nn.functional as F\n'), ((1049, 1077), 'numpy.isin', 'np.isin', (['target', 'inc_classes'], {}), '(target, inc_classes)\n', (1056, 1077), True, 'import numpy as np\n'), ((10913, 10934), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (10932, 10934), False, 'from torchvision import datasets, transforms\n'), ((3731, 3759), 'numpy.isin', 'np.isin', (['target', 'inc_classes'], {}), '(target, inc_classes)\n', (3738, 3759), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Camera image classification demo code.
Runs continuous image detection on the VisionBonnet and prints the object and
probability for top three objects.
Example:
image_classification_camera.py --num_frames 10
"""
import argparse
import time
import os
import numpy as np
import keras
import paramiko
import sshconfig as cfg
from numpy.linalg import norm
from aiy.vision.inference import CameraInference
from aiy.vision.models import feature_extraction
from picamera import PiCamera
def main():
    """Image classification camera inference example.

    Continuously extracts feature vectors on the VisionBonnet, maps
    batches of them to word embeddings with a Keras model, picks the
    nearest vocabulary word per frame, and asks a remote host over SSH
    to speak the most frequent word of each batch.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument(
        '--num_objects',
        '-c',
        type=int,
        dest='num_objects',
        default=3,
        help='Sets the number of object interences to print.')
    parser.add_argument(
        '--save_frequency',
        '-s',
        type=int,
        dest='save_frequency',
        help='Sets the number of feature vectors which are bundled for '
             'saving.')
    parser.add_argument(
        '--frame_rate',
        '-f',
        type=int,
        dest='frame_rate',
        default=10,  # this has been changed
        help='Sets the frame rate.')
    parser.add_argument(
        '--resolution',
        '-r',
        type=int,
        nargs='+',
        dest='resolution',
        help='Sets the resolution.')
    parser.add_argument(
        '--sensor_mode',
        '-m',
        type=int,
        dest='sensor_mode',
        default=4,
        help='Sets the sensor mode. For details see '
             'https://picamera.readthedocs.io/en/release-1.13/fov.html'
             '#sensor-modes')
    args = parser.parse_args()
    if args.resolution is None:
        args.resolution = (1640, 1232)
    if args.save_frequency is None:
        args.save_frequency = args.frame_rate

    # Embedding-regression model and the reference label embeddings.
    data_path = '/home/pi/Desktop/'
    network = keras.models.load_model(data_path + 'model.h5')
    labels = np.load(data_path + 'train_5w_w2v_embeddings.npz')

    # SSH connection to the host that performs text-to-speech.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(cfg.host, username=cfg.username, password=cfg.password)
    except Exception:
        # Was a bare `except:` — that also swallowed KeyboardInterrupt
        # and SystemExit. Speech is best-effort, so keep running.
        print("Won't be speaking")

    try:
        with PiCamera() as camera:
            camera.sensor_mode = args.sensor_mode
            camera.resolution = args.resolution
            camera.framerate = args.frame_rate
            camera.start_preview(fullscreen=False, window=(100, 100, 640, 480))
            try:
                with CameraInference(feature_extraction.model()) as inference:
                    feature_list = []
                    for i, result in enumerate(inference.run()):
                        if i == args.num_frames:
                            break
                        feature_list.append(
                            feature_extraction.get_output_features(result))
                        # Classify and speak once per save_frequency frames.
                        if i % args.save_frequency == 0 and i > 0:
                            max_word = _most_common_word(
                                network, labels, feature_list)
                            print(max_word)
                            # NOTE: max_word is interpolated into a remote
                            # shell command. Safe only because it comes from
                            # our own label vocabulary, never from user input.
                            _, _, _ = ssh.exec_command(
                                'python3 speak.py {}'.format(max_word))
                            feature_list = []
            except KeyboardInterrupt:
                pass  # fall through to cleanup below
            finally:
                # Previously only reached on Ctrl-C; now the preview is
                # always torn down, including on normal completion.
                camera.stop_preview()
    finally:
        ssh.close()


def _most_common_word(network, labels, feature_list):
    """Map a batch of feature vectors to the most frequent predicted word.

    Args:
        network: Keras model mapping L2-normalized features to embeddings.
        labels: npz archive with 'embeddings' and parallel 'words' arrays.
        feature_list: list of 1-D feature vectors from the VisionBonnet.

    Returns:
        The word predicted for the plurality of the batch's frames.
    """
    fts = np.array(feature_list)
    fts = fts / norm(fts, axis=1)[:, None]  # L2-normalize each row
    vec_pred = network.predict(fts)
    pred_wrd = []
    for vec in vec_pred:
        # Nearest label embedding by Euclidean distance.
        lab_idx = np.argmin(norm(vec - labels['embeddings'], axis=1))
        pred_wrd.append(labels['words'][lab_idx])
    words, counts = np.unique(pred_wrd, return_counts=True)
    return words[np.argmax(counts)]
# Script entry point.
if __name__ == '__main__':
    main()
| [
"keras.models.load_model",
"numpy.unique",
"argparse.ArgumentParser",
"paramiko.AutoAddPolicy",
"aiy.vision.models.feature_extraction.model",
"picamera.PiCamera",
"numpy.argmax",
"numpy.array",
"numpy.linalg.norm",
"numpy.load",
"paramiko.SSHClient",
"aiy.vision.models.feature_extraction.get_o... | [((1168, 1193), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1191, 1193), False, 'import argparse\n'), ((2715, 2762), 'keras.models.load_model', 'keras.models.load_model', (["(data_path + 'model.h5')"], {}), "(data_path + 'model.h5')\n", (2738, 2762), False, 'import keras\n'), ((2776, 2826), 'numpy.load', 'np.load', (["(data_path + 'train_5w_w2v_embeddings.npz')"], {}), "(data_path + 'train_5w_w2v_embeddings.npz')\n", (2783, 2826), True, 'import numpy as np\n'), ((2854, 2874), 'paramiko.SSHClient', 'paramiko.SSHClient', ([], {}), '()\n', (2872, 2874), False, 'import paramiko\n'), ((2911, 2935), 'paramiko.AutoAddPolicy', 'paramiko.AutoAddPolicy', ([], {}), '()\n', (2933, 2935), False, 'import paramiko\n'), ((3079, 3089), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (3087, 3089), False, 'from picamera import PiCamera\n'), ((3358, 3384), 'aiy.vision.models.feature_extraction.model', 'feature_extraction.model', ([], {}), '()\n', (3382, 3384), False, 'from aiy.vision.models import feature_extraction\n'), ((3639, 3685), 'aiy.vision.models.feature_extraction.get_output_features', 'feature_extraction.get_output_features', (['result'], {}), '(result)\n', (3677, 3685), False, 'from aiy.vision.models import feature_extraction\n'), ((3780, 3802), 'numpy.array', 'np.array', (['feature_list'], {}), '(feature_list)\n', (3788, 3802), True, 'import numpy as np\n'), ((4307, 4346), 'numpy.unique', 'np.unique', (['pred_wrd'], {'return_counts': '(True)'}), '(pred_wrd, return_counts=True)\n', (4316, 4346), True, 'import numpy as np\n'), ((4420, 4437), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (4429, 4437), True, 'import numpy as np\n'), ((3839, 3856), 'numpy.linalg.norm', 'norm', (['fts'], {'axis': '(1)'}), '(fts, axis=1)\n', (3843, 3856), False, 'from numpy.linalg import norm\n'), ((4115, 4155), 'numpy.linalg.norm', 'norm', (["(vec - labels['embeddings'])"], {'axis': '(1)'}), "(vec - 
labels['embeddings'], axis=1)\n", (4119, 4155), False, 'from numpy.linalg import norm\n')] |
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the AddExplicitOutputLayers pass"""
import numpy as np
import pyxir as px
from typing import List
from .. import XGraph
from ..passing import XGraphMutator
from ..layer.xlayer import XLayer
class AddExplicitOutputLayers(XGraphMutator):
    """XGraph pass that materializes requested output tensors as leaf layers.

    A requested output tensor that still feeds other layers is renamed
    (suffix ``_hidden``) and followed by an identity 1x1 convolution
    carrying the original name, so the output becomes an explicit layer.
    Downstream layers are rewired to the renamed tensor.
    """

    def __init__(self, out_tensor_names: List[str] = None, layout: str = None):
        super().__init__()
        if out_tensor_names is None:
            self.out_tensor_names = set()
        else:
            self.out_tensor_names = set(out_tensor_names)
        # Maps an original layer name to its renamed '_hidden' counterpart.
        self.out_tensor_map = {}
        self.layout = layout

    def transform(self, xgraph: XGraph) -> XGraph:
        """Include the XGraph's own outputs in the requested output set."""
        for name in xgraph.get_output_names():
            self.out_tensor_names.add(name)
        return xgraph

    def _remap_bottoms(self, X: XLayer) -> None:
        # Rewrite X's inputs in place to point at any renamed layers.
        X.bottoms[:] = [self.out_tensor_map.get(b, b) for b in X.bottoms]

    def visit(self, X: XLayer) -> XLayer:
        needs_identity = (
            X.name in self.out_tensor_names
            and len(X.tops) > 0
            and len(X.shapes) == 4
            and self.layout is not None
        )
        if needs_identity:
            original_name = X.name
            X.name = original_name + "_hidden"
            self.out_tensor_map[original_name] = X.name
            if any(b in self.out_tensor_map for b in X.bottoms):
                self._remap_bottoms(X)
            # Identity 1x1 convolution: weights are a channels x channels
            # identity matrix reshaped to (out_ch, in_ch, 1, 1).
            nb_channels = X.shapes[self.layout.index("C")]
            eye = np.identity(nb_channels, dtype=np.float32)
            weights = eye.reshape(nb_channels, nb_channels, 1, 1)
            wX = px.ops.constant(X.name + "_w", weights)
            idX = px.ops.conv2d(
                original_name, X, wX, kernel_size=[1, 1], data_layout=self.layout
            )
            return [X, idX]
        if any(b in self.out_tensor_map for b in X.bottoms):
            self._remap_bottoms(X)
            return X
        return super().visit(X)
| [
"pyxir.ops.constant",
"numpy.identity",
"pyxir.ops.conv2d"
] | [((2183, 2224), 'pyxir.ops.constant', 'px.ops.constant', (["(new_name + '_w')", 'weights'], {}), "(new_name + '_w', weights)\n", (2198, 2224), True, 'import pyxir as px\n'), ((2243, 2320), 'pyxir.ops.conv2d', 'px.ops.conv2d', (['layer_name', 'X', 'wX'], {'kernel_size': '[1, 1]', 'data_layout': 'self.layout'}), '(layer_name, X, wX, kernel_size=[1, 1], data_layout=self.layout)\n', (2256, 2320), True, 'import pyxir as px\n'), ((2062, 2101), 'numpy.identity', 'np.identity', (['channels'], {'dtype': 'np.float32'}), '(channels, dtype=np.float32)\n', (2073, 2101), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import numpy.testing as npt
from scipy.linalg import toeplitz
from doatools.model.array_elements import CustomNonisotropicSensor
from doatools.model.perturbations import LocationErrors, GainErrors, \
PhaseErrors, MutualCoupling
from doatools.model.arrays import GridBasedArrayDesign
from doatools.model.arrays import UniformLinearArray, CoPrimeArray, \
NestedArray, MinimumRedundancyLinearArray, \
UniformCircularArray, UniformRectangularArray
from doatools.model.sources import FarField1DSourcePlacement
class Test1DArrayDesigns(unittest.TestCase):
    """Geometry checks (size, ndim, indices, locations) for 1D array designs."""

    def setUp(self):
        # Unit wavelength, so half-wavelength spacing is 0.5.
        self.wavelength = 1

    def test_ula(self):
        spacing = 2.
        name = 'TestULA'
        ula = UniformLinearArray(6, spacing, name)
        self.assertEqual(ula.size, 6)
        self.assertEqual(ula.ndim, 1)
        self.assertEqual(ula.name, name)
        npt.assert_allclose(ula.d0, np.array([spacing]))
        npt.assert_allclose(ula.bases, np.array([[spacing]]))
        indices = np.array([0, 1, 2, 3, 4, 5]).reshape((-1, 1))
        npt.assert_array_equal(ula.element_indices, indices)
        npt.assert_array_equal(ula.element_locations, indices * spacing)

    def test_nested(self):
        spacing = 1.
        nea = NestedArray(4, 3, spacing)
        self.assertEqual(nea.n1, 4)
        self.assertEqual(nea.n2, 3)
        self.assertEqual(nea.size, 7)
        self.assertEqual(nea.ndim, 1)
        npt.assert_allclose(nea.d0, np.array([spacing]))
        npt.assert_allclose(nea.bases, np.array([[spacing]]))
        indices = np.array([0, 1, 2, 3, 4, 9, 14]).reshape((-1, 1))
        npt.assert_array_equal(nea.element_indices, indices)
        npt.assert_array_equal(nea.element_locations, indices * spacing)

    def test_coprime(self):
        d0 = self.wavelength / 2
        # Expected flattened element indices for each co-prime mode.
        cases = {
            'm': np.array([0, 3, 6, 9, 12, 5, 10]),
            '2m': np.array([0, 3, 6, 9, 12, 5, 10, 15, 20, 25]),
        }
        for mode, flat_indices in cases.items():
            cpa = CoPrimeArray(3, 5, d0, mode)
            self.assertEqual(cpa.coprime_pair, (3, 5))
            self.assertEqual(cpa.mode, mode)
            self.assertEqual(cpa.size, flat_indices.size)
            self.assertEqual(cpa.ndim, 1)
            npt.assert_array_equal(cpa.d0, np.array([d0]))
            npt.assert_array_equal(cpa.bases, np.array([[d0]]))
            indices = flat_indices.reshape((-1, 1))
            npt.assert_array_equal(cpa.element_indices, indices)
            npt.assert_allclose(cpa.element_locations, indices * d0)

    def test_mra(self):
        d0 = self.wavelength / 2
        mra = MinimumRedundancyLinearArray(5, d0, 'TestMRA')
        self.assertEqual(mra.size, 5)
        self.assertEqual(mra.ndim, 1)
        npt.assert_array_equal(mra.d0, np.array([d0]))
        npt.assert_array_equal(mra.bases, np.array([[d0]]))
        indices = np.array([0, 1, 4, 7, 9]).reshape((-1, 1))
        npt.assert_array_equal(mra.element_indices, indices)
        npt.assert_allclose(mra.element_locations, indices * d0)
class Test2DArrayDesigns(unittest.TestCase):
    """Geometry checks for 2D array designs (UCA and URA)."""

    def setUp(self):
        self.wavelength = 1

    def test_uca(self):
        radius = 2.0
        n_elements = 4
        uca = UniformCircularArray(n_elements, radius, 'TestUCA')
        self.assertEqual(uca.size, n_elements)
        self.assertEqual(uca.ndim, 2)
        self.assertEqual(uca.name, 'TestUCA')
        self.assertEqual(uca.radius, radius)
        # Four elements evenly spaced on the circle, starting at (r, 0).
        expected_locations = np.array(
            [[2., 0.], [0., 2.], [-2., 0.], [0., -2.]]
        )
        npt.assert_allclose(uca.element_locations, expected_locations, atol=1e-8)

    def test_ura(self):
        m, n = 3, 4
        # Row-major grid indices of the m x n rectangular lattice.
        indices_expected = np.array(
            [(i, j) for i in range(m) for j in range(n)]
        )

        def check(ura, d0_vec):
            # Shared assertions for both square and rectangular cells.
            self.assertEqual(ura.size, m * n)
            self.assertEqual(ura.ndim, 2)
            self.assertEqual(ura.name, 'TestURA')
            self.assertEqual(ura.shape, (m, n))
            npt.assert_allclose(ura.d0, d0_vec)
            npt.assert_allclose(ura.bases, np.diag(d0_vec))
            npt.assert_array_equal(ura.element_indices, indices_expected)
            npt.assert_allclose(ura.element_locations, indices_expected * d0_vec)

        # Square cells.
        d0 = self.wavelength / 2
        check(UniformRectangularArray(m, n, d0, 'TestURA'), np.array([d0, d0]))
        # Rectangular cells.
        d0 = (self.wavelength / 2, self.wavelength / 3)
        check(UniformRectangularArray(m, n, d0, 'TestURA'), np.array(d0))
class TestGeneralGridBasedArrays(unittest.TestCase):
    """Checks GridBasedArrayDesign with custom non-axis-aligned bases."""

    def test_3d(self):
        bases = np.array([
            [0., 0.5, 0.],
            [1., 0., 0.],
            [0., 0., 2.]
        ])
        indices = np.array([
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [1, 1, 1]
        ])
        design = GridBasedArrayDesign(indices, bases=bases)
        self.assertEqual(design.size, indices.shape[0])
        self.assertEqual(design.ndim, bases.shape[1])
        # d0 is the per-basis-vector length (Euclidean norm of each row).
        npt.assert_allclose(design.d0, np.linalg.norm(bases, ord=2, axis=1))
        npt.assert_allclose(design.element_indices, indices)
        npt.assert_allclose(design.bases, bases)
        # Element locations are the linear combinations indices @ bases.
        npt.assert_allclose(design.element_locations, indices @ bases)
class TestSteeringMatrix(unittest.TestCase):
    def setUp(self):
        # Unit wavelength: element spacings below are in half-wavelengths.
        self.wavelength = 1.0
    def test_without_perturbations(self):
        """Steering matrix and its derivative for an unperturbed co-prime array."""
        cpa = CoPrimeArray(2, 3, self.wavelength / 2)
        # Three far-field sources at -60, 0, and +60 degrees.
        sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/3, 3))
        A, DA = cpa.steering_matrix(sources, self.wavelength, True)
        # Reference values: one column per source, one row per element.
        A_expected = np.array([
            [ 1.000000+0.000000j, 1.000000+0.000000j,  1.000000+0.000000j],
            [ 0.666131+0.745835j, 1.000000+0.000000j,  0.666131-0.745835j],
            [-0.112539+0.993647j, 1.000000+0.000000j, -0.112539-0.993647j],
            [-0.303263-0.952907j, 1.000000+0.000000j, -0.303263+0.952907j],
            [-0.816063+0.577964j, 1.000000+0.000000j, -0.816063-0.577964j],
            [ 0.798227+0.602356j, 1.000000+0.000000j,  0.798227-0.602356j]
        ])
        # Derivative of A w.r.t. the source angles (same layout as A).
        DA_expected = np.array([
            [ 0.000000+ 0.000000j, 0.000000+ 0.000000j,  0.000000+ 0.000000j],
            [-2.343109+ 2.092712j, 0.000000+ 6.283185j,  2.343109+ 2.092712j],
            [-6.243270- 0.707105j, 0.000000+12.566371j,  6.243270- 0.707105j],
            [ 4.490467- 1.429095j, 0.000000+ 9.424778j, -4.490467- 1.429095j],
            [-5.447178- 7.691209j, 0.000000+18.849556j,  5.447178- 7.691209j],
            [-8.515612+11.284673j, 0.000000+28.274334j,  8.515612+11.284673j]
        ])
        npt.assert_allclose(A, A_expected, rtol=1e-6)
        npt.assert_allclose(DA, DA_expected, rtol=1e-6)
    def test_with_perturbations(self):
        # TODO: not implemented yet — steering matrix with perturbations
        # (location/gain/phase errors, mutual coupling) is untested.
        pass
def test_custom_nonisotropic_1d(self):
# Sine response for azimuth angles (cosine for broadside angles)
f_sr = lambda r, az, el, pol: np.sin(az)
element = CustomNonisotropicSensor(f_sr)
ula = UniformLinearArray(5, self.wavelength / 2, element=element)
sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
A_expected = np.array([
[ 5.000000e-1+0.000000e+0j, 9.914449e-1+0.000000e+0j, 7.071068e-1+0.000000e+0j],
[-4.563621e-1-2.042881e-1j, 9.092510e-1-3.952538e-1j, -4.282945e-1+5.626401e-1j],
[ 3.330655e-1+3.729174e-1j, 6.762975e-1-7.249721e-1j, -1.882710e-1-6.815820e-1j],
[-1.516317e-1-4.764534e-1j, 3.312097e-1-9.344854e-1j, 6.563659e-1+2.630282e-1j],
[-5.626959e-2+4.968236e-1j, -6.879475e-2-9.890552e-1j, -6.068505e-1+3.629497e-1j]
])
A = ula.steering_matrix(sources, self.wavelength)
npt.assert_allclose(A, A_expected, rtol=1e-6)
def test_custom_vector_sensor_1d(self):
# Each sensor has three outputs with different gains.
gains = [1.0, 0.5, 0.1]
output_size = len(gains)
def f_sr(r, az, el, pol):
# Sine response.
res = np.sin(az)
return np.stack([res * g for g in gains])
element = CustomNonisotropicSensor(f_sr, output_size=output_size)
ula = UniformLinearArray(4, self.wavelength / 2, element=element)
sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
A_expected = np.array([
[ 5.000000e-1+0.000000e+0j, 9.914449e-1+0.000000e+0j, 7.071068e-1+0.000000e+0j],
[-4.563621e-1-2.042881e-1j, 9.092510e-1-3.952538e-1j, -4.282945e-1+5.626401e-1j],
[ 3.330655e-1+3.729174e-1j, 6.762975e-1-7.249721e-1j, -1.882710e-1-6.815820e-1j],
[-1.516317e-1-4.764534e-1j, 3.312097e-1-9.344854e-1j, 6.563659e-1+2.630282e-1j],
[ 2.500000e-1+0.000000e+0j, 4.957224e-1+0.000000e+0j, 3.535534e-1+0.000000e+0j],
[-2.281810e-1-1.021441e-1j, 4.546255e-1-1.976269e-1j, -2.141472e-1+2.813200e-1j],
[ 1.665327e-1+1.864587e-1j, 3.381488e-1-3.624861e-1j, -9.413548e-2-3.407910e-1j],
[-7.581586e-2-2.382267e-1j, 1.656049e-1-4.672427e-1j, 3.281829e-1+1.315141e-1j],
[ 5.000000e-2+0.000000e+0j, 9.914449e-2+0.000000e+0j, 7.071068e-2+0.000000e+0j],
[-4.563621e-2-2.042881e-2j, 9.092510e-2-3.952538e-2j, -4.282945e-2+5.626401e-2j],
[ 3.330655e-2+3.729174e-2j, 6.762975e-2-7.249721e-2j, -1.882710e-2-6.815820e-2j],
[-1.516317e-2-4.764534e-2j, 3.312097e-2-9.344854e-2j, 6.563659e-2+2.630282e-2j]
])
A = ula.steering_matrix(sources, self.wavelength)
npt.assert_allclose(A, A_expected, rtol=1e-6)
class TestArrayPerturbations(unittest.TestCase):
    """Tests for attaching, querying and updating array perturbations."""
    def setUp(self):
        self.wavelength = 1
    def test_array_perturbations(self):
        """Attach all four perturbation types and verify the resulting copy.

        Location errors are tested for 1D, 2D and 3D; the other perturbation
        types are shared across iterations. Perturbation parameters are drawn
        randomly, so assertions only compare round-tripped values, never
        absolute ones.
        """
        d0 = self.wavelength / 2
        ula = UniformLinearArray(5, d0)
        # Map perturbation classes to the string keys accepted by
        # get_perturbed_copy, and build the reverse map for lookups.
        ptype2str = {
            LocationErrors: 'location_errors',
            GainErrors: 'gain_errors',
            PhaseErrors: 'phase_errors',
            MutualCoupling: 'mutual_coupling'
        }
        str2ptype = {v: k for k, v in ptype2str.items()}
        # No perturbations yet.
        self.assertFalse(ula.is_perturbed)
        for ptype in ptype2str.keys():
            self.assertFalse(ula.has_perturbation(ptype))
        # Now we add perturbations.
        gain_errors = np.random.uniform(-0.5, 0.5, (ula.size,))
        phase_errors = np.random.uniform(-np.pi, np.pi, (ula.size,))
        # Symmetric Toeplitz coupling matrix with a single nonzero off-diagonal.
        mutual_coupling = toeplitz([1.0, 0.4+0.2j, 0.0, 0.0, 0.0])
        perturbed_name = 'PerturbedULA'
        # Test for 1D, 2D, 3D location errors.
        for ndim in [1, 2, 3]:
            location_errors = np.random.uniform(-0.1 * d0, 0.1 * d0, (ula.size, ndim))
            # Each value is (parameters, is_known); location errors are the
            # only ones marked unknown here.
            perturb_defs = {
                'gain_errors': (gain_errors, True),
                'phase_errors': (phase_errors, True),
                'location_errors': (location_errors, False),
                'mutual_coupling': (mutual_coupling, True)
            }
            ula_perturbed = ula.get_perturbed_copy(perturb_defs, perturbed_name)
            self.assertEqual(ula_perturbed.name, perturbed_name)
            self.assertTrue(ula_perturbed.is_perturbed)
            # Every perturbation must round-trip: present, same params, same
            # known/unknown flag.
            for k, v in perturb_defs.items():
                cur_ptype = str2ptype[k]
                self.assertEqual(ula_perturbed.has_perturbation(cur_ptype), True)
                npt.assert_allclose(ula_perturbed.get_perturbation_params(cur_ptype), v[0])
                self.assertEqual(ula_perturbed.is_perturbation_known(cur_ptype), v[1])
            # Verify location error calculations.
            self.assertEqual(ula_perturbed.actual_ndim, ndim)
            # Nominal 1D locations are zero-padded up to ndim before adding
            # the location errors.
            npt.assert_allclose(
                ula_perturbed.actual_element_locations,
                np.pad(ula.element_locations, ((0, 0), (0, ndim - 1)), 'constant') + location_errors
            )
            # The `perturbation` property should return a list of perturbations.
            perturbs_actual = ula_perturbed.perturbations
            self.assertEqual(len(perturbs_actual), len(perturb_defs))
            for perturb in perturbs_actual:
                params_expected, known_expected = perturb_defs[ptype2str[perturb.__class__]]
                npt.assert_allclose(perturb.params, params_expected)
                self.assertEqual(perturb.is_known, known_expected)
            # Perturbation-free copies should not have perturbations.
            self.assertFalse(ula_perturbed.get_perturbation_free_copy().is_perturbed)
    def test_perturbation_updates(self):
        """Updating one perturbation type must not disturb the others."""
        d0 = self.wavelength / 2
        ula = UniformLinearArray(5, d0)
        gain_errors = np.random.uniform(-0.5, 0.5, (ula.size,))
        ula_perturbed = ula.get_perturbed_copy([
            GainErrors(gain_errors, True)
        ])
        # Repeatedly replace the phase errors, toggling the known flag.
        for known in [False, True, True, False]:
            phase_errors = np.random.uniform(-np.pi, np.pi, (ula.size, ))
            ula_perturbed = ula_perturbed.get_perturbed_copy([
                PhaseErrors(phase_errors, known)
            ])
            # The gain errors should remain there.
            self.assertTrue(ula_perturbed.has_perturbation(GainErrors))
            self.assertTrue(ula_perturbed.is_perturbation_known(GainErrors))
            npt.assert_allclose(
                ula_perturbed.get_perturbation_params(GainErrors),
                gain_errors
            )
            # The phase errors should be updated.
            self.assertTrue(ula_perturbed.has_perturbation(PhaseErrors))
            self.assertEqual(ula_perturbed.is_perturbation_known(PhaseErrors), known)
            npt.assert_allclose(
                ula_perturbed.get_perturbation_params(PhaseErrors),
                phase_errors
            )
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"doatools.model.arrays.UniformCircularArray",
"doatools.model.perturbations.PhaseErrors",
"numpy.array",
"doatools.model.arrays.GridBasedArrayDesign",
"numpy.linalg.norm",
"numpy.sin",
"unittest.main",
"doatools.model.arrays.NestedArray",
"numpy.testing.assert_allclose",
"numpy.stack",
"numpy.li... | [((15256, 15271), 'unittest.main', 'unittest.main', ([], {}), '()\n', (15269, 15271), False, 'import unittest\n'), ((832, 870), 'doatools.model.arrays.UniformLinearArray', 'UniformLinearArray', (['(6)', 'd0', 'custom_name'], {}), '(6, d0, custom_name)\n', (850, 870), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((1437, 1458), 'doatools.model.arrays.NestedArray', 'NestedArray', (['(4)', '(3)', 'd0'], {}), '(4, 3, d0)\n', (1448, 1458), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((2088, 2115), 'doatools.model.arrays.CoPrimeArray', 'CoPrimeArray', (['(3)', '(5)', 'd0', '"""m"""'], {}), "(3, 5, d0, 'm')\n", (2100, 2115), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((2717, 2745), 'doatools.model.arrays.CoPrimeArray', 'CoPrimeArray', (['(3)', '(5)', 'd0', '"""2m"""'], {}), "(3, 5, d0, '2m')\n", (2729, 2745), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((3457, 3505), 'doatools.model.arrays.MinimumRedundancyLinearArray', 'MinimumRedundancyLinearArray', (['(5)', 'd0', 'custom_name'], {}), '(5, d0, custom_name)\n', (3485, 3505), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((4163, 4202), 'doatools.model.arrays.UniformCircularArray', 'UniformCircularArray', (['n', 'r', 'custom_name'], {}), '(n, r, custom_name)\n', (4183, 4202), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, 
UniformCircularArray, UniformRectangularArray\n'), ((4396, 4456), 'numpy.array', 'np.array', (['[[2.0, 0.0], [0.0, 2.0], [-2.0, 0.0], [0.0, -2.0]]'], {}), '([[2.0, 0.0], [0.0, 2.0], [-2.0, 0.0], [0.0, -2.0]])\n', (4404, 4456), True, 'import numpy as np\n'), ((4479, 4553), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['uca.element_locations', 'locations_expected'], {'atol': '(1e-08)'}), '(uca.element_locations, locations_expected, atol=1e-08)\n', (4498, 4553), True, 'import numpy.testing as npt\n'), ((4657, 4768), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [2, 0], [2,\n 1], [2, 2], [2, 3]]'], {}), '([[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3], [\n 2, 0], [2, 1], [2, 2], [2, 3]])\n', (4665, 4768), True, 'import numpy as np\n'), ((4881, 4927), 'doatools.model.arrays.UniformRectangularArray', 'UniformRectangularArray', (['m', 'n', 'd0', 'custom_name'], {}), '(m, n, d0, custom_name)\n', (4904, 4927), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((5225, 5287), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['ura1.element_indices', 'indices_expected'], {}), '(ura1.element_indices, indices_expected)\n', (5247, 5287), True, 'import numpy.testing as npt\n'), ((5296, 5362), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['ura1.element_locations', '(indices_expected * d0)'], {}), '(ura1.element_locations, indices_expected * d0)\n', (5315, 5362), True, 'import numpy.testing as npt\n'), ((5462, 5508), 'doatools.model.arrays.UniformRectangularArray', 'UniformRectangularArray', (['m', 'n', 'd0', 'custom_name'], {}), '(m, n, d0, custom_name)\n', (5485, 5508), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((5797, 5859), 
'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['ura2.element_indices', 'indices_expected'], {}), '(ura2.element_indices, indices_expected)\n', (5819, 5859), True, 'import numpy.testing as npt\n'), ((6073, 6134), 'numpy.array', 'np.array', (['[[0.0, 0.5, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 2.0]]'], {}), '([[0.0, 0.5, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 2.0]])\n', (6081, 6134), True, 'import numpy as np\n'), ((6193, 6247), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 1, 1]]'], {}), '([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 1, 1]])\n', (6201, 6247), True, 'import numpy as np\n'), ((6367, 6409), 'doatools.model.arrays.GridBasedArrayDesign', 'GridBasedArrayDesign', (['indices'], {'bases': 'bases'}), '(indices, bases=bases)\n', (6387, 6409), False, 'from doatools.model.arrays import GridBasedArrayDesign\n'), ((6602, 6653), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['array.element_indices', 'indices'], {}), '(array.element_indices, indices)\n', (6621, 6653), True, 'import numpy.testing as npt\n'), ((6662, 6701), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['array.bases', 'bases'], {}), '(array.bases, bases)\n', (6681, 6701), True, 'import numpy.testing as npt\n'), ((6710, 6774), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['array.element_locations', 'locations_expected'], {}), '(array.element_locations, locations_expected)\n', (6729, 6774), True, 'import numpy.testing as npt\n'), ((6930, 6969), 'doatools.model.arrays.CoPrimeArray', 'CoPrimeArray', (['(2)', '(3)', '(self.wavelength / 2)'], {}), '(2, 3, self.wavelength / 2)\n', (6942, 6969), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((7138, 7500), 'numpy.array', 'np.array', (['[[1.0 + 0.0j, 1.0 + 0.0j, 1.0 + 0.0j], [0.666131 + 0.745835j, 1.0 + 0.0j, \n 0.666131 - 0.745835j], [-0.112539 + 0.993647j, 1.0 + 0.0j, -0.112539 - \n 
0.993647j], [-0.303263 - 0.952907j, 1.0 + 0.0j, -0.303263 + 0.952907j],\n [-0.816063 + 0.577964j, 1.0 + 0.0j, -0.816063 - 0.577964j], [0.798227 +\n 0.602356j, 1.0 + 0.0j, 0.798227 - 0.602356j]]'], {}), '([[1.0 + 0.0j, 1.0 + 0.0j, 1.0 + 0.0j], [0.666131 + 0.745835j, 1.0 +\n 0.0j, 0.666131 - 0.745835j], [-0.112539 + 0.993647j, 1.0 + 0.0j, -\n 0.112539 - 0.993647j], [-0.303263 - 0.952907j, 1.0 + 0.0j, -0.303263 + \n 0.952907j], [-0.816063 + 0.577964j, 1.0 + 0.0j, -0.816063 - 0.577964j],\n [0.798227 + 0.602356j, 1.0 + 0.0j, 0.798227 - 0.602356j]])\n', (7146, 7500), True, 'import numpy as np\n'), ((7637, 8032), 'numpy.array', 'np.array', (['[[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [-2.343109 + 2.092712j, 0.0 + \n 6.283185j, 2.343109 + 2.092712j], [-6.24327 - 0.707105j, 0.0 + \n 12.566371j, 6.24327 - 0.707105j], [4.490467 - 1.429095j, 0.0 + \n 9.424778j, -4.490467 - 1.429095j], [-5.447178 - 7.691209j, 0.0 + \n 18.849556j, 5.447178 - 7.691209j], [-8.515612 + 11.284673j, 0.0 + \n 28.274334j, 8.515612 + 11.284673j]]'], {}), '([[0.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j], [-2.343109 + 2.092712j, 0.0 +\n 6.283185j, 2.343109 + 2.092712j], [-6.24327 - 0.707105j, 0.0 + \n 12.566371j, 6.24327 - 0.707105j], [4.490467 - 1.429095j, 0.0 + \n 9.424778j, -4.490467 - 1.429095j], [-5.447178 - 7.691209j, 0.0 + \n 18.849556j, 5.447178 - 7.691209j], [-8.515612 + 11.284673j, 0.0 + \n 28.274334j, 8.515612 + 11.284673j]])\n', (7645, 8032), True, 'import numpy as np\n'), ((8140, 8186), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['A', 'A_expected'], {'rtol': '(1e-06)'}), '(A, A_expected, rtol=1e-06)\n', (8159, 8186), True, 'import numpy.testing as npt\n'), ((8194, 8242), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['DA', 'DA_expected'], {'rtol': '(1e-06)'}), '(DA, DA_expected, rtol=1e-06)\n', (8213, 8242), True, 'import numpy.testing as npt\n'), ((8479, 8509), 'doatools.model.array_elements.CustomNonisotropicSensor', 'CustomNonisotropicSensor', (['f_sr'], {}), '(f_sr)\n', 
(8503, 8509), False, 'from doatools.model.array_elements import CustomNonisotropicSensor\n'), ((8524, 8583), 'doatools.model.arrays.UniformLinearArray', 'UniformLinearArray', (['(5)', '(self.wavelength / 2)'], {'element': 'element'}), '(5, self.wavelength / 2, element=element)\n', (8542, 8583), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((8684, 9069), 'numpy.array', 'np.array', (['[[0.5 + 0.0j, 0.9914449 + 0.0j, 0.7071068 + 0.0j], [-0.4563621 - 0.2042881j,\n 0.909251 - 0.3952538j, -0.4282945 + 0.5626401j], [0.3330655 + \n 0.3729174j, 0.6762975 - 0.7249721j, -0.188271 - 0.681582j], [-0.1516317 -\n 0.4764534j, 0.3312097 - 0.9344854j, 0.6563659 + 0.2630282j], [-\n 0.05626959 + 0.4968236j, -0.06879475 - 0.9890552j, -0.6068505 + 0.3629497j]\n ]'], {}), '([[0.5 + 0.0j, 0.9914449 + 0.0j, 0.7071068 + 0.0j], [-0.4563621 - \n 0.2042881j, 0.909251 - 0.3952538j, -0.4282945 + 0.5626401j], [0.3330655 +\n 0.3729174j, 0.6762975 - 0.7249721j, -0.188271 - 0.681582j], [-0.1516317 -\n 0.4764534j, 0.3312097 - 0.9344854j, 0.6563659 + 0.2630282j], [-\n 0.05626959 + 0.4968236j, -0.06879475 - 0.9890552j, -0.6068505 + \n 0.3629497j]])\n', (8692, 9069), True, 'import numpy as np\n'), ((9246, 9292), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['A', 'A_expected'], {'rtol': '(1e-06)'}), '(A, A_expected, rtol=1e-06)\n', (9265, 9292), True, 'import numpy.testing as npt\n'), ((9632, 9687), 'doatools.model.array_elements.CustomNonisotropicSensor', 'CustomNonisotropicSensor', (['f_sr'], {'output_size': 'output_size'}), '(f_sr, output_size=output_size)\n', (9656, 9687), False, 'from doatools.model.array_elements import CustomNonisotropicSensor\n'), ((9702, 9761), 'doatools.model.arrays.UniformLinearArray', 'UniformLinearArray', (['(4)', '(self.wavelength / 2)'], {'element': 'element'}), '(4, self.wavelength / 2, element=element)\n', (9720, 9761), False, 'from 
doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((9862, 10770), 'numpy.array', 'np.array', (['[[0.5 + 0.0j, 0.9914449 + 0.0j, 0.7071068 + 0.0j], [-0.4563621 - 0.2042881j,\n 0.909251 - 0.3952538j, -0.4282945 + 0.5626401j], [0.3330655 + \n 0.3729174j, 0.6762975 - 0.7249721j, -0.188271 - 0.681582j], [-0.1516317 -\n 0.4764534j, 0.3312097 - 0.9344854j, 0.6563659 + 0.2630282j], [0.25 + \n 0.0j, 0.4957224 + 0.0j, 0.3535534 + 0.0j], [-0.228181 - 0.1021441j, \n 0.4546255 - 0.1976269j, -0.2141472 + 0.28132j], [0.1665327 + 0.1864587j,\n 0.3381488 - 0.3624861j, -0.09413548 - 0.340791j], [-0.07581586 - \n 0.2382267j, 0.1656049 - 0.4672427j, 0.3281829 + 0.1315141j], [0.05 + \n 0.0j, 0.09914449 + 0.0j, 0.07071068 + 0.0j], [-0.04563621 - 0.02042881j,\n 0.0909251 - 0.03952538j, -0.04282945 + 0.05626401j], [0.03330655 + \n 0.03729174j, 0.06762975 - 0.07249721j, -0.0188271 - 0.0681582j], [-\n 0.01516317 - 0.04764534j, 0.03312097 - 0.09344854j, 0.06563659 + \n 0.02630282j]]'], {}), '([[0.5 + 0.0j, 0.9914449 + 0.0j, 0.7071068 + 0.0j], [-0.4563621 - \n 0.2042881j, 0.909251 - 0.3952538j, -0.4282945 + 0.5626401j], [0.3330655 +\n 0.3729174j, 0.6762975 - 0.7249721j, -0.188271 - 0.681582j], [-0.1516317 -\n 0.4764534j, 0.3312097 - 0.9344854j, 0.6563659 + 0.2630282j], [0.25 + \n 0.0j, 0.4957224 + 0.0j, 0.3535534 + 0.0j], [-0.228181 - 0.1021441j, \n 0.4546255 - 0.1976269j, -0.2141472 + 0.28132j], [0.1665327 + 0.1864587j,\n 0.3381488 - 0.3624861j, -0.09413548 - 0.340791j], [-0.07581586 - \n 0.2382267j, 0.1656049 - 0.4672427j, 0.3281829 + 0.1315141j], [0.05 + \n 0.0j, 0.09914449 + 0.0j, 0.07071068 + 0.0j], [-0.04563621 - 0.02042881j,\n 0.0909251 - 0.03952538j, -0.04282945 + 0.05626401j], [0.03330655 + \n 0.03729174j, 0.06762975 - 0.07249721j, -0.0188271 - 0.0681582j], [-\n 0.01516317 - 0.04764534j, 0.03312097 - 0.09344854j, 0.06563659 + \n 0.02630282j]])\n', (9870, 10770), True, 'import 
numpy as np\n'), ((11077, 11123), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['A', 'A_expected'], {'rtol': '(1e-06)'}), '(A, A_expected, rtol=1e-06)\n', (11096, 11123), True, 'import numpy.testing as npt\n'), ((11315, 11340), 'doatools.model.arrays.UniformLinearArray', 'UniformLinearArray', (['(5)', 'd0'], {}), '(5, d0)\n', (11333, 11340), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((11842, 11883), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)', '(ula.size,)'], {}), '(-0.5, 0.5, (ula.size,))\n', (11859, 11883), True, 'import numpy as np\n'), ((11907, 11952), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi', '(ula.size,)'], {}), '(-np.pi, np.pi, (ula.size,))\n', (11924, 11952), True, 'import numpy as np\n'), ((11979, 12021), 'scipy.linalg.toeplitz', 'toeplitz', (['[1.0, 0.4 + 0.2j, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.4 + 0.2j, 0.0, 0.0, 0.0])\n', (11987, 12021), False, 'from scipy.linalg import toeplitz\n'), ((14087, 14112), 'doatools.model.arrays.UniformLinearArray', 'UniformLinearArray', (['(5)', 'd0'], {}), '(5, d0)\n', (14105, 14112), False, 'from doatools.model.arrays import UniformLinearArray, CoPrimeArray, NestedArray, MinimumRedundancyLinearArray, UniformCircularArray, UniformRectangularArray\n'), ((14135, 14176), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)', '(ula.size,)'], {}), '(-0.5, 0.5, (ula.size,))\n', (14152, 14176), True, 'import numpy as np\n'), ((1031, 1045), 'numpy.array', 'np.array', (['[d0]'], {}), '([d0])\n', (1039, 1045), True, 'import numpy as np\n'), ((1086, 1102), 'numpy.array', 'np.array', (['[[d0]]'], {}), '([[d0]])\n', (1094, 1102), True, 'import numpy as np\n'), ((1643, 1657), 'numpy.array', 'np.array', (['[d0]'], {}), '([d0])\n', (1651, 1657), True, 'import numpy as np\n'), ((1698, 1714), 'numpy.array', 'np.array', (['[[d0]]'], {}), 
'([[d0]])\n', (1706, 1714), True, 'import numpy as np\n'), ((2327, 2341), 'numpy.array', 'np.array', (['[d0]'], {}), '([d0])\n', (2335, 2341), True, 'import numpy as np\n'), ((2386, 2402), 'numpy.array', 'np.array', (['[[d0]]'], {}), '([[d0]])\n', (2394, 2402), True, 'import numpy as np\n'), ((2959, 2973), 'numpy.array', 'np.array', (['[d0]'], {}), '([d0])\n', (2967, 2973), True, 'import numpy as np\n'), ((3018, 3034), 'numpy.array', 'np.array', (['[[d0]]'], {}), '([[d0]])\n', (3026, 3034), True, 'import numpy as np\n'), ((3621, 3635), 'numpy.array', 'np.array', (['[d0]'], {}), '([d0])\n', (3629, 3635), True, 'import numpy as np\n'), ((3679, 3695), 'numpy.array', 'np.array', (['[[d0]]'], {}), '([[d0]])\n', (3687, 3695), True, 'import numpy as np\n'), ((5141, 5159), 'numpy.array', 'np.array', (['[d0, d0]'], {}), '([d0, d0])\n', (5149, 5159), True, 'import numpy as np\n'), ((5722, 5734), 'numpy.array', 'np.array', (['d0'], {}), '(d0)\n', (5730, 5734), True, 'import numpy as np\n'), ((5776, 5787), 'numpy.diag', 'np.diag', (['d0'], {}), '(d0)\n', (5783, 5787), True, 'import numpy as np\n'), ((6556, 6592), 'numpy.linalg.norm', 'np.linalg.norm', (['bases'], {'ord': '(2)', 'axis': '(1)'}), '(bases, ord=2, axis=1)\n', (6570, 6592), True, 'import numpy as np\n'), ((7014, 7051), 'numpy.linspace', 'np.linspace', (['(-np.pi / 3)', '(np.pi / 3)', '(3)'], {}), '(-np.pi / 3, np.pi / 3, 3)\n', (7025, 7051), True, 'import numpy as np\n'), ((8450, 8460), 'numpy.sin', 'np.sin', (['az'], {}), '(az)\n', (8456, 8460), True, 'import numpy as np\n'), ((8628, 8665), 'numpy.linspace', 'np.linspace', (['(-np.pi / 3)', '(np.pi / 4)', '(3)'], {}), '(-np.pi / 3, np.pi / 4, 3)\n', (8639, 8665), True, 'import numpy as np\n'), ((9549, 9559), 'numpy.sin', 'np.sin', (['az'], {}), '(az)\n', (9555, 9559), True, 'import numpy as np\n'), ((9579, 9615), 'numpy.stack', 'np.stack', (['[(res * g) for g in gains]'], {}), '([(res * g) for g in gains])\n', (9587, 9615), True, 'import numpy as np\n'), ((9806, 
9843), 'numpy.linspace', 'np.linspace', (['(-np.pi / 3)', '(np.pi / 4)', '(3)'], {}), '(-np.pi / 3, np.pi / 4, 3)\n', (9817, 9843), True, 'import numpy as np\n'), ((12168, 12224), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1 * d0)', '(0.1 * d0)', '(ula.size, ndim)'], {}), '(-0.1 * d0, 0.1 * d0, (ula.size, ndim))\n', (12185, 12224), True, 'import numpy as np\n'), ((14355, 14400), 'numpy.random.uniform', 'np.random.uniform', (['(-np.pi)', 'np.pi', '(ula.size,)'], {}), '(-np.pi, np.pi, (ula.size,))\n', (14372, 14400), True, 'import numpy as np\n'), ((5201, 5210), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (5207, 5210), True, 'import numpy as np\n'), ((5956, 5968), 'numpy.array', 'np.array', (['d0'], {}), '(d0)\n', (5964, 5968), True, 'import numpy as np\n'), ((13722, 13774), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['perturb.params', 'params_expected'], {}), '(perturb.params, params_expected)\n', (13741, 13774), True, 'import numpy.testing as npt\n'), ((14238, 14267), 'doatools.model.perturbations.GainErrors', 'GainErrors', (['gain_errors', '(True)'], {}), '(gain_errors, True)\n', (14248, 14267), False, 'from doatools.model.perturbations import LocationErrors, GainErrors, PhaseErrors, MutualCoupling\n'), ((1181, 1209), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (1189, 1209), True, 'import numpy as np\n'), ((1316, 1357), 'numpy.array', 'np.array', (['[0.0, 2.0, 4.0, 6.0, 8.0, 10.0]'], {}), '([0.0, 2.0, 4.0, 6.0, 8.0, 10.0])\n', (1324, 1357), True, 'import numpy as np\n'), ((1793, 1825), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 9, 14]'], {}), '([0, 1, 2, 3, 4, 9, 14])\n', (1801, 1825), True, 'import numpy as np\n'), ((1932, 1978), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 3.0, 4.0, 9.0, 14.0]'], {}), '([0.0, 1.0, 2.0, 3.0, 4.0, 9.0, 14.0])\n', (1940, 1978), True, 'import numpy as np\n'), ((2482, 2515), 'numpy.array', 'np.array', (['[0, 3, 6, 9, 12, 5, 10]'], {}), '([0, 3, 6, 9, 12, 5, 10])\n', 
(2490, 2515), True, 'import numpy as np\n'), ((2620, 2665), 'numpy.array', 'np.array', (['[0.0, 1.5, 3.0, 4.5, 6.0, 2.5, 5.0]'], {}), '([0.0, 1.5, 3.0, 4.5, 6.0, 2.5, 5.0])\n', (2628, 2665), True, 'import numpy as np\n'), ((3114, 3159), 'numpy.array', 'np.array', (['[0, 3, 6, 9, 12, 5, 10, 15, 20, 25]'], {}), '([0, 3, 6, 9, 12, 5, 10, 15, 20, 25])\n', (3122, 3159), True, 'import numpy as np\n'), ((3264, 3326), 'numpy.array', 'np.array', (['[0.0, 1.5, 3.0, 4.5, 6.0, 2.5, 5.0, 7.5, 10.0, 12.5]'], {}), '([0.0, 1.5, 3.0, 4.5, 6.0, 2.5, 5.0, 7.5, 10.0, 12.5])\n', (3272, 3326), True, 'import numpy as np\n'), ((3774, 3799), 'numpy.array', 'np.array', (['[0, 1, 4, 7, 9]'], {}), '([0, 1, 4, 7, 9])\n', (3782, 3799), True, 'import numpy as np\n'), ((3903, 3938), 'numpy.array', 'np.array', (['[0.0, 0.5, 2.0, 3.5, 4.5]'], {}), '([0.0, 0.5, 2.0, 3.5, 4.5])\n', (3911, 3938), True, 'import numpy as np\n'), ((13261, 13327), 'numpy.pad', 'np.pad', (['ula.element_locations', '((0, 0), (0, ndim - 1))', '"""constant"""'], {}), "(ula.element_locations, ((0, 0), (0, ndim - 1)), 'constant')\n", (13267, 13327), True, 'import numpy as np\n'), ((14481, 14513), 'doatools.model.perturbations.PhaseErrors', 'PhaseErrors', (['phase_errors', 'known'], {}), '(phase_errors, known)\n', (14492, 14513), False, 'from doatools.model.perturbations import LocationErrors, GainErrors, PhaseErrors, MutualCoupling\n')] |
import itertools
from functools import reduce
from logging import getLogger
from typing import List, Iterable, Set, Tuple, Dict
import numpy as np
from scipy.stats import binom
from ..pattern import ResponseIdentity, Rank
from ..api import RangeDatabase, RangeAttack, LeakagePattern
log = getLogger(__name__)
class LMPrank(RangeAttack):
    """
    Implements the Full data reconstruction Range attack from [LMP17] based on Access pattern & Rank leakage.
    PS. The dataset should always be dense and as a best practice N should be a multiple of 4
    """
    @classmethod
    def name(cls) -> str:
        return "LMP-rank"
    @classmethod
    def required_leakage(cls) -> List[LeakagePattern[Set[int]]]:
        # Needs both the set of matching record ids and the rank interval
        # of every observed query.
        return [ResponseIdentity(), Rank()]
    def __partition(self, rids: List[Set[int]], rank: List[Tuple[int, int]]) \
            -> Dict[int, int]:
        """Assign a candidate partition identifier ``m_r`` to every record.

        For record ``r``, intersect the rank intervals of all queries whose
        response contains ``r`` and remove every rank position also covered
        by a query that did NOT return ``r``. ``m_r`` is the largest
        surviving position; if nothing survives, fall back to the largest
        position consistent with the matching queries, then to the smallest
        position among the non-matching ones.
        """
        leakage = list(zip(rank, rids))
        m_r: Dict[int, int] = dict()
        for r in range(len(self.db())):
            # Rank intervals [lo, hi] (inclusive) of queries returning r ...
            matching = [np.arange(lo, hi + 1) for (lo, hi), resp in leakage if r in resp]
            # ... and of queries not returning r.
            non_matching = [np.arange(lo, hi + 1) for (lo, hi), resp in leakage if r not in resp]
            intersect_set = reduce(np.intersect1d, matching) if matching else []
            union_set = reduce(np.union1d, non_matching) if non_matching else []
            set_diff = np.setdiff1d(intersect_set, union_set, assume_unique=True)
            if len(set_diff) > 0:
                m_r[r] = np.amax(set_diff)
            elif len(intersect_set) > 0:
                m_r[r] = np.amax(intersect_set)
            else:
                m_r[r] = np.amin(union_set)
        return m_r
    def __sorting(self, rids: List[Set[int]], rank: List[Tuple[int, int]]) \
            -> Dict[int, int]:
        """Map record index -> reconstructed value, using 1-based keys.

        If fewer than N distinct partitions were recovered, the attack fails
        and min(DB) is returned for every record.
        """
        val_r: Dict[int, int] = dict()
        big_n = self.db().get_num_of_values()
        m_r = self.__partition(rids, rank)
        big_m = sorted(set(m_r.values()))
        if len(big_m) < big_n:
            log.warning(
                f"{self.name()} Failed, The number of recreated partitions is not enough... Return min(DB) n times")
            # Bug fix: keys must be 1-based here as well. The original used
            # 0-based keys on this failure path, so recover() — which reads
            # val[i + 1] — raised KeyError instead of returning min(DB).
            min_val = self.db().get_min()
            val_r = {r + 1: min_val for r in range(len(self.db()))}
        else:
            # Rank of a record's partition among all partitions gives its value.
            for r in range(len(self.db())):
                val_r[r + 1] = big_m.index(m_r[r]) + 1
        return val_r
    def recover(self, queries: Iterable[Tuple[int, int]]) -> List[int]:
        """Run the attack on the observed queries; returns values in record order."""
        log.info(f"Starting {self.name()}.")
        rids = self.required_leakage()[0](self.db(), queries)
        rank = self.required_leakage()[1](self.db(), queries)
        val = self.__sorting(rids, rank)
        log.info(f"Reconstruction completed.")
        return [val[i + 1] for i in range(len(val))]
class LMPrid(RangeAttack):
    """
    Implements the Full data reconstruction Range attack from [LMP17] based on Access Pattern leakage.
    PS. The dataset should always be dense and as a best practice N should be a multiple of 4
    """
    @classmethod
    def name(cls) -> str:
        return "LMP-rid"
    @classmethod
    def required_leakage(cls) -> List[LeakagePattern[Set[int]]]:
        return [ResponseIdentity()]
    def partitioning(self, rids: List[Set[int]]) -> Dict[int, Set[int]]:
        """For every record r, compute the set of records ("point") that appear
        in exactly the same responses as r.

        The point of r is the intersection of all responses containing r,
        minus the union of all responses not containing r.
        """
        # Deduplicate responses; sorted tuples make them hashable.
        leakage = set(tuple(sorted(row)) for row in rids)
        p_r: Dict[int, Set[int]] = dict()
        for r in range(len(self.db())):
            intersect_cand = [c for c in leakage if r in c]
            union_cand = [c for c in leakage if r not in c]
            intersect_set = reduce(np.intersect1d, intersect_cand) if len(intersect_cand) > 0 else []
            union_set = reduce(np.union1d, union_cand) if len(union_cand) > 0 else []
            p_r[r] = set(np.setdiff1d(intersect_set, union_set, assume_unique=True))
        return p_r
    def sorting(self, rids: List[Set[int]]) -> Dict[int, int]:
        """Order the recovered points and map each record (0-based keys) to its
        value, i.e. the index of the first interval I_i containing it.

        On any failure condition the method logs a warning and returns
        min(DB) for every record.
        """
        val_r: Dict[int, int] = dict()
        # big_i[i] is the set of records covered by the first i values.
        big_i: Dict[int, Set[int]] = dict()
        big_n = self.db().get_num_of_values()
        big_r = self.db().get_n()
        p_r = self.partitioning(rids)
        leakage = set(tuple(sorted(row)) for row in rids)
        points_set = set(tuple(sorted(row)) for row in p_r.values())
        # set of distinct points; in which each point contains a set of records
        if len(points_set) < big_n:
            log.warning(f"{self.name()} Failed, The Set of recreated points are not enough... Return min(DB) n times")
            val_r = {_: self.db().get_min() for _ in range(len(self.db()))}
        else:
            # Start from any proper-subset response and greedily grow S while it
            # overlaps a query without covering all records; R\S is then the
            # point holding the extreme value.
            s = [a for a in leakage if len(a) < len(self.db())].pop(0)
            for q in leakage:
                if (len(np.intersect1d(q, s, assume_unique=True)) > 0
                        and len(np.setdiff1d(q, s, assume_unique=True)) > 0
                        and len(np.union1d(q, s)) < big_r):
                    s = np.union1d(s, q)
            r_s = tuple(np.setdiff1d(np.arange(big_r), s, assume_unique=True))
            if r_s not in points_set:
                log.warning(
                    f"{self.name()} failed, The set diffrence of records R\S doesn't match a single point..."
                    f" Return min(DB) n times")
                val_r = {_: self.db().get_min() for _ in range(len(self.db()))}
                return val_r
            else:
                big_i[1] = r_s
                # Extend the chain I_1 ⊂ I_2 ⊂ ... by one point per iteration.
                for i in range(1, big_n):
                    # Queries that straddle the boundary of the current interval.
                    q_prim = set()
                    for q in leakage:
                        if (len(np.intersect1d(q, big_i[i], assume_unique=True)) > 0
                                and len(np.setdiff1d(q, big_i[i], assume_unique=True)) > 0):
                            q_prim.add(q)
                    # Candidate next point: common records of all straddling
                    # queries that are not yet in the interval.
                    big_t = np.setdiff1d(reduce(np.intersect1d, q_prim), big_i[i], assume_unique=True)
                    # Shrink T by removing records reachable without crossing it.
                    for q in leakage:
                        if (len(np.intersect1d(q, big_t, assume_unique=True)) > 0
                                and len(np.setdiff1d(q, np.union1d(big_t, big_i[i]), assume_unique=True)) > 0
                                and len(np.setdiff1d(big_t, q)) > 0):
                            big_t = np.setdiff1d(big_t, q)
                    if tuple(big_t) not in points_set:
                        log.warning(
                            f"{self.name()} Failed, The |Val(T)|!=1... Return min(DB) n times")
                        "No need to check the next big_i[i+1] nor set it since the attack should fail and return ⊥"
                        val_r = {_: self.db().get_min() for _ in range(len(self.db()))}
                        return val_r
                    else:
                        big_i[i + 1] = np.union1d(big_t, big_i[i])
            # A record's value is the index of the smallest interval containing it.
            for r in range(len(self.db())):
                val_r[r] = min([key for (key, value) in big_i.items() if r in value])
        return val_r
    def recover(self, queries: Iterable[Iterable[int]]) -> List[int]:
        """Run the attack on the observed queries; returns values in record order."""
        log.info(f"Starting {self.name()}.")
        rids = self.required_leakage()[0](self.db(), queries)
        val = self.sorting(rids)
        log.info(f"Reconstruction completed.")
        return [val[i] for i in range(len(val.keys()))]
class LMPappRec(RangeAttack):
    """
    Implements the Access Pattern Approximate reconstruction Range attack from [LMP17].
    PS. The dataset should always be dense; Max accepted error is 75% and N should be a multiple of 4 for optimal result
    If return_mid_point is True, the attack will return the mid-point of the calculated interval. If False, the real
    value will be returned if it is in the interval, i.e., if the interval was correct.
    """
    # Whether recover() reports the mid-point of each estimated interval.
    __return_mid_point: bool
    # Approximation error bound epsilon (fraction of N), default 0.25.
    __error: float
    def __init__(self, db: RangeDatabase, return_mid_point: bool = True, error=0.25):
        """Create the attack for `db` with the given output mode and error bound."""
        super().__init__(db)
        self.__return_mid_point = return_mid_point
        self.__error = error
    @classmethod
    def name(cls) -> str:
        # Human-readable identifier used in log messages and reports.
        return "LMP-approx"
    @classmethod
    def required_leakage(cls) -> List[LeakagePattern[Set[int]]]:
        # Only the access pattern (response identity) leakage is required.
        return [ResponseIdentity()]
    @classmethod
    def __partitioning(cls, r: int, rids: List[Set[int]]) -> List[Set[int]]:
        """Split the record space around record ``r``.

        Finds the pair of responses (q_l, q_r) that both contain r's point M,
        overlap only in M, and jointly cover the most records — the two
        "halves" around r — then extends each half outward with the largest
        compatible response (q_l_prime / q_r_prime).

        Returns [q_l_prime, q_l, q_r, q_r_prime, big_m].
        Raises ValueError (via max() on an empty dict) when no suitable
        halves exist for r; the caller is expected to handle that.
        """
        # Deduplicate responses; sorted tuples make them hashable.
        leakage = set(tuple(sorted(row)) for row in rids)
        halves = dict()
        halves_l = dict()
        halves_r = dict()
        intersect_cand = [c for c in leakage if r in c]
        # big_m: the point of r — records indistinguishable from r.
        big_m = set(reduce(np.intersect1d, intersect_cand)) if len(intersect_cand) > 0 else set()
        # Keep pairs whose overlap is exactly big_m, keyed by covered size.
        for a, b in itertools.combinations(leakage, 2):
            if big_m == set(np.intersect1d(a, b, assume_unique=True)) and len(a) > 1 and len(b) > 1:
                halves.update({len(np.union1d(a, b)): [a, b]})
        max_length = max(halves.keys())
        q_l = halves[max_length][0]
        q_r = halves[max_length][1]
        # Extend each half with responses overlapping it but touching the
        # other half only inside big_m.
        for q in leakage:
            if (len(np.intersect1d(q, q_l, assume_unique=True)) > 0
                    and set(np.intersect1d(q, q_r, assume_unique=True)).issubset(big_m)):
                halves_l.update({len(np.union1d(q, q_l)): [q, q_l]})
            if (len(np.intersect1d(q, q_r, assume_unique=True)) > 0
                    and set(np.intersect1d(q, q_l, assume_unique=True)).issubset(big_m)):
                halves_r.update({len(np.union1d(q, q_r)): [q, q_r]})
        max_length_r = max(halves_r.keys())
        max_length_l = max(halves_l.keys())
        q_l_prime = halves_l[max_length_l][0]
        q_r_prime = halves_r[max_length_r][0]
        return [q_l_prime, q_l, q_r, q_r_prime, big_m]
def __sorting(self, error: float, rids: List[Set[int]]) -> Dict[int, int]:
val_r: Dict[int, int] = dict()
big_n = self.db().get_num_of_values()
big_r = set(range(self.db().get_n()))
leakage = set(tuple(sorted(row)) for row in rids)
flag = False
for i in range(len(self.db())):
if flag is True:
break
try:
p_r = self.__partitioning(i, rids)
except ValueError:
log.debug(f"Encountered ValueError")
continue
*union_cand, big_m = p_r # get the first 4 elements in union_cand and the last element in big_m
union_set = reduce(np.union1d, union_cand) if len(union_cand) > 0 else set()
if set(union_set) == big_r:
q_l_prime = p_r[0]
q_l = p_r[1]
q_r = p_r[2]
q_r_prime = p_r[3]
coupon_l = set()
coupon_r = set()
half_l = np.union1d(q_l_prime, q_l)
half_r = np.union1d(q_r_prime, q_r)
for q in leakage:
if big_m.issubset(q):
coupon_l.add(frozenset(np.setdiff1d(q, half_r)))
coupon_r.add(frozenset(np.setdiff1d(q, half_l)))
coupon_l = list(filter(None, coupon_l))
coupon_r = list(filter(None, coupon_r))
n_l = len(coupon_l)
n_r = len(coupon_r)
if (big_n - (n_l + n_r + 1)) <= (error * big_n):
log.debug(f"Approximate reconstruction succeeded with precision ɛN={(error * big_n)}... ")
coupon_l = sorted(coupon_l, key=len)
coupon_r = sorted(coupon_r, key=len)
for r in range(len(self.db())):
min_val_r = n_l + 1
if r in half_l and len([coupon_l.index(_) + 1 for _ in coupon_l if r in _]) > 0:
min_val_r = n_l + 1 - min([coupon_l.index(_) + 1 for _ in coupon_l if r in _])
elif r in big_m:
min_val_r = n_l + 1
elif r in half_r and len([coupon_r.index(_) + 1 for _ in coupon_r if r in _]) > 0:
min_val_r = n_l + 1 + min([coupon_r.index(_) + 1 for _ in coupon_r if r in _])
max_val_r = min_val_r + (big_n - (n_l + n_r + 1))
"""Return either the exact value if val_r[r]∈[minVal_r, minVal_r+k] (or its reflection)
or mid-point of the recovered interval"""
if self.db().__getitem__(r) in range(min_val_r, max_val_r + 1) and not self.__return_mid_point:
val_r[r] = self.db().__getitem__(r)
elif big_n - self.db().__getitem__(r) + 1 in range(min_val_r, max_val_r + 1) and \
not self.__return_mid_point:
val_r[r] = big_n - self.db().__getitem__(r) + 1
else:
val_r[r] = min(min_val_r, max_val_r) + abs(min_val_r - max_val_r) // 2
flag = True
if flag is False:
log.warning(f"{self.name()} The approximate reconstruction has Failed... Return min(DB) n times")
val_r = {_: self.db().get_min() for _ in range(len(self.db()))}
return val_r
def recover(self, queries: Iterable[Iterable[int]]) -> List[int]:
log.info(f"Starting {self.name()} with {self.__return_mid_point}, {self.__error}.")
rids = self.required_leakage()[0](self.db(), queries)
val = self.__sorting(error=self.__error, rids=rids)
log.info(f"Reconstruction completed.")
return [val[i] for i in range(len(val.keys()))]
class LMPaux(RangeAttack):
    """
    Implements the data reconstruction Range attack from [LMP17] using auxiliary distribution for the target dataset
    based on Access pattern & Rank leakage.
    """

    @classmethod
    def name(cls) -> str:
        return "LMP-aux"

    @classmethod
    def required_leakage(cls) -> List[LeakagePattern[Set[int]]]:
        # Needs both the access pattern and the rank leakage.
        return [ResponseIdentity(), Rank()]

    def __partition(self, rids: List[Set[int]], rank: List[Tuple[int, int]]) \
            -> Dict[int, Tuple[int, int]]:
        """For each record r, compute the minimal rank interval (min_pos, max_pos)
        it can occupy: ranks common to every query that returned r, minus ranks
        touched by any query that did not return r.

        Raises ValueError (via np.amax/np.amin on empty input) when that
        difference is empty for some record.
        """
        leakage = list(zip(rank, rids))
        s_r: Dict[int, Tuple[int, int]] = dict()
        for r in range(len(self.db())):
            # Rank ranges of queries that did / did not return record r.
            intersect_cand = [np.arange(_[0] + 1, _[1] + 1) for _, R in leakage if r in R]
            union_cand = [np.arange(_[0] + 1, _[1] + 1) for _, R in leakage if r not in R]
            intersect_set = reduce(np.intersect1d, intersect_cand) if len(intersect_cand) > 0 else []
            union_set = reduce(np.union1d, union_cand) if len(union_cand) > 0 else []
            max_pos = np.amax(np.setdiff1d(intersect_set, union_set, assume_unique=True))
            min_pos = np.amin(np.setdiff1d(intersect_set, union_set, assume_unique=True))
            s_r[r] = (min_pos, max_pos)
        return s_r

    def __sorting(self, rids: List[Set[int]], rank: List[Tuple[int, int]]) \
            -> Dict[int, int]:
        """Estimate each record's value (keys 1..n) by matching its rank
        interval against the auxiliary value distribution via a binomial model.
        Falls back to min(DB) for all records if partitioning fails.
        """
        val_r: Dict[int, int] = dict()
        big_r = self.db().get_n()
        # computing the minimal intervals containing the position of each record
        try:
            s_r = self.__partition(rids, rank)
        except ValueError:
            log.warning(f"{self.name()} Partition Failed... Return min(DB) n times")
            val_r = {_: self.db().get_min() for _ in range(1, len(self.db()) + 1)}
            return val_r
        prob_dist = self.db().get_weights()
        prob_dist = dict(sorted(prob_dist.items()))
        big_z = list(prob_dist.keys())  # distinct values occurring in the DB
        pdf = list(prob_dist.values())
        cdf = np.cumsum(pdf)  # cumulative distribution function of the weights of values in DB
        for r in range(len(self.db())):
            a = s_r[r][0] - 1
            b = s_r[r][1]
            # x-1 = 1/rank(a) = z or z+1; we pick the optimal one based on its probability
            # check if z+1 ϵ [1,N] and also P_r[z+1] > P_r[z]
            p_ra = [(z, binom.pmf(k=a, n=big_r, p=cdf[big_z.index(z)]))
                    for z in big_z]  # list[Tuple(z, prob mass func(z))]
            est_z = max(p_ra, key=lambda t: t[1])  # max(p_ra) by the second element, which is the pmf
            if est_z[0] + 1 in big_z and est_z[1] < binom.pmf(k=a, n=big_r, p=cdf[big_z.index(est_z[0] + 1)]):
                x = est_z[0] + 2
            else:
                x = est_z[0] + 1
            # y = 1/rank(b) = z or z+1; we pick the optimal one based on its probability
            p_rb = [(z, binom.pmf(k=b, n=big_r, p=cdf[big_z.index(z)])) for z in big_z]
            est_z = max(p_rb, key=lambda t: t[1])
            if est_z[0] + 1 in big_z and est_z[1] < binom.pmf(k=b, n=big_r, p=cdf[big_z.index(est_z[0] + 1)]):
                y = est_z[0] + 1
            else:
                y = est_z[0]
            if x > y:
                x, y = y, x  # switch x & y values if x > y to generate range [x, y]
            # Weighted mean of the candidate values in [x, y], rounded.
            val_r[r + 1] = np.round(sum([i * prob_dist[i] for i in range(x, y + 1) if i in big_z]
                                          ) / sum([prob_dist[i] for i in range(x, y + 1) if i in big_z]))
        return val_r

    def recover(self, queries: Iterable[Tuple[int, int]]) -> List[int]:
        log.info(f"Starting {self.name()}.")
        rids = self.required_leakage()[0](self.db(), queries)
        rank = self.required_leakage()[1](self.db(), queries)
        val = self.__sorting(rids, rank)
        log.info(f"Reconstruction completed.")
        # __sorting keys records 1..n; shift back to a 0-based list.
        return [val[i + 1] for i in range(len(val.keys()))]
| [
"logging.getLogger",
"numpy.intersect1d",
"numpy.union1d",
"numpy.amin",
"functools.reduce",
"itertools.combinations",
"numpy.setdiff1d",
"numpy.cumsum",
"numpy.amax",
"numpy.arange"
] | [((292, 311), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (301, 311), False, 'from logging import getLogger\n'), ((8361, 8395), 'itertools.combinations', 'itertools.combinations', (['leakage', '(2)'], {}), '(leakage, 2)\n', (8383, 8395), False, 'import itertools\n'), ((15307, 15321), 'numpy.cumsum', 'np.cumsum', (['pdf'], {}), '(pdf)\n', (15316, 15321), True, 'import numpy as np\n'), ((1372, 1430), 'numpy.setdiff1d', 'np.setdiff1d', (['intersect_set', 'union_set'], {'assume_unique': '(True)'}), '(intersect_set, union_set, assume_unique=True)\n', (1384, 1430), True, 'import numpy as np\n'), ((1017, 1042), 'numpy.arange', 'np.arange', (['_[0]', '(_[1] + 1)'], {}), '(_[0], _[1] + 1)\n', (1026, 1042), True, 'import numpy as np\n'), ((1100, 1125), 'numpy.arange', 'np.arange', (['_[0]', '(_[1] + 1)'], {}), '(_[0], _[1] + 1)\n', (1109, 1125), True, 'import numpy as np\n'), ((1189, 1227), 'functools.reduce', 'reduce', (['np.intersect1d', 'intersect_cand'], {}), '(np.intersect1d, intersect_cand)\n', (1195, 1227), False, 'from functools import reduce\n'), ((1287, 1317), 'functools.reduce', 'reduce', (['np.union1d', 'union_cand'], {}), '(np.union1d, union_cand)\n', (1293, 1317), False, 'from functools import reduce\n'), ((1452, 1469), 'numpy.amax', 'np.amax', (['set_diff'], {}), '(set_diff)\n', (1459, 1469), True, 'import numpy as np\n'), ((3455, 3493), 'functools.reduce', 'reduce', (['np.intersect1d', 'intersect_cand'], {}), '(np.intersect1d, intersect_cand)\n', (3461, 3493), False, 'from functools import reduce\n'), ((3553, 3583), 'functools.reduce', 'reduce', (['np.union1d', 'union_cand'], {}), '(np.union1d, union_cand)\n', (3559, 3583), False, 'from functools import reduce\n'), ((3640, 3698), 'numpy.setdiff1d', 'np.setdiff1d', (['intersect_set', 'union_set'], {'assume_unique': '(True)'}), '(intersect_set, union_set, assume_unique=True)\n', (3652, 3698), True, 'import numpy as np\n'), ((8262, 8300), 'functools.reduce', 'reduce', 
(['np.intersect1d', 'intersect_cand'], {}), '(np.intersect1d, intersect_cand)\n', (8268, 8300), False, 'from functools import reduce\n'), ((10087, 10117), 'functools.reduce', 'reduce', (['np.union1d', 'union_cand'], {}), '(np.union1d, union_cand)\n', (10093, 10117), False, 'from functools import reduce\n'), ((10412, 10438), 'numpy.union1d', 'np.union1d', (['q_l_prime', 'q_l'], {}), '(q_l_prime, q_l)\n', (10422, 10438), True, 'import numpy as np\n'), ((10464, 10490), 'numpy.union1d', 'np.union1d', (['q_r_prime', 'q_r'], {}), '(q_r_prime, q_r)\n', (10474, 10490), True, 'import numpy as np\n'), ((13958, 13987), 'numpy.arange', 'np.arange', (['(_[0] + 1)', '(_[1] + 1)'], {}), '(_[0] + 1, _[1] + 1)\n', (13967, 13987), True, 'import numpy as np\n'), ((14045, 14074), 'numpy.arange', 'np.arange', (['(_[0] + 1)', '(_[1] + 1)'], {}), '(_[0] + 1, _[1] + 1)\n', (14054, 14074), True, 'import numpy as np\n'), ((14138, 14176), 'functools.reduce', 'reduce', (['np.intersect1d', 'intersect_cand'], {}), '(np.intersect1d, intersect_cand)\n', (14144, 14176), False, 'from functools import reduce\n'), ((14236, 14266), 'functools.reduce', 'reduce', (['np.union1d', 'union_cand'], {}), '(np.union1d, union_cand)\n', (14242, 14266), False, 'from functools import reduce\n'), ((14328, 14386), 'numpy.setdiff1d', 'np.setdiff1d', (['intersect_set', 'union_set'], {'assume_unique': '(True)'}), '(intersect_set, union_set, assume_unique=True)\n', (14340, 14386), True, 'import numpy as np\n'), ((14418, 14476), 'numpy.setdiff1d', 'np.setdiff1d', (['intersect_set', 'union_set'], {'assume_unique': '(True)'}), '(intersect_set, union_set, assume_unique=True)\n', (14430, 14476), True, 'import numpy as np\n'), ((1496, 1518), 'numpy.amax', 'np.amax', (['intersect_set'], {}), '(intersect_set)\n', (1503, 1518), True, 'import numpy as np\n'), ((1567, 1585), 'numpy.amin', 'np.amin', (['union_set'], {}), '(union_set)\n', (1574, 1585), True, 'import numpy as np\n'), ((4772, 4788), 'numpy.union1d', 'np.union1d', 
(['s', 'q'], {}), '(s, q)\n', (4782, 4788), True, 'import numpy as np\n'), ((4827, 4843), 'numpy.arange', 'np.arange', (['big_r'], {}), '(big_r)\n', (4836, 4843), True, 'import numpy as np\n'), ((5633, 5663), 'functools.reduce', 'reduce', (['np.intersect1d', 'q_prim'], {}), '(np.intersect1d, q_prim)\n', (5639, 5663), False, 'from functools import reduce\n'), ((6550, 6577), 'numpy.union1d', 'np.union1d', (['big_t', 'big_i[i]'], {}), '(big_t, big_i[i])\n', (6560, 6577), True, 'import numpy as np\n'), ((8425, 8465), 'numpy.intersect1d', 'np.intersect1d', (['a', 'b'], {'assume_unique': '(True)'}), '(a, b, assume_unique=True)\n', (8439, 8465), True, 'import numpy as np\n'), ((8721, 8763), 'numpy.intersect1d', 'np.intersect1d', (['q', 'q_l'], {'assume_unique': '(True)'}), '(q, q_l, assume_unique=True)\n', (8735, 8763), True, 'import numpy as np\n'), ((8949, 8991), 'numpy.intersect1d', 'np.intersect1d', (['q', 'q_r'], {'assume_unique': '(True)'}), '(q, q_r, assume_unique=True)\n', (8963, 8991), True, 'import numpy as np\n'), ((4566, 4606), 'numpy.intersect1d', 'np.intersect1d', (['q', 's'], {'assume_unique': '(True)'}), '(q, s, assume_unique=True)\n', (4580, 4606), True, 'import numpy as np\n'), ((4644, 4682), 'numpy.setdiff1d', 'np.setdiff1d', (['q', 's'], {'assume_unique': '(True)'}), '(q, s, assume_unique=True)\n', (4656, 4682), True, 'import numpy as np\n'), ((4720, 4736), 'numpy.union1d', 'np.union1d', (['q', 's'], {}), '(q, s)\n', (4730, 4736), True, 'import numpy as np\n'), ((6032, 6054), 'numpy.setdiff1d', 'np.setdiff1d', (['big_t', 'q'], {}), '(big_t, q)\n', (6044, 6054), True, 'import numpy as np\n'), ((8533, 8549), 'numpy.union1d', 'np.union1d', (['a', 'b'], {}), '(a, b)\n', (8543, 8549), True, 'import numpy as np\n'), ((8797, 8839), 'numpy.intersect1d', 'np.intersect1d', (['q', 'q_r'], {'assume_unique': '(True)'}), '(q, q_r, assume_unique=True)\n', (8811, 8839), True, 'import numpy as np\n'), ((8896, 8914), 'numpy.union1d', 'np.union1d', (['q', 'q_l'], {}), 
'(q, q_l)\n', (8906, 8914), True, 'import numpy as np\n'), ((9025, 9067), 'numpy.intersect1d', 'np.intersect1d', (['q', 'q_l'], {'assume_unique': '(True)'}), '(q, q_l, assume_unique=True)\n', (9039, 9067), True, 'import numpy as np\n'), ((9124, 9142), 'numpy.union1d', 'np.union1d', (['q', 'q_r'], {}), '(q, q_r)\n', (9134, 9142), True, 'import numpy as np\n'), ((10615, 10638), 'numpy.setdiff1d', 'np.setdiff1d', (['q', 'half_r'], {}), '(q, half_r)\n', (10627, 10638), True, 'import numpy as np\n'), ((10688, 10711), 'numpy.setdiff1d', 'np.setdiff1d', (['q', 'half_l'], {}), '(q, half_l)\n', (10700, 10711), True, 'import numpy as np\n'), ((5403, 5450), 'numpy.intersect1d', 'np.intersect1d', (['q', 'big_i[i]'], {'assume_unique': '(True)'}), '(q, big_i[i], assume_unique=True)\n', (5417, 5450), True, 'import numpy as np\n'), ((5496, 5541), 'numpy.setdiff1d', 'np.setdiff1d', (['q', 'big_i[i]'], {'assume_unique': '(True)'}), '(q, big_i[i], assume_unique=True)\n', (5508, 5541), True, 'import numpy as np\n'), ((5766, 5810), 'numpy.intersect1d', 'np.intersect1d', (['q', 'big_t'], {'assume_unique': '(True)'}), '(q, big_t, assume_unique=True)\n', (5780, 5810), True, 'import numpy as np\n'), ((5966, 5988), 'numpy.setdiff1d', 'np.setdiff1d', (['big_t', 'q'], {}), '(big_t, q)\n', (5978, 5988), True, 'import numpy as np\n'), ((5872, 5899), 'numpy.union1d', 'np.union1d', (['big_t', 'big_i[i]'], {}), '(big_t, big_i[i])\n', (5882, 5899), True, 'import numpy as np\n')] |
import numpy as np
import copy
import cv2
from parameter import *
class BoundBox:
    """Axis-aligned bounding box in (center-x, center-y, width, height) form,
    with an objectness score `c` and one probability slot per class."""

    def __init__(self, class_num):
        # Geometry and confidence start zeroed; `probs` holds `class_num` scores.
        self.x, self.y, self.w, self.h, self.c = 0., 0., 0., 0., 0.
        self.probs = np.zeros((class_num,))

    def iou(self, box):
        """Return the intersection-over-union of this box and `box`."""
        intersection = self.intersect(box)
        union = self.w * self.h + box.w * box.h - intersection
        return intersection / union

    def intersect(self, box):
        """Return the intersection area of this box and `box`."""
        width = self.__overlap([self.x - self.w / 2, self.x + self.w / 2], [box.x - box.w / 2, box.x + box.w / 2])
        height = self.__overlap([self.y - self.h / 2, self.y + self.h / 2], [box.y - box.h / 2, box.y + box.h / 2])
        return width * height

    def __overlap(self, interval_a, interval_b):
        """Return the overlap length of 1-D intervals [x1, x2] and [x3, x4],
        or 0 if they are disjoint."""
        x1, x2 = interval_a
        x3, x4 = interval_b
        if x3 < x1:
            if x4 < x1:
                return 0
            else:
                return min(x2, x4) - x1
        else:
            if x2 < x3:
                return 0
            else:
                # BUGFIX: when x3 >= x1 the overlap starts at x3, not x1;
                # the previous `min(x2, x4) - x1` overestimated the overlap
                # (and hence the intersection area and IoU).
                return min(x2, x4) - x3
class WeightReader:
    """Sequential reader over a binary weight file of float32 values.

    The cursor starts at index 4, skipping the first four header values;
    `read_bytes` then hands out consecutive slices of the weight array.
    """

    def __init__(self, weight_file):
        self.offset = 4
        self.all_weights = np.fromfile(weight_file, dtype='float32')

    def read_bytes(self, size):
        """Consume and return the next `size` float32 values."""
        start = self.offset
        self.offset = start + size
        return self.all_weights[start:self.offset]

    def reset(self):
        """Rewind the cursor to just past the 4-value header."""
        self.offset = 4
def interpret_netout(image, netout):
    """Decode raw YOLO output into detections and draw them on the image.

    Args:
        image: the image the predictions refer to; boxes/labels are drawn
            onto it in place with cv2.
        netout: network output indexed as [row, col, box, 5 + CLASS]:
            (x, y, w, h, confidence) followed by per-class scores.
    Returns:
        (image, mark): the annotated image and a list of detection dicts with
        keys 'label', 'prob', 'xmin', 'ymin', 'xmax', 'ymax'.
    """
    boxes = []
    # interpret the output by the network
    for row in range(GRID_H):
        for col in range(GRID_W):
            for b in range(BOX):
                box = BoundBox(CLASS)
                # first 5 weights for x, y, w, h and confidence
                box.x, box.y, box.w, box.h, box.c = netout[row, col, b, :5]
                # Center is offset within its grid cell, then normalized to [0, 1].
                box.x = (col + sigmoid(box.x)) / GRID_W
                box.y = (row + sigmoid(box.y)) / GRID_H
                # Width/height are scaled by the per-box anchor priors.
                box.w = ANCHORS[2 * b + 0] * np.exp(box.w) / GRID_W
                box.h = ANCHORS[2 * b + 1] * np.exp(box.h) / GRID_H
                box.c = sigmoid(box.c)
                # last 20 weights for class likelihoods
                classes = netout[row, col, b, 5:]
                box.probs = softmax(classes) * box.c
                # Zero out class scores below the detection threshold.
                box.probs *= box.probs > THRESHOLD
                boxes.append(box)
    # suppress non-maximal boxes (per-class NMS with IoU >= 0.4)
    for c in range(CLASS):
        sorted_indices = list(reversed(np.argsort([box.probs[c] for box in boxes])))
        for i in range(len(sorted_indices)):
            index_i = sorted_indices[i]
            if boxes[index_i].probs[c] == 0:
                continue
            else:
                for j in range(i + 1, len(sorted_indices)):
                    index_j = sorted_indices[j]
                    if boxes[index_i].iou(boxes[index_j]) >= 0.4:
                        boxes[index_j].probs[c] = 0
    # draw the boxes using a threshold
    mark = []
    for box in boxes:
        max_indx = np.argmax(box.probs)
        max_prob = box.probs[max_indx]
        thresh = THRESHOLD
        #if LABELS[max_indx] == 'traffic_light':
        #    thresh = 0.6
        if max_prob > thresh:
            # Convert normalized center/size to pixel corner coordinates.
            xmin = int((box.x - box.w / 2) * image.shape[1])
            xmax = int((box.x + box.w / 2) * image.shape[1])
            ymin = int((box.y - box.h / 2) * image.shape[0])
            ymax = int((box.y + box.h / 2) * image.shape[0])
            #if LABELS[max_indx] == 'traffic_light':
            #    ymin = int(ymax * 0.7)
            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), COLORS[max_indx], 5)
            cv2.putText(image, LABELS[max_indx]+" "+str(round(max_prob,2)), (xmin, ymin - 12), 0, 1e-3 * image.shape[0], (0, 255, 0), 2)
            mark.append({'label' : LABELS[max_indx], 'prob' : max_prob, 'xmin' : xmin,
                         'ymin' : ymin, 'xmax' : xmax, 'ymax' : ymax})
    return image, mark
def parse_annotation(ann_dir):
    """Parse a space-separated annotation file into per-image records.

    Each line has the form:
        file_name width height xmin ymin xmax ymax label
    Consecutive lines for the same file name are merged into one image
    record. Parsing stops at the first empty line.

    Args:
        ann_dir: path to the annotation text file.
    Returns:
        List of dicts with keys 'filename', 'height', 'width' and 'object'
        (a list of {'name', 'xmin', 'ymin', 'xmax', 'ymax'} dicts).
    """
    all_img = []
    current = ""
    # BUGFIX: use a context manager so the file handle is always closed
    # (the previous version opened the file and never closed it).
    with open(ann_dir, 'r') as f:
        for ann in f.read().split('\n'):
            img_data = ann.split(' ')
            if img_data == ['']:
                break
            file_name, width, height, xmin, ymin, xmax, ymax, label = img_data
            if not current == file_name:
                # NOTE(review): 'height' is filled from the width column and
                # 'width' from the height column, exactly as the original code
                # did — confirm whether this swap is intentional before
                # relying on these keys.
                img = {'height': float(width), 'width': float(height), 'object': [], 'filename': file_name}
                current = file_name
                all_img.append(img)
            img['object'].append({'xmin': float(xmin), 'ymin': float(ymin),
                                  'name': label, 'xmax': float(xmax),
                                  'ymax': float(ymax)})
    return all_img
def aug_img(train_instance):
    """Load and randomly augment one training image, adjusting its boxes.

    Applies a random scale (1.0–1.1), random crop/translation, a random
    horizontal flip, random per-channel re-coloring, then resizes to
    (NORM_H, NORM_W) and converts BGR -> RGB. Object coordinates are
    transformed accordingly and clamped to the image.

    Args:
        train_instance: dict with 'filename' and 'object' (list of box dicts)
            as produced by parse_annotation.
    Returns:
        (img, all_obj): the augmented image (float, roughly scaled into
        [0, 1]) and the transformed copy of the object list.
    """
    path = train_instance['filename']
    # Deep copy so the caller's annotation dicts are not mutated.
    all_obj = copy.deepcopy(train_instance['object'][:])
    img = cv2.imread(img_dir + path)
    h, w, c = img.shape
    # scale the image (factor in [1.0, 1.1))
    scale = np.random.uniform() / 10. + 1.
    img = cv2.resize(img, (0, 0), fx=scale, fy=scale)
    # translate the image: crop an h x w window at a random offset
    max_offx = (scale - 1.) * w
    max_offy = (scale - 1.) * h
    offx = int(np.random.uniform() * max_offx)
    offy = int(np.random.uniform() * max_offy)
    img = img[offy: (offy + h), offx: (offx + w)]
    # flip the image horizontally with probability 0.5
    flip = np.random.binomial(1, .5)
    if flip > 0.5: img = cv2.flip(img, 1)
    # re-color: random per-channel gain in [1, 2), then rescale to ~[0, 1]
    t = [np.random.uniform()]
    t += [np.random.uniform()]
    t += [np.random.uniform()]
    t = np.array(t)
    img = img * (1 + t)
    img = img / (255. * 2.)
    # resize the image to standard size
    img = cv2.resize(img, (NORM_H, NORM_W))
    # BGR -> RGB channel reversal
    img = img[:, :, ::-1]
    # fix object's position and size to match scale, crop, resize and flip
    for obj in all_obj:
        for attr in ['xmin', 'xmax']:
            obj[attr] = int(obj[attr] * scale - offx)
            obj[attr] = int(obj[attr] * float(NORM_W) / w)
            obj[attr] = max(min(obj[attr], NORM_W), 0)
        for attr in ['ymin', 'ymax']:
            obj[attr] = int(obj[attr] * scale - offy)
            obj[attr] = int(obj[attr] * float(NORM_H) / h)
            obj[attr] = max(min(obj[attr], NORM_H), 0)
        if flip > 0.5:
            # Mirror the x-range; xmin/xmax swap roles after flipping.
            xmin = obj['xmin']
            obj['xmin'] = NORM_W - obj['xmax']
            obj['xmax'] = NORM_W - xmin
    return img, all_obj
def data_gen(all_img, batch_size):
    """Infinite batch generator for YOLO training.

    Shuffles the images, walks them in windows of `batch_size`, augments
    each image with aug_img, and builds the corresponding YOLO target
    tensor. Reshuffles once the list is exhausted. Note: the final window
    of an epoch may be smaller than the requested batch size.

    Args:
        all_img: list of annotation dicts from parse_annotation.
        batch_size: desired number of images per batch.
    Yields:
        (x_batch, y_batch): images of shape (n, NORM_W, NORM_H, 3) and
        targets of shape (n, GRID_W, GRID_H, BOX, 5 + CLASS).
    """
    num_img = len(all_img)
    shuffled_indices = np.random.permutation(np.arange(num_img))
    l_bound = 0
    r_bound = batch_size if batch_size < num_img else num_img
    while True:
        if l_bound == r_bound:
            # Epoch finished: restart the window and reshuffle.
            l_bound = 0
            r_bound = batch_size if batch_size < num_img else num_img
            shuffled_indices = np.random.permutation(np.arange(num_img))
        batch_size = r_bound - l_bound
        currt_inst = 0
        x_batch = np.zeros((batch_size, NORM_W, NORM_H, 3))
        y_batch = np.zeros((batch_size, GRID_W, GRID_H, BOX, 5 + CLASS))
        for index in shuffled_indices[l_bound:r_bound]:
            train_instance = all_img[index]
            # augment input image and fix object's position and size
            img, all_obj = aug_img(train_instance)
            # for obj in all_obj:
            #     cv2.rectangle(img[:,:,::-1], (obj['xmin'],obj['ymin']), (obj['xmax'],obj['ymax']), (1,1,0), 3)
            # plt.imshow(img); plt.show()
            # construct output from object's position and size
            for obj in all_obj:
                box = []
                # Box center in grid-cell units.
                center_x = .5 * (obj['xmin'] + obj['xmax'])  # xmin, xmax
                center_x = center_x / (float(NORM_W) / GRID_W)
                center_y = .5 * (obj['ymin'] + obj['ymax'])  # ymin, ymax
                center_y = center_y / (float(NORM_H) / GRID_H)
                grid_x = int(np.floor(center_x))
                grid_y = int(np.floor(center_y))
                if grid_x < GRID_W and grid_y < GRID_H:
                    obj_indx = LABELS.index(obj['name'])
                    box = [obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax']]
                    # Write the same target into every anchor slot of the cell:
                    # coordinates, objectness = 1, one-hot class vector.
                    y_batch[currt_inst, grid_y, grid_x, :, 0:4] = BOX * [box]
                    y_batch[currt_inst, grid_y, grid_x, :, 4] = BOX * [1.]
                    y_batch[currt_inst, grid_y, grid_x, :, 5:] = BOX * [[0.] * CLASS]
                    y_batch[currt_inst, grid_y, grid_x, :, 5 + obj_indx] = 1.0
            # concatenate batch input from the image
            x_batch[currt_inst] = img
            currt_inst += 1
            del img, all_obj
        yield x_batch, y_batch
        # Advance the window, clamping to the end of the image list.
        l_bound = r_bound
        r_bound = r_bound + batch_size
        if r_bound > num_img: r_bound = num_img
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); maps any real input into (0, 1).

    Works element-wise on NumPy arrays as well as on scalars.
    """
    denom = 1. + np.exp(-x)
    return 1. / denom
def softmax(x):
    """Softmax of `x` along axis 0.

    Subtracting the maximum before exponentiating makes the computation
    numerically stable for large inputs; mathematically the result is
    unchanged (e^(x-c) / sum(e^(x-c)) == e^x / sum(e^x) for any constant c).
    The previous form overflowed to inf/nan for inputs around 710+.
    """
    e = np.exp(x - np.max(x))
    return e / np.sum(e, axis=0)
def Rotate(src, degrees):
    """Rotate an image clockwise by a multiple of 90 degrees.

    Args:
        src: source image (cv2-compatible array).
        degrees: 90, 180 or 270 (rotated via transpose/flip). Any other
            value returns `src` unchanged.
    Returns:
        The rotated image, or `src` itself for unsupported angles.
    """
    if degrees == 90:
        dst = cv2.transpose(src)
        dst = cv2.flip(dst, 1)
    elif degrees == 180:
        dst = cv2.flip(src, -1)
    elif degrees == 270:
        dst = cv2.transpose(src)
        dst = cv2.flip(dst, 0)
    else:
        # BUGFIX: previously `dst` was never assigned for any other angle,
        # so the final `return dst` raised UnboundLocalError; fall back to
        # returning the input unmodified.
        dst = src
    return dst
def get_Object(image, mark, Check):
    """Post-process one detection: classify traffic-light color and update counters.

    For a 'traffic_light' detection, sums the pixel channels of the cropped
    box and converts the sums to HSV (rgb2hsv only uses channel ratios, so
    sums act like an average for hue); hue < 120 is relabeled "red",
    otherwise "green". The Check bookkeeping for the (possibly relabeled)
    label is then updated.

    Args:
        image: full frame; assumed channel order B,G,R from cv2.imread —
            TODO confirm.
        mark: detection dict with 'label', 'prob', 'xmin', 'ymin', 'xmax', 'ymax'.
        Check: per-label state; [0] is incremented, [1] reset to 0, [2] set
            True — presumably (count, miss-counter, seen-flag); confirm with
            the caller.
    Returns:
        (label, xmin, xmax, ymin, ymax) as ints, or (None, 0, 0, 0, 0) if
        color extraction fails.
    """
    label, prob, xmin, ymin, xmax, ymax = mark['label'], mark['prob'], mark['xmin'], \
                                          mark['ymin'], mark['xmax'], mark['ymax']
    #print("ymax : ",ymax)
    #print("label : ", label, "prob : ", prob, "xmin : ", xmin, "ymin : ", ymin, "xmax : ", xmax, "ymax : ", ymax)
    try:
        if label == 'traffic_light':
            Object = image[ymin:ymax, xmin:xmax, :]
            b, g, r = 0, 0, 0
            # Accumulate per-channel sums over the crop (BGR order assumed).
            for y in range(ymax - ymin):
                for x in range(xmax - xmin):
                    try:
                        b += Object[y, x, 0]
                        g += Object[y, x, 1]
                        r += Object[y, x, 2]
                    except:
                        # Skip out-of-range pixels silently.
                        continue
            h, s, v = rgb2hsv(r,g,b)
            if h < 120:
                label = "red"
                print("red ", h)
            elif h >= 120:
                label = "green"
                print("green ", h)
        # when detected: mark the label as seen and bump its counter
        Check[label][2] = True
        Check[label][0] += 1
        Check[label][1] = 0
    except:
        print("error in color extracting")
        return None, 0, 0, 0, 0
    return label, int(xmin), int(xmax), int(ymin), int(ymax)
def ccw(line, p2):
    """Orientation test of point p2 against the directed segment in `line`.

    Args:
        line: [x0, y0, x1, y1] — the segment from (x0, y0) to (x1, y1).
        p2: (x, y) point to test.
    Returns:
        1 if p2 is on the right of the segment, -1 if on the left,
        0 if collinear.
    """
    ax, ay, bx, by = line[0], line[1], line[2], line[3]
    # Cross product of (segment vector) x (vector to p2).
    cross = (bx - ax) * (p2[1] - ay) - (by - ay) * (p2[0] - ax)
    if cross > 0:
        return 1   # right
    if cross < 0:
        return -1  # left
    return 0
def rgb2hsv(r, g, b):
    """Convert 8-bit RGB components to HSV.

    Args:
        r, g, b: channel values on the 0-255 scale.
    Returns:
        (h, s, v): hue in degrees [0, 360), saturation and value in [0, 1].
    """
    r, g, b = r / 255.0, g / 255.0, b / 255.0
    c_max = max(r, g, b)
    c_min = min(r, g, b)
    delta = c_max - c_min
    # Hue depends on which channel dominates.
    if c_max == c_min:
        h = 0
    elif c_max == r:
        h = (60 * ((g - b) / delta) + 360) % 360
    elif c_max == g:
        h = (60 * ((b - r) / delta) + 120) % 360
    else:  # c_max == b
        h = (60 * ((r - g) / delta) + 240) % 360
    # Saturation is undefined for pure black; use 0 there.
    s = 0 if c_max == 0 else delta / c_max
    return h, s, c_max
"cv2.rectangle",
"numpy.fromfile",
"cv2.flip",
"cv2.transpose",
"numpy.arange",
"numpy.floor",
"numpy.argmax",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.random.uniform",
"numpy.argsort",
"copy.deepcopy",
"cv2.resize",
"cv2.imread",
"numpy.random.binomial"
] | [((4679, 4721), 'copy.deepcopy', 'copy.deepcopy', (["train_instance['object'][:]"], {}), "(train_instance['object'][:])\n", (4692, 4721), False, 'import copy\n'), ((4732, 4758), 'cv2.imread', 'cv2.imread', (['(img_dir + path)'], {}), '(img_dir + path)\n', (4742, 4758), False, 'import cv2\n'), ((4859, 4902), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)'], {'fx': 'scale', 'fy': 'scale'}), '(img, (0, 0), fx=scale, fy=scale)\n', (4869, 4902), False, 'import cv2\n'), ((5171, 5197), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {}), '(1, 0.5)\n', (5189, 5197), True, 'import numpy as np\n'), ((5355, 5366), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (5363, 5366), True, 'import numpy as np\n'), ((5471, 5504), 'cv2.resize', 'cv2.resize', (['img', '(NORM_H, NORM_W)'], {}), '(img, (NORM_H, NORM_W))\n', (5481, 5504), False, 'import cv2\n'), ((207, 229), 'numpy.zeros', 'np.zeros', (['(class_num,)'], {}), '((class_num,))\n', (215, 229), True, 'import numpy as np\n'), ((1153, 1194), 'numpy.fromfile', 'np.fromfile', (['weight_file'], {'dtype': '"""float32"""'}), "(weight_file, dtype='float32')\n", (1164, 1194), True, 'import numpy as np\n'), ((2928, 2948), 'numpy.argmax', 'np.argmax', (['box.probs'], {}), '(box.probs)\n', (2937, 2948), True, 'import numpy as np\n'), ((5222, 5238), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (5230, 5238), False, 'import cv2\n'), ((5264, 5283), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5281, 5283), True, 'import numpy as np\n'), ((5295, 5314), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5312, 5314), True, 'import numpy as np\n'), ((5326, 5345), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5343, 5345), True, 'import numpy as np\n'), ((6282, 6300), 'numpy.arange', 'np.arange', (['num_img'], {}), '(num_img)\n', (6291, 6300), True, 'import numpy as np\n'), ((6676, 6717), 'numpy.zeros', 'np.zeros', (['(batch_size, NORM_W, NORM_H, 3)'], {}), 
'((batch_size, NORM_W, NORM_H, 3))\n', (6684, 6717), True, 'import numpy as np\n'), ((6736, 6790), 'numpy.zeros', 'np.zeros', (['(batch_size, GRID_W, GRID_H, BOX, 5 + CLASS)'], {}), '((batch_size, GRID_W, GRID_H, BOX, 5 + CLASS))\n', (6744, 6790), True, 'import numpy as np\n'), ((8584, 8593), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (8590, 8593), True, 'import numpy as np\n'), ((8685, 8703), 'cv2.transpose', 'cv2.transpose', (['src'], {}), '(src)\n', (8698, 8703), False, 'import cv2\n'), ((8718, 8734), 'cv2.flip', 'cv2.flip', (['dst', '(1)'], {}), '(dst, 1)\n', (8726, 8734), False, 'import cv2\n'), ((3471, 3540), 'cv2.rectangle', 'cv2.rectangle', (['image', '(xmin, ymin)', '(xmax, ymax)', 'COLORS[max_indx]', '(5)'], {}), '(image, (xmin, ymin), (xmax, ymax), COLORS[max_indx], 5)\n', (3484, 3540), False, 'import cv2\n'), ((4818, 4837), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4835, 4837), True, 'import numpy as np\n'), ((5009, 5028), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5026, 5028), True, 'import numpy as np\n'), ((5056, 5075), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5073, 5075), True, 'import numpy as np\n'), ((8543, 8553), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (8549, 8553), True, 'import numpy as np\n'), ((8603, 8612), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (8609, 8612), True, 'import numpy as np\n'), ((8775, 8792), 'cv2.flip', 'cv2.flip', (['src', '(-1)'], {}), '(src, -1)\n', (8783, 8792), False, 'import cv2\n'), ((2385, 2428), 'numpy.argsort', 'np.argsort', (['[box.probs[c] for box in boxes]'], {}), '([box.probs[c] for box in boxes])\n', (2395, 2428), True, 'import numpy as np\n'), ((6575, 6593), 'numpy.arange', 'np.arange', (['num_img'], {}), '(num_img)\n', (6584, 6593), True, 'import numpy as np\n'), ((8833, 8851), 'cv2.transpose', 'cv2.transpose', (['src'], {}), '(src)\n', (8846, 8851), False, 'import cv2\n'), ((8866, 8882), 'cv2.flip', 'cv2.flip', (['dst', '(0)'], 
{}), '(dst, 0)\n', (8874, 8882), False, 'import cv2\n'), ((7626, 7644), 'numpy.floor', 'np.floor', (['center_x'], {}), '(center_x)\n', (7634, 7644), True, 'import numpy as np\n'), ((7675, 7693), 'numpy.floor', 'np.floor', (['center_y'], {}), '(center_y)\n', (7683, 7693), True, 'import numpy as np\n'), ((1909, 1922), 'numpy.exp', 'np.exp', (['box.w'], {}), '(box.w)\n', (1915, 1922), True, 'import numpy as np\n'), ((1977, 1990), 'numpy.exp', 'np.exp', (['box.h'], {}), '(box.h)\n', (1983, 1990), True, 'import numpy as np\n')] |
# Copyright 2018 Northwest University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import h5py
import numpy as np
# data_load_all(): load the training sets and testing sets
def data_load_all():
    """Load the gait dataset and split each participant's samples in half.

    Each of the 100 participants has 20 denoised signal samples; the
    precomputed random index matrix `num_idxs` selects 10 of them for
    training, and the remaining 10 become the test set.

    Returns:
        (Train, Test, person_num): lists of signal arrays and the number
        of participants.
    """
    f = h5py.File('gait_100.mat')
    # data: Denoised signals
    data = np.transpose(f['data_all_1'])
    nums = np.transpose(f['num_idxs'])
    # num_idxs: the randomly chosen per-participant training sample indices
    num_idxs = np.transpose(f[nums[0, 0]])
    print(num_idxs.shape)
    # person_num: the number of participants
    person_num = 100
    # each_person_sample: the number of samples per user
    each_person_sample = 20
    half = int(each_person_sample / 2)
    sample_ids = list(range(1, each_person_sample + 1))
    Train = []
    Test = []
    for person in range(person_num):
        train_ids = num_idxs[person, :]
        # The complement of the training indices forms the test split.
        test_ids = [s for s in sample_ids if s not in train_ids]
        for j in range(half):
            Train.append(np.transpose(f[data[person, int(train_ids[j]) - 1]]))
            Test.append(np.transpose(f[data[person, int(test_ids[j]) - 1]]))
    return Train, Test, person_num
"numpy.transpose",
"h5py.File"
] | [((706, 731), 'h5py.File', 'h5py.File', (['"""gait_100.mat"""'], {}), "('gait_100.mat')\n", (715, 731), False, 'import h5py\n'), ((772, 801), 'numpy.transpose', 'np.transpose', (["f['data_all_1']"], {}), "(f['data_all_1'])\n", (784, 801), True, 'import numpy as np\n'), ((813, 840), 'numpy.transpose', 'np.transpose', (["f['num_idxs']"], {}), "(f['num_idxs'])\n", (825, 840), True, 'import numpy as np\n'), ((928, 955), 'numpy.transpose', 'np.transpose', (['f[nums[0, 0]]'], {}), '(f[nums[0, 0]])\n', (940, 955), True, 'import numpy as np\n')] |
import numpy as np
from collections import OrderedDict
from misc.math_utils import find
from osu.local.beatmap.beatmap import Beatmap
from osu.local.hitobject.hitobject import Hitobject
from osu.local.hitobject.std.std import Std
from osu.local.hitobject.taiko.taiko import Taiko
from osu.local.hitobject.catch.catch import Catch
from osu.local.hitobject.mania.mania import Mania
from osu.local.hitobject.std.std_singlenote_io import StdSingleNoteIO
from osu.local.hitobject.std.std_holdnote_io import StdHoldNoteIO
from osu.local.hitobject.std.std_spinner_io import StdSpinnerIO
from osu.local.hitobject.taiko.taiko_singlenote_hitobject import TaikoSingleNoteHitobject
from osu.local.hitobject.taiko.taiko_holdnote_hitobject import TaikoHoldNoteHitobject
from osu.local.hitobject.taiko.taiko_spinner_hitobject import TaikoSpinnerHitobject
from osu.local.hitobject.catch.catch_singlenote_hitobject import CatchSingleNoteHitobject
from osu.local.hitobject.catch.catch_holdnote_hitobject import CatchHoldNoteHitobject
from osu.local.hitobject.catch.catch_spinner_hitobject import CatchSpinnerHitobject
from osu.local.hitobject.mania.mania_singlenote_io import ManiaSingleNoteIO
from osu.local.hitobject.mania.mania_holdnote_io import ManiaHoldNoteIO
'''
Handles beatmap loading
Input:
load_beatmap - load the beatmap specified
Output:
metadata - information about the beatmap
hitobjects - list of hitobjects present in the map
timingpoints - list of timing points present in the map
'''
class BeatmapIO():
    class Section():
        # Parser state: which section of the .osu beatmap file is currently
        # being read. SECTION_GENERAL..SECTION_HITOBJECTS key into
        # BeatmapIO.SECTION_MAP (built by init()); SECTION_NONE means no
        # section header has been encountered yet and has no parser.
        SECTION_NONE = 0
        SECTION_GENERAL = 1
        SECTION_EDITOR = 2
        SECTION_METADATA = 3
        SECTION_DIFFICULTY = 4
        SECTION_EVENTS = 5
        SECTION_TIMINGPOINTS = 6
        SECTION_COLOURS = 7
        SECTION_HITOBJECTS = 8
    @staticmethod
    def init():
        # Build the dispatch table from section id to the static parser that
        # consumes that section's lines. Must be called once before any
        # beatmap is parsed (SECTION_MAP is a class attribute).
        BeatmapIO.SECTION_MAP = {
            BeatmapIO.Section.SECTION_GENERAL      : BeatmapIO.__parse_general_section,
            BeatmapIO.Section.SECTION_EDITOR       : BeatmapIO.__parse_editor_section,
            BeatmapIO.Section.SECTION_METADATA     : BeatmapIO.__parse_metadata_section,
            BeatmapIO.Section.SECTION_DIFFICULTY   : BeatmapIO.__parse_difficulty_section,
            BeatmapIO.Section.SECTION_EVENTS       : BeatmapIO.__parse_events_section,
            BeatmapIO.Section.SECTION_TIMINGPOINTS : BeatmapIO.__parse_timingpoints_section,
            BeatmapIO.Section.SECTION_COLOURS      : BeatmapIO.__parse_colour_section,
            BeatmapIO.Section.SECTION_HITOBJECTS   : BeatmapIO.__parse_hitobjects_section
        }
"""
Opens a beatmap file and reads it
Args:
filepath: (string) filepath to the beatmap file to load
"""
@staticmethod
def open_beatmap(filepath=None):
with open(filepath, 'rt', encoding='utf-8') as beatmap_file:
beatmap = BeatmapIO.load_beatmap(beatmap_file)
return beatmap
"""
Loads beatmap data
Args:
beatmap_file: (string) contents of the beatmap file
"""
@staticmethod
def load_beatmap(beatmap_data):
beatmap = Beatmap()
BeatmapIO.__parse_beatmap_data(beatmap_data, beatmap)
BeatmapIO.__process_timing_points(beatmap)
if beatmap.gamemode == Beatmap.GAMEMODE_OSU or beatmap.gamemode == None:
BeatmapIO.__process_slider_timings(beatmap)
BeatmapIO.__process_hitobject_end_times(beatmap)
BeatmapIO.__process_slider_tick_times(beatmap)
if beatmap.gamemode == Beatmap.GAMEMODE_MANIA:
BeatmapIO.__process_columns(beatmap)
BeatmapIO.__validate(beatmap)
beatmap.set_cs_val(beatmap.difficulty.cs)
beatmap.set_ar_val(beatmap.difficulty.ar)
beatmap.set_od_val(beatmap.difficulty.od)
return beatmap
"""
Saves beatmap file data
Args:
filepath: (string) what to save the beatmap as
"""
@staticmethod
def save_beatmap(beatmap_data, filepath):
with open(filepath, 'wt', encoding='utf-8') as f:
f.write(beatmap_data)
"""
Returns:
MD5 checksum of the beatmap file
"""
@staticmethod
def get_md5(beatmap):
pass
@staticmethod
def __process_hitobject_end_times(beatmap):
beatmap.end_times = {}
for i in range(len(beatmap.hitobjects)):
if not beatmap.hitobjects[i].is_hitobject_type(Hitobject.CIRCLE):
beatmap.end_times[beatmap.hitobjects[i].end_time] = i
else:
beatmap.end_times[beatmap.hitobjects[i].time] = i
beatmap.end_times = OrderedDict(sorted(beatmap.end_times.items(), key=lambda x: x[0]))
# Validates beatmap data
@staticmethod
def __validate(beatmap):
if beatmap.difficulty.ar == None:
beatmap.difficulty.ar = beatmap.difficulty.od
if beatmap.difficulty.hp == None:
beatmap.difficulty.hp = beatmap.difficulty.od
if beatmap.gamemode == None:
beatmap.gamemode = Beatmap.GAMEMODE_OSU
@staticmethod
def __parse_beatmap_data(beatmap_data, beatmap):
BeatmapIO.__parse_beatmap_file_format(beatmap_data, beatmap)
BeatmapIO.__parse_beatmap_content(beatmap_data, beatmap)
beatmap.metadata.name = beatmap.metadata.artist + ' - ' + beatmap.metadata.title + ' (' + beatmap.metadata.creator + ') ' + '[' + beatmap.metadata.version + ']'
@staticmethod
def __parse_beatmap_file_format(beatmap_data, beatmap):
line = beatmap_data.readline()
data = line.split('osu file format v')
try: beatmap.metadata.beatmap_format = int(data[1])
except: return
@staticmethod
def __parse_beatmap_content(beatmap_data, beatmap):
if beatmap.metadata.beatmap_format == -1: return
section = BeatmapIO.Section.SECTION_NONE
line = ''
while True:
line = beatmap_data.readline()
if line.strip() == '[General]': section = BeatmapIO.Section.SECTION_GENERAL
elif line.strip() == '[Editor]': section = BeatmapIO.Section.SECTION_EDITOR
elif line.strip() == '[Metadata]': section = BeatmapIO.Section.SECTION_METADATA
elif line.strip() == '[Difficulty]': section = BeatmapIO.Section.SECTION_DIFFICULTY
elif line.strip() == '[Events]': section = BeatmapIO.Section.SECTION_EVENTS
elif line.strip() == '[TimingPoints]': section = BeatmapIO.Section.SECTION_TIMINGPOINTS
elif line.strip() == '[Colours]': section = BeatmapIO.Section.SECTION_COLOURS
elif line.strip() == '[HitObjects]': section = BeatmapIO.Section.SECTION_HITOBJECTS
elif line == '':
return
else:
BeatmapIO.__parse_section(section, line, beatmap)
@staticmethod
def __parse_section(section, line, beatmap):
if section != BeatmapIO.Section.SECTION_NONE:
BeatmapIO.SECTION_MAP[section](line, beatmap)
@staticmethod
def __parse_general_section(line, beatmap):
data = line.split(':', 1)
if len(data) < 2: return
data[0] = data[0].strip()
if data[0] == 'PreviewTime':
# ignore
return
if data[0] == 'Countdown':
# ignore
return
if data[0] == 'SampleSet':
# ignore
return
if data[0] == 'StackLeniency':
# ignore
return
if data[0] == 'Mode':
beatmap.gamemode = int(data[1])
return
if data[0] == 'LetterboxInBreaks':
# ignore
return
if data[0] == 'SpecialStyle':
# ignore
return
if data[0] == 'WidescreenStoryboard':
# ignore
return
@staticmethod
def __parse_editor_section(line, beatmap):
data = line.split(':', 1)
if len(data) < 2: return
if data[0] == 'DistanceSpacing':
# ignore
return
if data[0] == 'BeatDivisor':
# ignore
return
if data[0] == 'GridSize':
# ignore
return
if data[0] == 'TimelineZoom':
# ignore
return
@staticmethod
def __parse_metadata_section(line, beatmap):
data = line.split(':', 1)
if len(data) < 2: return
data[0] = data[0].strip()
if data[0] == 'Title':
beatmap.metadata.title = data[1].strip()
return
if data[0] == 'TitleUnicode':
# ignore
return
if data[0] == 'Artist':
beatmap.metadata.artist = data[1].strip()
return
if data[0] == 'ArtistUnicode':
# ignore
return
if data[0] == 'Creator':
beatmap.metadata.creator = data[1].strip()
return
if data[0] == 'Version':
beatmap.metadata.version = data[1].strip()
return
if data[0] == 'Source':
# ignore
return
if data[0] == 'Tags':
# ignore
return
if data[0] == 'BeatmapID':
beatmap.metadata.beatmap_id = data[1].strip()
return
if data[0] == 'BeatmapSetID':
beatmap.metadata.beatmapset_id = data[1].strip()
return
@staticmethod
def __parse_difficulty_section(line, beatmap):
data = line.split(':', 1)
if len(data) < 2: return
data[0] = data[0].strip()
if data[0] == 'HPDrainRate':
beatmap.difficulty.hp = float(data[1])
return
if data[0] == 'CircleSize':
beatmap.difficulty.cs = float(data[1])
return
if data[0] == 'OverallDifficulty':
beatmap.difficulty.od = float(data[1])
return
if data[0] == 'ApproachRate':
beatmap.difficulty.ar = float(data[1])
return
if data[0] == 'SliderMultiplier':
beatmap.difficulty.sm = float(data[1])
return
if data[0] == 'SliderTickRate':
beatmap.difficulty.st = float(data[1])
return
@staticmethod
def __parse_events_section(line, beatmap):
# ignore
return
@staticmethod
def __parse_timingpoints_section(line, beatmap):
data = line.split(',')
if len(data) < 2: return
timing_point = Beatmap.TimingPoint()
timing_point.offset = float(data[0])
timing_point.beat_interval = float(data[1])
# Old maps don't have meteres
if len(data) > 2: timing_point.meter = int(data[2])
else: timing_point.meter = 4
if len(data) > 6: timing_point.inherited = False if int(data[6]) == 1 else True
else: timing_point.inherited = False
beatmap.timing_points.append(timing_point)
@staticmethod
def __parse_colour_section(self, line):
# ignore
return
@staticmethod
def __parse_hitobjects_section(line, beatmap):
data = line.split(',')
if len(data) < 2: return
hitobject_type = int(data[3])
if beatmap.gamemode == Beatmap.GAMEMODE_OSU or beatmap.gamemode == None:
if Std.is_hitobject_type(hitobject_type, Hitobject.CIRCLE):
beatmap.hitobjects.append(StdSingleNoteIO.load_singlenote(data, beatmap.difficulty))
return
if Std.is_hitobject_type(hitobject_type, Hitobject.SLIDER):
beatmap.hitobjects.append(StdHoldNoteIO.load_holdnote(data, beatmap.difficulty))
return
if Std.is_hitobject_type(hitobject_type, Hitobject.SPINNER):
beatmap.hitobjects.append(StdSpinnerIO.load_spinner(data, beatmap.difficulty))
return
if beatmap.gamemode == Beatmap.GAMEMODE_TAIKO:
''' TODO: Fix
if Taiko.is_hitobject_type(hitobject_type, Hitobject.CIRCLE):
beatmap.hitobjects.append(TaikoSingleNoteHitobject(data))
return
if Taiko.is_hitobject_type(hitobject_type, Hitobject.SLIDER):
beatmap.hitobjects.append(TaikoHoldNoteHitobject(data))
return
if Taiko.is_hitobject_type(hitobject_type, Hitobject.SPINNER):
beatmap.hitobjects.append(TaikoSpinnerHitobject(data))
return
'''
return
if beatmap.gamemode == Beatmap.GAMEMODE_CATCH:
''' TODO: Fix
if Catch.is_hitobject_type(hitobject_type, Hitobject.CIRCLE):
beatmap.hitobjects.append(CatchSingleNoteHitobject(data))
return
if Catch.is_hitobject_type(hitobject_type, Hitobject.SLIDER):
beatmap.hitobjects.append(CatchHoldNoteHitobject(data))
return
if Catch.is_hitobject_type(hitobject_type, Hitobject.SPINNER):
beatmap.hitobjects.append(CatchSpinnerHitobject(data))
return
'''
return
if beatmap.gamemode == Beatmap.GAMEMODE_MANIA:
if Mania.is_hitobject_type(hitobject_type, Hitobject.CIRCLE):
beatmap.hitobjects.append(ManiaSingleNoteIO.load_singlenote(data, beatmap.difficulty))
return
if Mania.is_hitobject_type(hitobject_type, Hitobject.MANIALONG):
beatmap.hitobjects.append(ManiaHoldNoteIO.load_holdnote(data, beatmap.difficulty))
return
@staticmethod
def __process_timing_points(beatmap):
beatmap.bpm_min = float('inf')
beatmap.bpm_max = float('-inf')
bpm = 0
slider_multiplier = -100
old_beat = -100
base = 0
for timing_point in beatmap.timing_points:
if timing_point.inherited:
timing_point.beat_length = base
if timing_point.beat_interval < 0:
slider_multiplier = timing_point.beat_interval
old_beat = timing_point.beat_interval
else:
slider_multiplier = old_beat
else:
slider_multiplier = -100
bpm = 60000 / timing_point.beat_interval
timing_point.beat_length = timing_point.beat_interval
base = timing_point.beat_interval
beatmap.bpm_min = min(beatmap.bpm_min, bpm)
beatmap.bpm_max = max(beatmap.bpm_max, bpm)
timing_point.bpm = bpm
timing_point.slider_multiplier = slider_multiplier
@staticmethod
def __process_slider_timings(beatmap):
for hitobject in beatmap.hitobjects:
if not hitobject.is_hitobject_type(Hitobject.SLIDER):
continue
try: idx_timing_point = find(beatmap.timing_points, hitobject.time, lambda timing_point: timing_point.offset)
except:
print(beatmap.timing_points)
raise
timing_point = beatmap.timing_points[idx_timing_point]
hitobject.to_repeat_time = round(((-600.0/timing_point.bpm) * hitobject.pixel_length * timing_point.slider_multiplier) / (100.0 * beatmap.difficulty.sm))
hitobject.end_time = hitobject.time + hitobject.to_repeat_time*hitobject.repeat
@staticmethod
def __process_slider_tick_times(beatmap):
beatmap.slider_tick_times = []
for hitobject in beatmap.hitobjects:
if not hitobject.is_hitobject_type(Hitobject.SLIDER):
continue
ms_per_beat = (100.0 * beatmap.difficulty.sm)/(hitobject.get_velocity() * beatmap.difficulty.st)
hitobject.tick_times = []
for beat_time in np.arange(hitobject.time, hitobject.end_time, ms_per_beat):
hitobject.tick_times.append(beat_time)
if hitobject.tick_times[-1] != hitobject.end_time:
hitobject.tick_times.append(hitobject.end_time)
@staticmethod
def __process_columns(beatmap):
hitobjects = beatmap.hitobjects
beatmap.hitobjects = []
for column in range(int(beatmap.difficulty.cs)):
beatmap.hitobjects.append([])
for hitobject in hitobjects:
column = Mania.get_column(hitobject.pos.x, beatmap.difficulty.cs)
beatmap.hitobjects[column].append(hitobject)
'''
for column in range(len(beatmap.hitobjects)):
beatmap.hitobjects[column] = sorted(beatmap.hitobjects[column], key=lambda hitobject: hitobject.time)
'''
BeatmapIO.init() | [
"osu.local.hitobject.std.std_singlenote_io.StdSingleNoteIO.load_singlenote",
"osu.local.beatmap.beatmap.Beatmap.TimingPoint",
"osu.local.hitobject.std.std_spinner_io.StdSpinnerIO.load_spinner",
"osu.local.hitobject.mania.mania.Mania.get_column",
"osu.local.hitobject.std.std.Std.is_hitobject_type",
"osu.lo... | [((3175, 3184), 'osu.local.beatmap.beatmap.Beatmap', 'Beatmap', ([], {}), '()\n', (3182, 3184), False, 'from osu.local.beatmap.beatmap import Beatmap\n'), ((10638, 10659), 'osu.local.beatmap.beatmap.Beatmap.TimingPoint', 'Beatmap.TimingPoint', ([], {}), '()\n', (10657, 10659), False, 'from osu.local.beatmap.beatmap import Beatmap\n'), ((11494, 11549), 'osu.local.hitobject.std.std.Std.is_hitobject_type', 'Std.is_hitobject_type', (['hitobject_type', 'Hitobject.CIRCLE'], {}), '(hitobject_type, Hitobject.CIRCLE)\n', (11515, 11549), False, 'from osu.local.hitobject.std.std import Std\n'), ((11691, 11746), 'osu.local.hitobject.std.std.Std.is_hitobject_type', 'Std.is_hitobject_type', (['hitobject_type', 'Hitobject.SLIDER'], {}), '(hitobject_type, Hitobject.SLIDER)\n', (11712, 11746), False, 'from osu.local.hitobject.std.std import Std\n'), ((11884, 11940), 'osu.local.hitobject.std.std.Std.is_hitobject_type', 'Std.is_hitobject_type', (['hitobject_type', 'Hitobject.SPINNER'], {}), '(hitobject_type, Hitobject.SPINNER)\n', (11905, 11940), False, 'from osu.local.hitobject.std.std import Std\n'), ((13387, 13444), 'osu.local.hitobject.mania.mania.Mania.is_hitobject_type', 'Mania.is_hitobject_type', (['hitobject_type', 'Hitobject.CIRCLE'], {}), '(hitobject_type, Hitobject.CIRCLE)\n', (13410, 13444), False, 'from osu.local.hitobject.mania.mania import Mania\n'), ((13588, 13648), 'osu.local.hitobject.mania.mania.Mania.is_hitobject_type', 'Mania.is_hitobject_type', (['hitobject_type', 'Hitobject.MANIALONG'], {}), '(hitobject_type, Hitobject.MANIALONG)\n', (13611, 13648), False, 'from osu.local.hitobject.mania.mania import Mania\n'), ((16030, 16088), 'numpy.arange', 'np.arange', (['hitobject.time', 'hitobject.end_time', 'ms_per_beat'], {}), '(hitobject.time, hitobject.end_time, ms_per_beat)\n', (16039, 16088), True, 'import numpy as np\n'), ((16560, 16616), 'osu.local.hitobject.mania.mania.Mania.get_column', 'Mania.get_column', (['hitobject.pos.x', 
'beatmap.difficulty.cs'], {}), '(hitobject.pos.x, beatmap.difficulty.cs)\n', (16576, 16616), False, 'from osu.local.hitobject.mania.mania import Mania\n'), ((15111, 15200), 'misc.math_utils.find', 'find', (['beatmap.timing_points', 'hitobject.time', '(lambda timing_point: timing_point.offset)'], {}), '(beatmap.timing_points, hitobject.time, lambda timing_point:\n timing_point.offset)\n', (15115, 15200), False, 'from misc.math_utils import find\n'), ((11593, 11650), 'osu.local.hitobject.std.std_singlenote_io.StdSingleNoteIO.load_singlenote', 'StdSingleNoteIO.load_singlenote', (['data', 'beatmap.difficulty'], {}), '(data, beatmap.difficulty)\n', (11624, 11650), False, 'from osu.local.hitobject.std.std_singlenote_io import StdSingleNoteIO\n'), ((11790, 11843), 'osu.local.hitobject.std.std_holdnote_io.StdHoldNoteIO.load_holdnote', 'StdHoldNoteIO.load_holdnote', (['data', 'beatmap.difficulty'], {}), '(data, beatmap.difficulty)\n', (11817, 11843), False, 'from osu.local.hitobject.std.std_holdnote_io import StdHoldNoteIO\n'), ((11984, 12035), 'osu.local.hitobject.std.std_spinner_io.StdSpinnerIO.load_spinner', 'StdSpinnerIO.load_spinner', (['data', 'beatmap.difficulty'], {}), '(data, beatmap.difficulty)\n', (12009, 12035), False, 'from osu.local.hitobject.std.std_spinner_io import StdSpinnerIO\n'), ((13488, 13547), 'osu.local.hitobject.mania.mania_singlenote_io.ManiaSingleNoteIO.load_singlenote', 'ManiaSingleNoteIO.load_singlenote', (['data', 'beatmap.difficulty'], {}), '(data, beatmap.difficulty)\n', (13521, 13547), False, 'from osu.local.hitobject.mania.mania_singlenote_io import ManiaSingleNoteIO\n'), ((13692, 13747), 'osu.local.hitobject.mania.mania_holdnote_io.ManiaHoldNoteIO.load_holdnote', 'ManiaHoldNoteIO.load_holdnote', (['data', 'beatmap.difficulty'], {}), '(data, beatmap.difficulty)\n', (13721, 13747), False, 'from osu.local.hitobject.mania.mania_holdnote_io import ManiaHoldNoteIO\n')] |
import numpy as np
import scipy.sparse as spsparse
import pytest
import numpy.testing as npt
from poissonlearning.poisson_learning import PoissonSolver
@pytest.fixture(params=[(2, 1)])
def solver(request):
    """Provide a PoissonSolver configured from the (p, eps) fixture params."""
    p, eps = request.param
    return PoissonSolver(eps=eps, p=p, disp=True, tol=1e-10, maxiter=100)
@pytest.mark.parametrize(
    "y, expected",
    [
        (
            np.array([0, 0, 1, 2, 0]),
            np.array([[1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]]),
        )
    ],
)
def test_encode_labels(y, solver, expected):
    """Integer labels are one-hot encoded into the expected indicator matrix."""
    npt.assert_allclose(expected, solver._encode_labels(y))
@pytest.mark.parametrize(
    "W, encoded_labels, expected",
    [
        (
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 0.5, 0.0, 0.0],
                        [0.5, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 0.0, 0.5, 0.0],
                    ]
                )
            ),
            np.array([[1, 0], [1, 0], [0, 1]]),
            np.array([[1 / 3, -1 / 3], [1 / 3, -1 / 3], [-2 / 3, 2 / 3], [0, 0]]),
        )
    ],
)
def test_rhs_dirac_delta(W, encoded_labels, solver, expected):
    """The dirac-delta right-hand side matches the centred label indicators."""
    npt.assert_allclose(expected, solver._rhs_dirac_delta(W, encoded_labels))
@pytest.mark.parametrize(
    "u0, W, b, p, expected",
    [
        (
            None,
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 1.0, 0.0, 0.0],
                        [1.0, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 0.0, 1.0, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, -1.0]),
            2,
            np.array([1.5, 0.5, -0.5, -1.5]),
        ),
        (
            np.array([0.0, 0.0, 0.0, 0.0]),
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 1.0, 0.0, 0.0],
                        [1.0, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 0.0, 1.0, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, -1.0]),
            2,
            np.array([1.5, 0.5, -0.5, -1.5]),
        ),
        (
            np.array([0.0, 0.0, 0.0, 0.0]),
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 1.0, 0.0, 0.0],
                        [1.0, 0.0, 1.0, 1.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 1.0, 1.0, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, -1.0]),
            2,
            np.array([1.125, 0.125, -0.20833333, -0.54166667]),
        ),
    ],
)
def test_solve_using_minimizer(u0, W, b, p, solver, expected):
    """Minimizer-based solve reproduces known solutions for small graphs."""
    # Each parametrized case is only valid for a matching solver `p`.
    if solver.p != p:
        pytest.skip("`p` values don't match.")
    npt.assert_allclose(expected, solver._solve_using_minimizer(u0=u0, W=W, b=b))
@pytest.mark.parametrize(
    "W, b, p, expected",
    [
        (
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 1.0, 0.0, 0.0],
                        [1.0, 0.0, 1.0, 1.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 1.0, 1.0, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, -1.0]),
            2,
            np.array([1.125, 0.125, -0.20833333, -0.54166667]),
        )
    ],
)
def test_solve_using_iteration(W, b, p, solver, expected):
    """Iterative solve agrees with the known solution for a small graph."""
    # Each parametrized case is only valid for a matching solver `p`.
    if solver.p != p:
        pytest.skip("`p` values don't match.")
    npt.assert_allclose(expected, solver._solve_using_iteration(W=W, b=b))
| [
"numpy.testing.assert_allclose",
"numpy.array",
"poissonlearning.poisson_learning.PoissonSolver",
"pytest.fixture",
"pytest.skip"
] | [((157, 188), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[(2, 1)]'}), '(params=[(2, 1)])\n', (171, 188), False, 'import pytest\n'), ((248, 310), 'poissonlearning.poisson_learning.PoissonSolver', 'PoissonSolver', ([], {'eps': 'eps', 'p': 'p', 'disp': '(True)', 'tol': '(1e-10)', 'maxiter': '(100)'}), '(eps=eps, p=p, disp=True, tol=1e-10, maxiter=100)\n', (261, 310), False, 'from poissonlearning.poisson_learning import PoissonSolver\n'), ((598, 635), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'output'], {}), '(expected, output)\n', (617, 635), True, 'import numpy.testing as npt\n'), ((1308, 1345), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'output'], {}), '(expected, output)\n', (1327, 1345), True, 'import numpy.testing as npt\n'), ((3080, 3117), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'output'], {}), '(expected, output)\n', (3099, 3117), True, 'import numpy.testing as npt\n'), ((3836, 3873), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'output'], {}), '(expected, output)\n', (3855, 3873), True, 'import numpy.testing as npt\n'), ((2976, 3014), 'pytest.skip', 'pytest.skip', (['"""`p` values don\'t match."""'], {}), '("`p` values don\'t match.")\n', (2987, 3014), False, 'import pytest\n'), ((3739, 3777), 'pytest.skip', 'pytest.skip', (['"""`p` values don\'t match."""'], {}), '("`p` values don\'t match.")\n', (3750, 3777), False, 'import pytest\n'), ((386, 411), 'numpy.array', 'np.array', (['[0, 0, 1, 2, 0]'], {}), '([0, 0, 1, 2, 0])\n', (394, 411), True, 'import numpy as np\n'), ((425, 490), 'numpy.array', 'np.array', (['[[1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]]'], {}), '([[1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])\n', (433, 490), True, 'import numpy as np\n'), ((1047, 1081), 'numpy.array', 'np.array', (['[[1, 0], [1, 0], [0, 1]]'], {}), '([[1, 0], [1, 0], [0, 1]])\n', (1055, 1081), True, 'import numpy as np\n'), ((1095, 
1164), 'numpy.array', 'np.array', (['[[1 / 3, -1 / 3], [1 / 3, -1 / 3], [-2 / 3, 2 / 3], [0, 0]]'], {}), '([[1 / 3, -1 / 3], [1 / 3, -1 / 3], [-2 / 3, 2 / 3], [0, 0]])\n', (1103, 1164), True, 'import numpy as np\n'), ((1769, 1800), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, -1.0]'], {}), '([1.0, 0.0, 0.0, -1.0])\n', (1777, 1800), True, 'import numpy as np\n'), ((1829, 1861), 'numpy.array', 'np.array', (['[1.5, 0.5, -0.5, -1.5]'], {}), '([1.5, 0.5, -0.5, -1.5])\n', (1837, 1861), True, 'import numpy as np\n'), ((1896, 1926), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (1904, 1926), True, 'import numpy as np\n'), ((2260, 2291), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, -1.0]'], {}), '([1.0, 0.0, 0.0, -1.0])\n', (2268, 2291), True, 'import numpy as np\n'), ((2320, 2352), 'numpy.array', 'np.array', (['[1.5, 0.5, -0.5, -1.5]'], {}), '([1.5, 0.5, -0.5, -1.5])\n', (2328, 2352), True, 'import numpy as np\n'), ((2387, 2417), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0])\n', (2395, 2417), True, 'import numpy as np\n'), ((2751, 2782), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, -1.0]'], {}), '([1.0, 0.0, 0.0, -1.0])\n', (2759, 2782), True, 'import numpy as np\n'), ((2811, 2861), 'numpy.array', 'np.array', (['[1.125, 0.125, -0.20833333, -0.54166667]'], {}), '([1.125, 0.125, -0.20833333, -0.54166667])\n', (2819, 2861), True, 'import numpy as np\n'), ((3519, 3550), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, -1.0]'], {}), '([1.0, 0.0, 0.0, -1.0])\n', (3527, 3550), True, 'import numpy as np\n'), ((3579, 3629), 'numpy.array', 'np.array', (['[1.125, 0.125, -0.20833333, -0.54166667]'], {}), '([1.125, 0.125, -0.20833333, -0.54166667])\n', (3587, 3629), True, 'import numpy as np\n'), ((764, 866), 'numpy.array', 'np.array', (['[[0.0, 0.5, 0.0, 0.0], [0.5, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0], [0.0, \n 0.0, 0.5, 0.0]]'], {}), '([[0.0, 0.5, 0.0, 0.0], [0.5, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0],\n 
[0.0, 0.0, 0.5, 0.0]])\n', (772, 866), True, 'import numpy as np\n'), ((1486, 1588), 'numpy.array', 'np.array', (['[[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0], [0.0, \n 0.0, 1.0, 0.0]]'], {}), '([[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0],\n [0.0, 0.0, 1.0, 0.0]])\n', (1494, 1588), True, 'import numpy as np\n'), ((1977, 2079), 'numpy.array', 'np.array', (['[[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0], [0.0, \n 0.0, 1.0, 0.0]]'], {}), '([[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0],\n [0.0, 0.0, 1.0, 0.0]])\n', (1985, 2079), True, 'import numpy as np\n'), ((2468, 2570), 'numpy.array', 'np.array', (['[[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0], [0.0, \n 1.0, 1.0, 0.0]]'], {}), '([[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0]])\n', (2476, 2570), True, 'import numpy as np\n'), ((3236, 3338), 'numpy.array', 'np.array', (['[[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0], [0.0, \n 1.0, 1.0, 0.0]]'], {}), '([[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0]])\n', (3244, 3338), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from copy import deepcopy
import os
from data.download import DatasetDownloader
import tarfile
import sys
from scipy.interpolate import interp1d
from pyts.visualization import plot_paa
from pyts.transformation import PAA
import pickle
from scipy.spatial.distance import cdist, squareform
from data.DTWThread import DTWThread
import psutil
class Preprocessor:
    """
    Class for preprocessing routines on the mobility data set.
    """
    # Names of columns in all dataframes. Used to inject columns into empty dataframes.
    # Keys mirror the per-trip table names (see replace_none_values_with_empty_dataframes).
    DATAFRAME_COLUMN_NAMES = {
        "cell": ['time', 'cid', 'lac', 'asu'],
        "annotation": ['time', 'mode', 'notes'],
        "location": ['time', 'gpstime', 'provider', 'longitude', 'latitude', 'altitude', 'speed', 'bearing',
                     'accuracy'],
        "sensor": ['sensor', 'time', 'x', 'y', 'z', 'total'],
        "mac": ['time', 'ssid', 'level'],
        "marker": ['time', 'marker'],
        "event": ['time', 'event', 'state']
    }
    @staticmethod
    def preprocess(tokens, filename: str = None, distance_metric: str = "euclidean", use_individual_columns: bool = False, load_preprocessed: str = None):
        """
        Executes all preprocessing steps.
        :param tokens: List with keys of tokens to preprocess.
        :param filename: Specifies name of file data should be dumped to. Not persisted to disk if specified value is
        None. Note that filename is relative; all files are stored in /data/preprocessed.
        :param distance_metric: Distance metric to apply for comparison between trip segments. If None, no distance
        matrix is computed.
        :param use_individual_columns: Defines whether individual columns (x, y, z) or the total (n2) value should be
        used for distance calculation.
        :load_preprocessed: str, default=None, specifies a path to a pickled preprocessed_data.dat file.
                            if this parameter is not None the preprocessing step is skipped and the pickled data will be
                            loaded.
        :return: Dictionary with preprocessed data. Specified tokens are used as keys.
        """
        # 1. Preprocess data per token.
        if load_preprocessed is not None:
            # Load dataframes from disk.
            preprocessed_data = Preprocessor.restore_preprocessed_data_from_disk(filename=load_preprocessed)
        else:
            preprocessed_data = Preprocessor._preprocess_data_per_token(tokens=tokens)
        # 2. Cut all trips in 30 second snippets.
        # Result is a list of dataframes ordered like target_column_names:
        # index 0 = "total", 1 = "x", 2 = "y", 3 = "z".
        trips_cut_per_30_sec = Preprocessor.get_cut_trip_snippets_for_targets(
            preprocessed_data,
            snippet_length=30,
            sensor_type="acceleration",
            target_column_names=["total", "x", "y", "z"]
        )
        # 3. Apply distance metric and calculate distance matrix
        distance_matrix = None
        if distance_metric is not None:
            if use_individual_columns:
                # Distance over the x/y/z component dataframes (indices 1..3).
                distance_matrix = Preprocessor.calculate_distance_for_individual_columns(
                    dataframes=trips_cut_per_30_sec[1:4]
                )
            else:
                # Distance over the "total" (n2) dataframe (index 0).
                distance_matrix = Preprocessor.calculate_distance_for_n2(
                    trips_cut_per_30_sec[0],
                    metric=distance_metric
                )
        # 4. Dump data to file, if requested.
        if filename is not None:
            Preprocessor.persist_results(
                filename=filename,
                preprocessed_data=preprocessed_data,
                trips_cut_per_30_sec=trips_cut_per_30_sec,
                distance_metric=distance_metric,
                distance_matrix_n2=distance_matrix,
                use_individual_columns=use_individual_columns
            )
        return preprocessed_data
@staticmethod
def _preprocess_data_per_token(tokens: list):
"""
List of tokens whose data is to be processed.
:param tokens:
:return: Dictionary with preprocessed data per token.
"""
preprocessed_data = {}
for token in tokens:
# 1. Get travel data per token, remove dataframes without annotations.
dfs = Preprocessor.replace_none_values_with_empty_dataframes(
# Drop dataframes w/o annotations.
Preprocessor._remove_dataframes_without_annotation(
# Get travel data per token.
Preprocessor.get_data_per_token(token)
)
)
# 2. Remove trips less than 10 minutes long.
dfs = Preprocessor.replace_none_values_with_empty_dataframes(
Preprocessor._remove_dataframes_by_duration_limit(dfs, 10 * 60)
)
# 3. Cut first and last 30 seconds from scripted trips.
dfs = Preprocessor.replace_none_values_with_empty_dataframes(
Preprocessor._cut_off_start_and_end_in_dataframes(
dataframes=dfs, list_of_dataframe_names_to_cut=["sensor", "location"], cutoff_in_seconds=60
)
)
# 4. Perform PAA.
resampled_sensor_values = Preprocessor.replace_none_values_with_empty_dataframes(
Preprocessor.calculate_paa(dfs)
)
# Prepare dictionary with results.
preprocessed_data[token] = resampled_sensor_values
return preprocessed_data
    @staticmethod
    def persist_results(filename: str, preprocessed_data: dict, trips_cut_per_30_sec: list,
                        distance_metric: str, distance_matrix_n2: pd.DataFrame, use_individual_columns=False):
        """
        Stores preprocessing results on disk: the preprocessed data as a pickle
        and the snippet dataframes (total/x/y/z) plus the distance matrix as
        semicolon-separated CSV files next to it.
        :param filename: File name relative to data/preprocessed.
        :param preprocessed_data: Dict of preprocessed dataframes per token.
        :param trips_cut_per_30_sec: List of snippet dataframes in order [total, x, y, z].
        :param distance_metric: Name of the metric used (None => no matrix written).
        :param distance_matrix_n2: Distance matrix dataframe to persist.
        :param use_individual_columns: indicates if individual columns were used
        :return:
        """
        data_dir = DatasetDownloader.get_data_dir()
        preprocessed_path = os.path.join(data_dir, "preprocessed")
        # make sure the directory exists
        DatasetDownloader.setup_directory(preprocessed_path)
        full_path = os.path.join(preprocessed_path, filename)
        with open(full_path, "wb") as file:
            file.write(pickle.dumps(preprocessed_data))
        # NOTE(review): full_path[:-4] assumes `filename` ends in a 4-character
        # extension such as ".dat" — verify callers always pass one.
        trips_cut_per_30_sec[0].to_csv(full_path[:-4] + "_total.csv", sep=";", index=False)
        trips_cut_per_30_sec[1].to_csv(full_path[:-4] + "_x.csv", sep=";", index=False)
        trips_cut_per_30_sec[2].to_csv(full_path[:-4] + "_y.csv", sep=";", index=False)
        trips_cut_per_30_sec[3].to_csv(full_path[:-4] + "_z.csv", sep=";", index=False)
        if distance_metric is not None:
            if use_individual_columns:
                distance_matrix_n2_path = full_path[:-4] + "_" + "individual" + "_" + distance_metric + "_xyz" +".csv"
            else:
                distance_matrix_n2_path = full_path[:-4] + "_" + distance_metric + ".csv"
            distance_matrix_n2.to_csv(distance_matrix_n2_path, sep=";", index=False)
@staticmethod
def replace_none_values_with_empty_dataframes(dataframe_dicts: list):
"""
Checks every dictionary in every dictionary in specified list for None values, replaces them with empty data-
frames.
:param dataframe_dicts: List of dictionaries containing one dataframe for each key.
:return: List in same format with Nones replaced by empty dataframes.
"""
# For every key in every dictionary in list: Create new dictionary with Nones replaced by empty dataframes;
# concatenate new dictionaries to list.
return [
{
key: pd.DataFrame(columns=Preprocessor.DATAFRAME_COLUMN_NAMES[key])
if df_dict[key] is None else df_dict[key]
for key in df_dict
} for df_dict in dataframe_dicts
]
@staticmethod
def get_cut_trip_snippets_for_targets(dfs, target_column_names: list, snippet_length=30, sensor_type="acceleration"):
"""
This method gets a dictionary of trips per token and cuts them in the
specified snippet_length. It uses the columns of the specified names
(i. e. one or several of: "total", "x", "y", "z") in the sensor table.
Parameters
----------
dfs: dictionary with the assumed nested structure
dict[token] = list of trips per token and each trip consists of tables for
at least "annotation" and "sensor"
snippet_length: int, default=30,
specifies the length of the time snippets in seconds
sensor_type: string, default="acceleration"
specifies which sensor type should be used for each entry
target_column_names: list
Specifies which columns should represent trip observation.
Returns
-------
result: returns a list of pandas.DataFrames where each row is a snippet with length snippet_length
and each column is one recording step. Each entry corresponds
to the total aka n2 value of the original data. Additional columns are:
"mode","notes","scripted","token","trip_id", where scripted is a binary variable
where scripted=1 and ordinary=0. "trip_id" helps to identify which snippet, belongs
to which trip.
Each element in the list corresponds to one of the specified target columns (in the same sequence).
"""
return [
Preprocessor.get_cut_trip_snippets_for_target(
dfs=dfs,
snippet_length=snippet_length,
sensor_type=sensor_type,
target_column_name=target_column
)
for target_column in target_column_names
]
    @staticmethod
    def get_cut_trip_snippets_for_target(dfs, snippet_length=30, sensor_type="acceleration", target_column_name: str = "total"):
        """
        Cut every trip in the per-token dictionary into snippets of
        snippet_length seconds, using one column of the sensor table.

        Parameters
        ----------
        dfs: dictionary with the assumed nested structure
            dict[token] = list of trips per token and each trip consists of tables for
            at least "annotation" and "sensor"
        snippet_length: int, default=30,
            specifies the length of the time snippets in seconds
        sensor_type: string, default="acceleration"
            specifies which sensor type should be used for each entry
        target_column_name: string, default="total"
            Specifies which column should represent trip observation
            (i. e. one of: "total", "x", "y", "z").

        Returns
        -------
        result: pandas.DataFrame where each row is a snippet with
            snippet_length * 20 observations (data is assumed to be sampled at
            20 Hz, see HERTZ_RATE below). Additional columns are:
            "mode","notes","scripted","token","trip_id", where scripted is a
            binary variable (scripted=1, ordinary=0) and "trip_id" identifies
            which trip a snippet belongs to.
        """
        # Assumed fixed sampling frequency of the (resampled) sensor data.
        HERTZ_RATE = 20
        column_names = ["snippet_"+str(i) for i in range(snippet_length * HERTZ_RATE)]
        column_names = column_names + ["mode","notes","scripted","token", "trip_id"]
        result = pd.DataFrame(columns=column_names)
        # Running id over ALL trips of ALL tokens (not per token).
        trip_index = 0
        # sorted() makes the iteration order over tokens deterministic.
        for token_i, trips in sorted(dfs.items()):
            for trip_i in trips:
                sensor_data, mode, notes, scripted = Preprocessor._get_row_entries_for_trip(trip_i, sensor_type=sensor_type)
                splitted_trip = Preprocessor._cut_trip(
                    sensor_data=sensor_data,
                    target_column_name=target_column_name,
                    snippet_length=snippet_length,
                    column_names=column_names
                )
                splitted_trip["mode"]=mode
                # Missing notes arrive as NaN; normalize them to "empty".
                if str(notes).lower() == "nan":
                    splitted_trip["notes"]="empty"
                else:
                    splitted_trip["notes"]=notes
                splitted_trip["scripted"]=scripted
                splitted_trip["token"]=token_i
                splitted_trip["trip_id"]=trip_index
                trip_index += 1
                result = pd.concat([result, splitted_trip])
        # Concatenation duplicates indices; rebuild a unique RangeIndex.
        result.reset_index(drop=True, inplace=True)
        return result
    @staticmethod
    def calculate_distance_for_n2(data, metric="euclidean"):
        """
        Calculate the pairwise distance matrix for the snippet observations
        built from the norm of the x,y,z signal (n2 aka "total").

        Parameters
        ----------
        data: pandas.DataFrame of the trip segments plus the
            ["mode","notes","scripted","token", "trip_id"] columns.
        metric: string, default="euclidean",
            Any metric accepted by scipy.spatial.distance.cdist
            (e.g. "euclidean", "cityblock", "cosine"), or "dtw" to use the
            threaded dynamic-time-warping implementation (L1 norm).
            See:
            https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html

        Returns
        -------
        result: pandas.DataFrame with one row/column pair per segment
            (columns "distance_0".."distance_{n-1}") plus the re-appended
            categorical columns. Note the result is square in the number of
            segments, so its shape generally differs from the input's.
        """
        categorical_colnames=["mode","notes","scripted","token", "trip_id"]
        # Distances are computed on the observation columns only.
        small_df = data.drop(categorical_colnames, axis=1)
        #column_names = list(small_df.columns.values)
        nr_of_rows = small_df.shape[0]
        # NOTE(review): nr_of_columns is computed but never used.
        nr_of_columns = small_df.shape[1]
        # The new dataframe has dimensionality of nr_of_rows x nr_of_rows
        column_names = ["distance_"+str(i) for i in range(nr_of_rows)]
        result = pd.DataFrame(columns=column_names)
        # "dtw" is handled by the custom threaded implementation; everything
        # else is delegated to scipy's cdist.
        distance_matrix = \
            cdist(small_df, small_df, metric=metric) if metric != 'dtw' \
            else Preprocessor._calculate_distance_with_dtw(small_df, 1)
        result = pd.concat([result, pd.DataFrame(distance_matrix, columns=column_names)])
        # Reappend the categorical columns
        for colname in categorical_colnames:
            result[colname] = data[colname]
        return result
@staticmethod
def calculate_distance_for_individual_columns(dataframes: list):
"""
This method calculates the specified distance metric for the individual x, y, z columns.
Note that due to the data structure required for calculating distances between the individual columns currently
only the Euclidean norm is supported, since I haven't found a way to concile scipy's cdist-function with the
additional dimension (individual columns) in the dataset.
Parameters
----------
dataframes: List of pandas.DataFrame of the trip segments and the
["mode","notes","scripted","token", "trip_id"] columns with length 3 - has to contain dataframe
for columns "x", "y" and "z".
Returns
-------
result: returns a pandas.DataFrame where each each point in the distance matrix
is the distance of one trip segment to another one and each row of the
distance matrix corresponds to the trips segment distances to all other
trip segments. Additional columns are: "mode","notes","scripted","token",
where scripted is a binary variable where scripted=1 and ordinary=0
Note that the dimensionality of the result can be (for most cases)
different to the dimensionality of the incoming data pandas.DataFrame.
"""
categorical_colnames=["mode","notes","scripted","token", "trip_id"]
# Drop categorical column names for all dataframes.
small_dfs = [data.drop(categorical_colnames, axis=1) for data in dataframes]
# The new dataframe has dimensionality of nr_of_rows x nr_of_rows
nr_of_rows = small_dfs[0].shape[0]
column_names = ["distance_" + str(i) for i in range(nr_of_rows)]
result = pd.DataFrame(columns=column_names)
# Calculating distance matrix manually, since cdist(...) doesn't take 3D-arrays and I don't know how to solve
# this more elegantly.
distance_matrix = np.zeros([nr_of_rows, nr_of_rows])
for i in range(0, nr_of_rows):
for j in range(i + 1, nr_of_rows):
distance_matrix[i, j] = np.sqrt(
(
(small_dfs[0].iloc[i] - small_dfs[0].iloc[j]) ** 2 +
(small_dfs[1].iloc[i] - small_dfs[1].iloc[j]) ** 2 +
(small_dfs[2].iloc[i] - small_dfs[2].iloc[j]) ** 2
).sum()
)
distance_matrix[j, i] = distance_matrix[i, j]
result = pd.concat([result, pd.DataFrame(distance_matrix, columns=column_names)])
# Reappend the categorical columns
for colname in categorical_colnames:
result[colname] = dataframes[0][colname]
return result
    @staticmethod
    def _calculate_distance_with_dtw(data, norm: int = 1):
        """
        Calculate the pairwise distance matrix with dynamic time warping,
        parallelized over threads.

        :param data: DataFrame whose rows are the segments to compare.
        :param norm: Defines which L-norm is to be used.
        :return result: A 2D nd-array containing the distance from each segment
            to each other segment (same layout as scipy's cdist). Diagonal
            entries keep their initial value of 0.
        """
        # Initialize empty distance matrix.
        dist_matrix = np.zeros((data.shape[0], data.shape[0]), dtype=float)
        # Note regarding multithreading: Splitting up by rows leads to imbalance amongst thread workloads.
        # Instead, we split up all possible pairings to ensure even workloads and collect the results (and assemble
        # the distance matrix) after the threads finished their calculations.
        # Generate all pairings (upper triangle only; DTW distance is symmetric).
        segment_pairings = [(i, j) for i in range(0, data.shape[0]) for j in range(0, data.shape[0]) if j > i]
        # NOTE(review): the comment below says "cores - 1", but the code uses
        # all logical cores — confirm which is intended.
        # Set up multithreading. Run as many threads as logical cores are available on this machine - 1.
        num_threads = psutil.cpu_count(logical=True)
        threads = []
        for i in range(0, num_threads):
            # Calculate distance with fastDTW between each pairing of segments. Distances between elements to themselves
            # are ignored and hence retain their intial value of 0.
            # Each DTWThread writes its results directly into dist_matrix.
            thread = DTWThread(thread_id=i,
                               num_threads=num_threads,
                               segment_pairings=segment_pairings,
                               distance_matrix=dist_matrix,
                               data_to_process=data,
                               norm=norm)
            threads.append(thread)
            thread.start()
        # Wait for threads to finish.
        for thread in threads:
            thread.join()
        return dist_matrix
@staticmethod
def _cut_trip(sensor_data, target_column_name: str, snippet_length=30, column_names=None):
"""
Helper function to cut one trip into segments of snippet_length
and return the new pandas.DataFrame that includes the "total"
of each value.
:param target_column_name: Name of column to use as observation in trip (i. e. one of: "total", "x", "y", "z").
"""
HERTZ_RATE = 20
nr_of_trip_columns = HERTZ_RATE * snippet_length
categorical_colnames = ["mode","notes","scripted","token", "trip_id"]
if column_names is None:
column_names = ["snippet_"+str(i) for i in range(nr_of_trip_columns)]
column_names = column_names + categorical_colnames
result = pd.DataFrame(columns=column_names).drop(categorical_colnames, axis=1)
copied_sensor_data = sensor_data.reset_index(drop=True)
copied_sensor_data = copied_sensor_data
end_index = copied_sensor_data.index[-1]
# // floor division syntax
# the last segment wich is smaller than 30 seconds will be dropped
nr_of_rows = end_index // nr_of_trip_columns
start_index = 0
for row_index in range(nr_of_rows):
to_index = start_index + nr_of_trip_columns
row_i = copied_sensor_data.loc[start_index:to_index-1, target_column_name]
result.loc[row_index,:] = list(row_i)
start_index = to_index
return result
@staticmethod
def _get_row_entries_for_trip(trip, sensor_type="acceleration"):
"""
Helper function which splits on trip into the four parts
sensor_data, mode, notes and scripted, where scripted is
a binary variable where scripted=1 and ordinary=0
"""
sensor_data, mode, notes, scripted = None, None, None, None
for table_name, table_content in trip.items():
if table_name == "sensor":
sensor_data = table_content[table_content["sensor"] == sensor_type]
if table_name == "annotation":
annotation_data = table_content
mode = annotation_data["mode"][0]
notes = annotation_data["notes"][0]
if "scripted" in str(notes).lower():
scripted = 1
else:
scripted = 0
return sensor_data, mode, notes, scripted
@staticmethod
def unpack_all_trips(dfs: dict, keep_tokens=False):
"""
Helper method that takes a dictionary of the trips per token and returns a list
of all trips. Assumed nested structure is:
dict[token] = list of trips per token
:param keep_tokens: bool, default=False,
if True, the token is appended to the annotation dataframe.
This makes it easier to identify the trips later.
"""
result = []
dfs_copy = deepcopy(dfs)
for token, trips in sorted(dfs_copy.items()):
if keep_tokens:
for trip_i in trips:
if trip_i["annotation"] is not None:
trip_i["annotation"]["token"]=token
result += trips
return result
    @staticmethod
    def restore_preprocessed_data_from_disk(filename: str):
        """
        Loads pickled object from disk.
        :param filename: File name/relative path in /data/preprocessed.
        :return: Dictionary holding data for tokens (same format as returned by Preprocessor.preprocess().
        """
        data_dir = DatasetDownloader.get_data_dir()
        full_path = os.path.join(data_dir, "preprocessed", filename)
        # Read the raw bytes first, then unpickle.
        # NOTE(review): pickle.loads executes arbitrary code on load — only use
        # with files this project wrote itself, never with untrusted input.
        with open(full_path, "rb") as file:
            preprocessed_data = file.read()
        # https://media.giphy.com/media/9zXWAIcr6jycE/giphy.gif
        return pickle.loads(preprocessed_data)
@staticmethod
def _filter_nan_values(dataframes: list, properties_to_check: list, allowed_nan_ratio: float = 0.2):
"""
Filter NAN values from dataframes sensor data. Note that dataframe is dismissed if at least (!) one of the
specified columns exceeds the allowed ratio of NANs.
:param dataframes:
:param properties_to_check: Properties to check for NAN values (e.g.: "sensor", "location").
:param allowed_nan_ratio: Dataframe is removed if ratio (0 to 1) of NAN values relative to total count
exceeds defined threshold.
:return:
"""
filtered_dataframes = []
for i, df in enumerate(dataframes):
# Check if threshold was reached for one of the specified columns.
threshold_reached = True if np.count_nonzero(
[
df[prop].isnull().sum().sum() / float(len(df[prop])) > allowed_nan_ratio
for prop in properties_to_check
]
) > 0 else False
# Dismiss dataframe if share of NAN values is above defined_threshold.
if not threshold_reached:
# Drop rows with NANs.
for key in properties_to_check:
df[key].dropna(axis=0, how='any', inplace=True)
# Append to list.
filtered_dataframes.append(df)
return filtered_dataframes
@staticmethod
def _recalculate_accerelometer_2norm(resampled_dataframes):
"""
Recalculates 2-norm for x-/y-/z-values in accerelometer data.
Note that the original column 'total' is overwritten with the new value.
:param resampled_dataframes:
:return: List of dataframes with updated values for column 'total'.
"""
for i, df in enumerate(resampled_dataframes):
# Chain x-/y-/z-valuees for all entries in current dataframe and apply 2-norm on resulting (2, n)-shaped
# vector.
resampled_dataframes[i]["total"] = np.linalg.norm(
np.array([df["x"], df["y"], df["z"]]),
ord=2, axis=0
)
return resampled_dataframes
    @staticmethod
    def _cut_off_start_and_end_in_dataframes(dataframes, list_of_dataframe_names_to_cut, cutoff_in_seconds=30):
        """
        Auxiliary method with boilerplate code for cutting off start and end of timeseries in specified list of
        dataframes.

        NOTE(review): only SCRIPTED trips are cut — unscripted trips are merely
        counted and returned unchanged. Confirm this asymmetry is intended.

        :param dataframes: list of trip dictionaries; each must contain an
            "annotation" table with "notes" and "mode" entries.
        :param list_of_dataframe_names_to_cut: table names whose time series
            should be trimmed (e.g. "sensor", "location").
        :param cutoff_in_seconds: seconds to trim at each end.
        :return: List of cleaned/cut dataframes (modified in place as well).
        """
        # Tally of trips per transport mode; NOTE(review): this local counter is
        # never returned or logged — dead state unless used for debugging.
        trips = {"scripted": {"TRAM": 0, "METRO": 0, "WALK": 0}, "unscripted": {"TRAM": 0, "METRO": 0, "WALK": 0}}
        for i, df in enumerate(dataframes):
            # Assuming "notes" only has one entry per trip and scripted trips' notes contain the word "scripted",
            # while ordinary trips' notes don't.
            if "scripted" in str(df["annotation"]["notes"][0]).lower():
                trips["scripted"][df["annotation"]["mode"][0]] += 1
                for dataframe_name in list_of_dataframe_names_to_cut:
                    # Cut off time series data.
                    dataframes[i][dataframe_name] = Preprocessor._cut_off_start_and_end_in_dataframe(
                        dataframe=df[dataframe_name], cutoff_in_seconds=cutoff_in_seconds
                    )
            else:
                trips["unscripted"][df["annotation"]["mode"][0]] += 1
        return dataframes
@staticmethod
def _cut_off_start_and_end_in_dataframe(dataframe, cutoff_in_seconds=30):
"""
Removes entries with first and last cutoff_in_seconds in series.
Assumes time in dataframe is specified in milliseconds.
:param dataframe: Dataframe containing time series data. Expects specified dataframe to have a column "time".
:param cutoff_in_seconds:
:return: Cleaned dataframe.
"""
# Only cut if enough values exist. If not (e. g. "location" data not available) return None.
if not dataframe.empty:
# Calculate time thresholds.
lower_time_threshold = dataframe.head(1)["time"].values[0]
upper_time_threshold = dataframe.tail(1)["time"].values[0]
# Assuming time is specified as UTC timestamp in milliseconds, so let's convert the cutoff to milliseconds.
cutoff_in_seconds *= 1000
# Drop all rows with a time value less than 30 seconds after the initial entry and less than 30 seconds
# before the last entry.
dataframe = dataframe[
(dataframe["time"] >= lower_time_threshold + cutoff_in_seconds) &
(dataframe["time"] <= upper_time_threshold - cutoff_in_seconds)
]
return dataframe
else:
return None
@staticmethod
def _resample_trip_time_series(dataframes):
"""
Resamples trips' time series to hardcoded value (? per second).
Returns list of dataframes with resampled time series.
:param dataframes: List of dataframes with trip information.
:return: List of resampled time series.
"""
return [
Preprocessor.downsample_time_series_per_category(df["sensor"], categorical_colnames=["sensor"])
for df in dataframes
]
@staticmethod
def _remove_dataframes_without_annotation(dataframes):
"""
Removes dataframes w/o annotation data (since we don't know the transport mode and hence can't use it for
training.
:param dataframes: ist of dataframes with trip data.
:return:
"""
filtered_dataframes = []
for df in dataframes:
if ("annotation" in df.keys()) and (not df["annotation"].empty):
filtered_dataframes.append(df)
return filtered_dataframes
@staticmethod
def _remove_dataframes_by_duration_limit(dataframes, min_duration=0, max_duration=sys.maxsize):
"""
Removes dataframes outside the defined time thresholds.
:param dataframes: ist of dataframes with trip data.
:param min_duration: Minimum duration in seconds.
:param max_duration: Maximum duration in seconds.
:return:
"""
# Fetch summaries for all trips.
trip_summaries = Preprocessor.get_trip_summaries(dataframes, convert_time=True)
filtered_dataframes = []
for i, df in enumerate(dataframes):
trip_length = trip_summaries.iloc[i]["trip_length"].total_seconds()
if trip_length >= min_duration and trip_length >= min_duration:
filtered_dataframes.append(df)
return filtered_dataframes
@staticmethod
def _convert_timestamps_from_dataframe(df, unit="ms", time_col_names=["time", "gpstime"]):
"""
This method converts integer timestamp columns in a pandas.DataFrame object
to datetime objects.
DataFrames in mobility data with datetime columns:
cell, event, location, marker, sensor
Parameters
----------
data: input data pandas DataFrame.
unit: string, default="ms"
time unit for transformation of the input integer timestamps.
Possible values: D,s,ms,us,ns
see "unit" at: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html
for further information.
Returns
-------
result: returns a deepcopy of the data with transformed time columns.
"""
result = pd.DataFrame()
if df is not None:
df_column_names = list(df.columns.values)
if any(name_i in time_col_names for name_i in df_column_names):
result = deepcopy(df)
for time_column_name in time_col_names:
if time_column_name in df_column_names:
index_copy = result.index
result.set_index(time_column_name,inplace=True)
result.index = pd.to_datetime(result.index, unit=unit)
result.reset_index(inplace=True)
result.index = index_copy
return result
@staticmethod
def _convert_timestamps_from_dictionary_of_dataframes(d, unit="ms", time_col_names=["time","gpstime"]):
""" Convenience function to loop over dicionary of one track recording.
"""
result = dict()
for df_name, df in d.items():
result[df_name] = Preprocessor._convert_timestamps_from_dataframe(df,unit=unit, time_col_names=time_col_names)
return result
@staticmethod
def _convert_timestamps_from_list_of_total_trips(all_trips, unit="ms", time_col_names=["time","gpstime"]):
""" Convenience function to loop over list af all track recordings.
"""
result = []
for i, trip_i in enumerate(all_trips):
result.append(Preprocessor._convert_timestamps_from_dictionary_of_dataframes(trip_i, unit=unit, time_col_names=time_col_names))
return result
@staticmethod
def convert_timestamps(data, unit="ms", time_col_names=["time","gpstime"]):
"""
This function converts the integer timestamps in the specified columns to
datetime objects in the format YYYY-MM-DD HH-MM-SS-uu-.., where uu stands for
the specified unit.
It is assumed that the time colums are integer as it is the case for the mobility data.
Accepted input types are pandas.DataFrame, dict, list which follow the convention
of the projects nesting structure. Special case if data is of type pandas.DataFrame
then the behaviour of this function equals _convert_timestamps_from_dataframe:
Parameters
----------
data: input data, can be a list of all tracks, a dict of one track or a
pandas DataFrame of one table.
unit: string, default="ms"
time unit for transformation of the input integer timestamps.
Possible values: D,s,ms,us,ns
see "unit" at: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html
for further information.
time_col_names: list of strings, default=["time","gpstime"]
names of the time colums in the table which should be transformed.
Returns
-------
result: returns a deepcopy of the data with transformed time columns.
The datatype of data will be the same as of the input type. Accepted input types are
pandas.DataFrame, dict, list.
"""
result = pd.DataFrame()
if type(data) is pd.DataFrame:
result = Preprocessor._convert_timestamps_from_dataframe(data, unit, time_col_names)
elif type(data) is dict:
result = Preprocessor._convert_timestamps_from_dictionary_of_dataframes(data, unit, time_col_names)
elif type(data) is list:
result = Preprocessor._convert_timestamps_from_list_of_total_trips(data, unit, time_col_names)
return result
    @staticmethod
    def downsample_time_series(series, time_interval="S", time_col_name="time"):
        """
        Downsamples a pandas time series DataFrame from milliseconds to a new
        user specified time interval. The aggregation for the new time bins will be
        calculated via the mean. To make sure that the right time column is
        used you have to set the time columns name in time_col_name or set it as
        index before calling this function.
        Otherwise it is assumed that the time column has the name time_col_name="time".
        For further information about examples for pandas resampling function see:
        http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
        http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling
        https://machinelearningmastery.com/resample-interpolate-time-series-data-python/

        Parameters
        ----------
        series: a pandas DataFrame object with a DatetimeIndex, if there is no
            DatetimeIndex set, it is assumed that there is a Datetime column with
            name time_col_name="time"
        time_interval: string, default="S",
            specifies the new time interval to which the series will be downsampled.
            Valid values are "S" for seconds, "T" for minutes etc. It is also possible
            to sample in a special interval e.g. 5 seconds, by passing "5S".
            For all possible frequencies see:
            https://stackoverflow.com/questions/17001389/pandas-resample-documentation#17001474
        time_col_name: string, default="time"
            The name of the time column name.

        Returns
        -------
        data: returns the data with downsampled time columns, where each new bin
            is aggregated via the mean. If neither a DatetimeIndex nor the time
            column is present, the input is returned unchanged.
        """
        if isinstance(series.index, pd.DatetimeIndex):
            # Index already carries datetimes — resample directly.
            resampled = series.resample(time_interval).mean()
        elif time_col_name in list(series.columns.values):
            # In case the column has not been converted to Datetime object
            # it will be converted here.
            # NOTE(review): np.dtype("Int64") — confirm this matches the raw
            # integer timestamp dtype on all supported numpy/pandas versions.
            if series[time_col_name].dtype in [np.dtype("Int64")]:
                # deepcopy so the caller's frame is not mutated by conversion.
                series = deepcopy(series)
                series = Preprocessor.convert_timestamps(series, time_col_names=[time_col_name])
            resampled = series.set_index(time_col_name).resample(time_interval).mean()
            resampled = resampled.reset_index()
        else:
            # No usable time information: return the series unchanged.
            resampled = series
        return resampled
    @staticmethod
    def downsample_time_series_per_category(series, categorical_colnames, time_interval="S", time_col_name="time"):
        """
        Downsamples a pandas time series DataFrame from milliseconds to a new
        user specified time interval and takes care of the right interpolation of categorical variables.
        The aggregation for the new time bins will be calculated via the mean.
        To make sure that the right time column is used you have to set the time
        columns name in time_col_name.
        Otherwise it is assumed that the time column has the name time_col_name="time".
        For further information about examples for pandas resampling function see:
        http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
        http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling
        https://machinelearningmastery.com/resample-interpolate-time-series-data-python/

        Parameters
        ----------
        series: a pandas DataFrame object with a DatetimeIndex, if there is no
            DatetimeIndex set, it is assumed that there is a Datetime column with
            name time_col_name="time"
        categorical_colnames: a list of strings of colum names
            e.g. ["sensor"]
        time_interval: string, default="S",
            specifies the new time interval to which the series will be downsampled.
            Valid values are "S" for seconds, "T" for minutes etc. It is also possible
            to sample in a special interval e.g. 5 seconds, by passing "5S".
            For all possible frequencies see:
            https://stackoverflow.com/questions/17001389/pandas-resample-documentation#17001474
        time_col_name: string, default="time"
            The name of the time column name. set to "index" if you want to transform
            the index column

        Returns
        -------
        data: returns the data with downsampled time columns, where each new bin
            is aggregated via the mean and keeps the categorical values.
        """
        # Work on a copy — the conversions below would otherwise mutate input.
        copied_series = deepcopy(series)
        series_column_names = list(copied_series.columns.values)
        result = pd.DataFrame(columns = series_column_names)
        # In case the column or index has not been converted to Datetime object
        # it will be converted here.
        if (time_col_name=="index") and (copied_series.index.dtype in [np.dtype("Int64")]):
            copied_series.index = pd.to_datetime(copied_series.index, unit="ms")
        if time_col_name in series_column_names:
            if copied_series[time_col_name].dtype in [np.dtype("Int64")]:
                copied_series = Preprocessor._convert_timestamps_from_dataframe(copied_series, time_col_names=[time_col_name])
        # Start actual downsampling
        if isinstance(copied_series.index, pd.DatetimeIndex) or (time_col_name in series_column_names):
            # Resample each category slice independently so the mean is never
            # taken across different categorical values, then re-tag the slice.
            for categorical_colname_i in categorical_colnames:
                categories = list(copied_series[categorical_colname_i].unique())
                for category_i in categories:
                    series_for_category = copied_series[copied_series[categorical_colname_i]==category_i]
                    resampled = Preprocessor.downsample_time_series(series_for_category, time_interval, time_col_name)
                    resampled[categorical_colname_i] = category_i
                    result = pd.concat([result, resampled])
            # Restore chronological order across the concatenated slices.
            if isinstance(result.index, pd.DatetimeIndex):
                result = result.sort_index()
            else:
                result = result.set_index(time_col_name).sort_index()
            # need to reset index otherwise indices could be not unique anymore
            result = result.reset_index()
        else:
            # No usable time information: hand back the (copied) input.
            result = copied_series
        return result
    @staticmethod
    def get_trip_summaries(all_trips, convert_time=False):
        """
        This method returns a summary of all recorded trips. The summary includes start,
        stop time, trip_length, recording mode and notes.

        Parameters
        ----------
        all_trips : a list of all trips
        convert_time : bool, default=False
            indicates whether or not the time values should be converted to datetime
            objects.

        Returns
        -------
        result : pandas DataFrame
            a pandas dataframe with the summaries for each trip, one row per
            trip that has a non-empty "annotation" table (trips without
            annotation are skipped entirely).
        """
        nr_of_recorded_trips = len(all_trips)
        result = pd.DataFrame()
        if convert_time:
            all_trips_copy = Preprocessor.convert_timestamps(all_trips)
        else:
            all_trips_copy = all_trips
        start_times = []
        end_times = []
        for index in range(0, nr_of_recorded_trips):
            trip_i = all_trips_copy[index]
            # Only annotated trips contribute a summary row; start/stop are
            # taken from the first and last marker entry.
            # NOTE(review): assumes every annotated trip also has a non-empty
            # "marker" table — a missing one would raise here; confirm.
            if ("annotation" in trip_i.keys()) and (not trip_i["annotation"].empty):
                result = pd.concat([result, trip_i["annotation"]])
                start_times.append(trip_i["marker"].iloc[0,0])
                end_times.append(trip_i["marker"].iloc[-1,0])
        result["Start"] = start_times
        result["Stop"] = end_times
        result["trip_length"] = [end-start for end,start in zip(end_times,start_times)]
        result = result.reset_index(drop=True)
        return result
@staticmethod
def extract_csv_file_name(csv_name):
"""
Extracts the name from the csv file name e.g. annotation, cell, event, location,
mac, marker, sensor.
Parameters
----------
csv_name: full name of the csv file in tar.gz directory
Returns
-------
extracted_name: string,
"""
csv_name = str(csv_name)
extracted_name = ""
for name in DatasetDownloader.VALID_NAMES:
if name in csv_name:
extracted_name = name
return extracted_name
return extracted_name
@staticmethod
def read_tar_file_from_dir(file_path):
"""
This method reads a tar.gz file from a specified file path and appends each
.csv file to a dictionary where the key is specified as one of the VALID_NAMES:
["annotation", "cell", "event", "location", "mac", "marker", "sensor"], which
are the names given to identify the different collected mobility data.
"""
tar = tarfile.open(file_path, "r:gz")
csv_files_per_name = {}
for member in tar.getmembers():
f = tar.extractfile(member)
if f is not None:
name = Preprocessor.extract_csv_file_name(member)
csv_files_per_name[name] = pd.read_csv(f, header=0, sep=',', quotechar='"')
tar.close()
return csv_files_per_name
@staticmethod
def get_data_per_trip(dir_name="raw"):
"""
This method reads all downloaded data and returns a list of dictionaries
which include the pandas dataframes for each trip. Each trip DataFrame
can be accessed via its name e.g. annotation, cell, event, location,
mac, marker, sensor.
Parameters
-------
dir_name : string, default="raw",
specifies the name of the directory inside the data directory from which
the data should be read.
Returns
-------
data_frames : a list of pandas DataFrame's in a dictionary
"""
file_path = os.path.join(DatasetDownloader.get_data_dir(), dir_name)
tar_file_names = DatasetDownloader.get_file_names(file_path)
dfs = []
for tar_name in tar_file_names:
path_to_tar_file = os.path.join(file_path, tar_name)
csv_files_per_name = Preprocessor.read_tar_file_from_dir(path_to_tar_file)
dfs.append(csv_files_per_name)
return dfs
@staticmethod
def get_data_per_token(token):
"""
This method reads the downloaded data for one user and returns a list of dictionaries
which include the pandas dataframes for each trip. Each trip DataFrame
can be accessed via its name e.g. annotation, cell, event, location,
mac, marker, sensor.
Returns
-------
data_frames : a list of pandas DataFrame's in a dictionary
"""
file_path = os.path.join(DatasetDownloader.get_data_dir(), "raw")
tar_file_names = DatasetDownloader.get_file_names_for(file_path, token)
dfs = []
for tar_name in tar_file_names:
path_to_tar_file = os.path.join(file_path, tar_name)
csv_files_per_name = Preprocessor.read_tar_file_from_dir(path_to_tar_file)
dfs.append(csv_files_per_name)
return dfs
@staticmethod
def _get_shallow_copy(dfs: list):
""" Helper function to get a shallow copy of the list of dictionaries
as only sensor data is modified and the rest can be references.
"""
nr_of_trips = len(dfs)
result = [{} for trip in range(nr_of_trips)]
for trip_index, trip_i in enumerate(dfs):
for key, values in trip_i.items():
if key == "sensor":
result[trip_index][key] = None
else:
result[trip_index][key] = values
return result
@staticmethod
def calculate_paa(dfs, verbose=False):
newDict = Preprocessor._get_shallow_copy(dfs)
nr_of_trips = len(dfs)
for i in range(0, nr_of_trips):
if verbose:
print('Frame ', i)
#get single trip
sensor_trip = dfs[i]['sensor']
#get all sensors
sensor_set = set(sensor_trip['sensor'])
#create new data frame
helper = pd.DataFrame()
for sensor in sensor_set:
if verbose:
print("sensor: ", sensor)
sensor_data = sensor_trip[sensor_trip['sensor'] == sensor]
if verbose:
print('init time frame')
print(Preprocessor.convert_timestamps(sensor_data.head(1)))
print(Preprocessor.convert_timestamps(sensor_data.tail(1)))
sensor_data = sensor_data.drop(['sensor', 'total'], axis=1)
sensor_data.reset_index(drop=True,inplace=True)
sensor_data_approximated = Preprocessor.approx_sensor(sensor_data, 100)
start_index = 0
stop_index = 1
end_of_df = len(sensor_data_approximated)
buffer_helper = pd.DataFrame()
filler = pd.DataFrame()
if verbose:
print("end_of_df:", end_of_df)
while stop_index <= end_of_df:
if start_index + 30000 <= end_of_df:
stop_index = stop_index + 30000
else:
stop_index = end_of_df+1
buffer_helper = Preprocessor.normalize_trip(sensor_data_approximated.iloc[start_index:stop_index,:])
filler = filler.append(buffer_helper)
start_index = stop_index
filler['sensor'] = sensor
filler['total'] = np.linalg.norm(np.array([filler['x'], filler['y'], filler['z']]),ord=2, axis=0)
helper = pd.concat([helper,filler])
if verbose:
print("complete frame")
print(Preprocessor.convert_timestamps(helper.head(1))['time'])
print(Preprocessor.convert_timestamps(helper.tail(1))['time'])
print('----------------------------')
newDict[i]['sensor'] = helper
return Preprocessor.convert_timestamps(newDict)
@staticmethod
def approx_sensor(acc, hz=None, atd_ms=None):
"""
This method interpolates the observations at equidistant time stamps.
e.g. specifying hz=20 will result in a data frame containing 20 observaitons per second.
Returns
-------
df : a pandas DataFrame containing the interpolated series
"""
# interpolate to a common sampling rate
#
# acc ... data.table(time,x,y,z)
# atd_ms ... approximated time difference in milliseconds, default value = 10
if(hz is None and atd_ms is None):
atd_ms = 10
elif (hz is not None and atd_ms is None):
atd_ms = 1000/hz
elif (hz is not None and atd_ms is not None):
print("hz is overruled with atd_ms")
new_time = np.arange(acc['time'][0], acc['time'][len(acc['time'])-1], atd_ms)
f_ax = interp1d(acc['time'],acc['x'])
ax = list(f_ax(new_time))
f_ay = interp1d(acc['time'],acc['y'])
ay = list(f_ay(new_time))
f_az = interp1d(acc['time'],acc['z'])
az = list(f_az(new_time))
df = pd.DataFrame({
'time':new_time,
'x':ax,
'y':ay,
'z':az,
'total': np.linalg.norm(
np.array([ax, ay, az]),
ord=2, axis=0
)
})
return df
@staticmethod
def normalize_trip(trip):
"""
This method performs a Piecewise Aggregate Approximation of a trip.
trip... a dataframe which should be used
w_size... the bin size.
REQUIREMENT: package 'future'
Returns
-------
df : a pandas DataFrame containing the interpolated series
"""
paa = PAA(window_size=5, output_size=None, overlapping=False)
container = []
for label in trip.columns:
# this creates a new object, change to float32 increases speed
arr = np.array([trip[label]], dtype=np.float64)
transf = paa.transform(arr)
container.append(list(transf[0]))
df = pd.DataFrame(container,trip.columns).T
df['time'] = [int(i) for i in df['time']]
return df
# Note by rmitsch: Commented out since variable 'feature' is not defined. To remove?
# @staticmethod
# def plot_paa(sensor_data, w_size=5, seconds=2):
#
#
# plot_paa(feature, window_size=w_size,output_size=None,overlapping=False,marker='o')
@staticmethod
def print_start_and_end_of_recording_per_sensor(df):
set_of_sensors = set(df['sensor'])
for sensor in set_of_sensors:
print("sensor: ", sensor)
# get all sensor data for specific sensor
sensor_data = deepcopy(df[df["sensor"] == sensor])
sensor_data.reset_index(drop=True,inplace=True)
start = min(sensor_data['time'])
start = pd.to_datetime(start, unit="ms")
end = max(sensor_data['time'])
end = pd.to_datetime(end, unit="ms")
print("start of recodring: " , start)
print("end of recodring: " , end)
    @staticmethod
    def check_second_intervals(sensor_data, seconds=5):
        """Print the row index at which each new second starts in the
        acceleration stream of *sensor_data*, for up to *seconds* seconds.

        Assumes the 'time' column already holds datetime-like values exposing
        ``.minute`` / ``.second`` -- TODO confirm against callers.
        """
        start = True
        count = 0
        # Deep copy so the reset_index below cannot mutate the caller's frame.
        time_series = deepcopy(sensor_data[sensor_data['sensor'] == 'acceleration'])
        time_series.reset_index(drop=True,inplace=True)
        time_series = time_series['time']
        for timestamp in time_series:
            if start:
                # Remember the second of the very first sample.
                print("beginning at:", timestamp.minute,":",timestamp.second)
                prev = timestamp.second
                start=False
            # NOTE(review): the wrap-around maps 59 -> 1, skipping second 0 --
            # confirm this is intended.
            next_time = prev + 1 if prev < 59 else 1
            if timestamp.second == next_time:
                # First row whose second matches marks the boundary.
                index_pos = time_series[time_series == timestamp].index.tolist()
                print("next second at index: ",index_pos[0], " at time ",timestamp.minute,":",timestamp.second)
                prev = timestamp.second
                count = count + 1
            if count > seconds:
                break
| [
"tarfile.open",
"pandas.read_csv",
"pickle.dumps",
"scipy.interpolate.interp1d",
"numpy.array",
"data.DTWThread.DTWThread",
"copy.deepcopy",
"pickle.loads",
"pyts.transformation.PAA",
"pandas.to_datetime",
"data.download.DatasetDownloader.get_data_dir",
"pandas.DataFrame",
"numpy.dtype",
"... | [((5960, 5992), 'data.download.DatasetDownloader.get_data_dir', 'DatasetDownloader.get_data_dir', ([], {}), '()\n', (5990, 5992), False, 'from data.download import DatasetDownloader\n'), ((6021, 6059), 'os.path.join', 'os.path.join', (['data_dir', '"""preprocessed"""'], {}), "(data_dir, 'preprocessed')\n", (6033, 6059), False, 'import os\n'), ((6109, 6161), 'data.download.DatasetDownloader.setup_directory', 'DatasetDownloader.setup_directory', (['preprocessed_path'], {}), '(preprocessed_path)\n', (6142, 6161), False, 'from data.download import DatasetDownloader\n'), ((6182, 6223), 'os.path.join', 'os.path.join', (['preprocessed_path', 'filename'], {}), '(preprocessed_path, filename)\n', (6194, 6223), False, 'import os\n'), ((11588, 11622), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (11600, 11622), True, 'import pandas as pd\n'), ((14965, 14999), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (14977, 14999), True, 'import pandas as pd\n'), ((17271, 17305), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (17283, 17305), True, 'import pandas as pd\n'), ((17482, 17516), 'numpy.zeros', 'np.zeros', (['[nr_of_rows, nr_of_rows]'], {}), '([nr_of_rows, nr_of_rows])\n', (17490, 17516), True, 'import numpy as np\n'), ((18783, 18836), 'numpy.zeros', 'np.zeros', (['(data.shape[0], data.shape[0])'], {'dtype': 'float'}), '((data.shape[0], data.shape[0]), dtype=float)\n', (18791, 18836), True, 'import numpy as np\n'), ((19411, 19441), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(True)'}), '(logical=True)\n', (19427, 19441), False, 'import psutil\n'), ((23144, 23157), 'copy.deepcopy', 'deepcopy', (['dfs'], {}), '(dfs)\n', (23152, 23157), False, 'from copy import deepcopy\n'), ((23786, 23818), 'data.download.DatasetDownloader.get_data_dir', 'DatasetDownloader.get_data_dir', ([], {}), '()\n', (23816, 23818), 
False, 'from data.download import DatasetDownloader\n'), ((23839, 23887), 'os.path.join', 'os.path.join', (['data_dir', '"""preprocessed"""', 'filename'], {}), "(data_dir, 'preprocessed', filename)\n", (23851, 23887), False, 'import os\n'), ((24056, 24087), 'pickle.loads', 'pickle.loads', (['preprocessed_data'], {}), '(preprocessed_data)\n', (24068, 24087), False, 'import pickle\n'), ((31759, 31773), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (31771, 31773), True, 'import pandas as pd\n'), ((34855, 34869), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (34867, 34869), True, 'import pandas as pd\n'), ((39972, 39988), 'copy.deepcopy', 'deepcopy', (['series'], {}), '(series)\n', (39980, 39988), False, 'from copy import deepcopy\n'), ((40071, 40112), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'series_column_names'}), '(columns=series_column_names)\n', (40083, 40112), True, 'import pandas as pd\n'), ((42395, 42409), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (42407, 42409), True, 'import pandas as pd\n'), ((44272, 44303), 'tarfile.open', 'tarfile.open', (['file_path', '"""r:gz"""'], {}), "(file_path, 'r:gz')\n", (44284, 44303), False, 'import tarfile\n'), ((45415, 45458), 'data.download.DatasetDownloader.get_file_names', 'DatasetDownloader.get_file_names', (['file_path'], {}), '(file_path)\n', (45447, 45458), False, 'from data.download import DatasetDownloader\n'), ((46287, 46341), 'data.download.DatasetDownloader.get_file_names_for', 'DatasetDownloader.get_file_names_for', (['file_path', 'token'], {}), '(file_path, token)\n', (46323, 46341), False, 'from data.download import DatasetDownloader\n'), ((50588, 50619), 'scipy.interpolate.interp1d', 'interp1d', (["acc['time']", "acc['x']"], {}), "(acc['time'], acc['x'])\n", (50596, 50619), False, 'from scipy.interpolate import interp1d\n'), ((50668, 50699), 'scipy.interpolate.interp1d', 'interp1d', (["acc['time']", "acc['y']"], {}), "(acc['time'], acc['y'])\n", (50676, 50699), False, 
'from scipy.interpolate import interp1d\n'), ((50748, 50779), 'scipy.interpolate.interp1d', 'interp1d', (["acc['time']", "acc['z']"], {}), "(acc['time'], acc['z'])\n", (50756, 50779), False, 'from scipy.interpolate import interp1d\n'), ((51466, 51521), 'pyts.transformation.PAA', 'PAA', ([], {'window_size': '(5)', 'output_size': 'None', 'overlapping': '(False)'}), '(window_size=5, output_size=None, overlapping=False)\n', (51469, 51521), False, 'from pyts.transformation import PAA\n'), ((52990, 53052), 'copy.deepcopy', 'deepcopy', (["sensor_data[sensor_data['sensor'] == 'acceleration']"], {}), "(sensor_data[sensor_data['sensor'] == 'acceleration'])\n", (52998, 53052), False, 'from copy import deepcopy\n'), ((15041, 15081), 'scipy.spatial.distance.cdist', 'cdist', (['small_df', 'small_df'], {'metric': 'metric'}), '(small_df, small_df, metric=metric)\n', (15046, 15081), False, 'from scipy.spatial.distance import cdist, squareform\n'), ((19713, 19866), 'data.DTWThread.DTWThread', 'DTWThread', ([], {'thread_id': 'i', 'num_threads': 'num_threads', 'segment_pairings': 'segment_pairings', 'distance_matrix': 'dist_matrix', 'data_to_process': 'data', 'norm': 'norm'}), '(thread_id=i, num_threads=num_threads, segment_pairings=\n segment_pairings, distance_matrix=dist_matrix, data_to_process=data,\n norm=norm)\n', (19722, 19866), False, 'from data.DTWThread import DTWThread\n'), ((40359, 40405), 'pandas.to_datetime', 'pd.to_datetime', (['copied_series.index'], {'unit': '"""ms"""'}), "(copied_series.index, unit='ms')\n", (40373, 40405), True, 'import pandas as pd\n'), ((45346, 45378), 'data.download.DatasetDownloader.get_data_dir', 'DatasetDownloader.get_data_dir', ([], {}), '()\n', (45376, 45378), False, 'from data.download import DatasetDownloader\n'), ((45547, 45580), 'os.path.join', 'os.path.join', (['file_path', 'tar_name'], {}), '(file_path, tar_name)\n', (45559, 45580), False, 'import os\n'), ((46221, 46253), 'data.download.DatasetDownloader.get_data_dir', 
'DatasetDownloader.get_data_dir', ([], {}), '()\n', (46251, 46253), False, 'from data.download import DatasetDownloader\n'), ((46430, 46463), 'os.path.join', 'os.path.join', (['file_path', 'tar_name'], {}), '(file_path, tar_name)\n', (46442, 46463), False, 'import os\n'), ((47658, 47672), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (47670, 47672), True, 'import pandas as pd\n'), ((51673, 51714), 'numpy.array', 'np.array', (['[trip[label]]'], {'dtype': 'np.float64'}), '([trip[label]], dtype=np.float64)\n', (51681, 51714), True, 'import numpy as np\n'), ((51815, 51852), 'pandas.DataFrame', 'pd.DataFrame', (['container', 'trip.columns'], {}), '(container, trip.columns)\n', (51827, 51852), True, 'import pandas as pd\n'), ((52467, 52503), 'copy.deepcopy', 'deepcopy', (["df[df['sensor'] == sensor]"], {}), "(df[df['sensor'] == sensor])\n", (52475, 52503), False, 'from copy import deepcopy\n'), ((52631, 52663), 'pandas.to_datetime', 'pd.to_datetime', (['start'], {'unit': '"""ms"""'}), "(start, unit='ms')\n", (52645, 52663), True, 'import pandas as pd\n'), ((52726, 52756), 'pandas.to_datetime', 'pd.to_datetime', (['end'], {'unit': '"""ms"""'}), "(end, unit='ms')\n", (52740, 52756), True, 'import pandas as pd\n'), ((6291, 6322), 'pickle.dumps', 'pickle.dumps', (['preprocessed_data'], {}), '(preprocessed_data)\n', (6303, 6322), False, 'import pickle\n'), ((12550, 12584), 'pandas.concat', 'pd.concat', (['[result, splitted_trip]'], {}), '([result, splitted_trip])\n', (12559, 12584), True, 'import pandas as pd\n'), ((15212, 15263), 'pandas.DataFrame', 'pd.DataFrame', (['distance_matrix'], {'columns': 'column_names'}), '(distance_matrix, columns=column_names)\n', (15224, 15263), True, 'import pandas as pd\n'), ((18048, 18099), 'pandas.DataFrame', 'pd.DataFrame', (['distance_matrix'], {'columns': 'column_names'}), '(distance_matrix, columns=column_names)\n', (18060, 18099), True, 'import pandas as pd\n'), ((20977, 21011), 'pandas.DataFrame', 'pd.DataFrame', ([], 
{'columns': 'column_names'}), '(columns=column_names)\n', (20989, 21011), True, 'import pandas as pd\n'), ((26166, 26203), 'numpy.array', 'np.array', (["[df['x'], df['y'], df['z']]"], {}), "([df['x'], df['y'], df['z']])\n", (26174, 26203), True, 'import numpy as np\n'), ((31956, 31968), 'copy.deepcopy', 'deepcopy', (['df'], {}), '(df)\n', (31964, 31968), False, 'from copy import deepcopy\n'), ((42814, 42855), 'pandas.concat', 'pd.concat', (["[result, trip_i['annotation']]"], {}), "([result, trip_i['annotation']])\n", (42823, 42855), True, 'import pandas as pd\n'), ((44555, 44603), 'pandas.read_csv', 'pd.read_csv', (['f'], {'header': '(0)', 'sep': '""","""', 'quotechar': '"""\\""""'}), '(f, header=0, sep=\',\', quotechar=\'"\')\n', (44566, 44603), True, 'import pandas as pd\n'), ((48480, 48494), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (48492, 48494), True, 'import pandas as pd\n'), ((48520, 48534), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (48532, 48534), True, 'import pandas as pd\n'), ((49260, 49287), 'pandas.concat', 'pd.concat', (['[helper, filler]'], {}), '([helper, filler])\n', (49269, 49287), True, 'import pandas as pd\n'), ((7711, 7773), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'Preprocessor.DATAFRAME_COLUMN_NAMES[key]'}), '(columns=Preprocessor.DATAFRAME_COLUMN_NAMES[key])\n', (7723, 7773), True, 'import pandas as pd\n'), ((37541, 37557), 'copy.deepcopy', 'deepcopy', (['series'], {}), '(series)\n', (37549, 37557), False, 'from copy import deepcopy\n'), ((40303, 40320), 'numpy.dtype', 'np.dtype', (['"""Int64"""'], {}), "('Int64')\n", (40311, 40320), True, 'import numpy as np\n'), ((40509, 40526), 'numpy.dtype', 'np.dtype', (['"""Int64"""'], {}), "('Int64')\n", (40517, 40526), True, 'import numpy as np\n'), ((41307, 41337), 'pandas.concat', 'pd.concat', (['[result, resampled]'], {}), '([result, resampled])\n', (41316, 41337), True, 'import pandas as pd\n'), ((49170, 49219), 'numpy.array', 'np.array', (["[filler['x'], 
filler['y'], filler['z']]"], {}), "([filler['x'], filler['y'], filler['z']])\n", (49178, 49219), True, 'import numpy as np\n'), ((50984, 51006), 'numpy.array', 'np.array', (['[ax, ay, az]'], {}), '([ax, ay, az])\n', (50992, 51006), True, 'import numpy as np\n'), ((32246, 32285), 'pandas.to_datetime', 'pd.to_datetime', (['result.index'], {'unit': 'unit'}), '(result.index, unit=unit)\n', (32260, 32285), True, 'import pandas as pd\n'), ((37496, 37513), 'numpy.dtype', 'np.dtype', (['"""Int64"""'], {}), "('Int64')\n", (37504, 37513), True, 'import numpy as np\n')] |
import numpy as np # Sử dụng thư viện Numpy (xử lý ma trận) với cách gọi ngắn là np
class Gauss_Jordan_Algorithms:
    """Solve a linear system A*X = B with Gauss-Jordan elimination.

    The augmented matrix [A | B] is read from ``GJ_input.txt`` at class
    definition time; the conclusion (unique solution / infinitely many /
    none) and the solution matrix are written to ``GJ_output.txt``.

    NOTE(review): ``matrix``, ``index_row``, ``index_column`` and ``result``
    are class attributes, so all instances share the same state.
    """
    np.set_printoptions(suppress=True, linewidth=np.inf, precision=10) # align matrices printed on screen
    matrix = np.loadtxt("GJ_input.txt", delimiter=' ') # read the input matrix from file
    index_row = [] # rows of the pivot elements, in elimination order
    index_column = [] # columns of the pivot elements, in elimination order
    result = np.zeros(
        (len(matrix[0]) - 1, len(matrix[0]))) # result matrix, initialised to all zeros
    def solutions_checker(self):
        """For the unique-solution case: re-check the solution by multiplying
        the result matrix with the original coefficients."""
        # NOTE(review): reads "input.txt" while the class loads "GJ_input.txt"
        # -- confirm this difference is intended.
        A = np.loadtxt("input.txt", delimiter=' ')[:, :-1] # coefficient matrix in the equation AX=B
        print()
        print("- - - - - Kiểm tra nghiệm - - - - -")
        print(np.matmul(A, np.delete(self.result, np.s_[1:], axis=1))) # print matrix A times matrix X
    def find_pivot_element(self):
        """Find the next pivot element among the rows not yet used."""
        # global index_row, index_column
        index_temp = []
        pivot_element = 0
        for row in range(0, len(self.matrix)):
            if row in self.index_row:
                continue # skip: this row already supplied a pivot
            max_row = np.amax(abs(self.matrix[row, 0:(len(self.matrix[0]) - 1)])) # largest element in row `row`
            if (1 in self.matrix[row, 0:(len(self.matrix[0]) - 1)]) or (
                    -1 in self.matrix[row,
                    0:(len(self.matrix[0]) - 1)]): # if there is a 1 or -1 in row `row` => choose it as the pivot
                pivot_element = 1
                row_pivot_element = row
                index_temp = np.where(abs(self.matrix[row, 0:(len(self.matrix[0]) - 1)]) == pivot_element)
                index_temp = index_temp[:1]
                index_temp = index_temp[0][0]
                break
            elif max_row > pivot_element: # keep the pivot value and locate its position in the matrix
                pivot_element = max_row
                row_pivot_element = row
                index_temp = np.where(abs(self.matrix[row, 0:(len(self.matrix[0]) - 1)]) == pivot_element)
                index_temp = index_temp[:1]
                index_temp = index_temp[0][0]
        if pivot_element != 0: # remember the row and column of the pivot
            self.index_row.append(row_pivot_element)
            self.index_column.append(int(index_temp))
        """ In ra giá trị và vị trí phần tử giải"""
        # print("Phan tu giai: ", round(matrix[index_row[-1]][index_column[-1]], 10))
        # print("Vi tri: ", index_row[-1] + 1, index_column[-1] + 1)
        # print()
    def Gauss_Jordan_method(self):
        """One Gauss-Jordan elimination step around a freshly found pivot."""
        # global matrix
        self.find_pivot_element()
        zeros_array = np.zeros((len(self.matrix), len(self.matrix[0]))) # create a zero matrix
        for row in range(0, len(self.matrix)):
            if row == self.index_row[-1]:
                continue
            m = - self.matrix[row][self.index_column[-1]] / self.matrix[self.index_row[-1]][
                self.index_column[-1]] # compute the multiplier m
            zeros_array[row] = self.matrix[self.index_row[-1]] * m
        self.matrix = self.matrix + zeros_array
    def normalize_pivot_element(self):
        """Normalise each pivot row so the pivot coefficient equals 1."""
        for i in range(len(self.index_row)):
            self.matrix[self.index_row[i]] = self.matrix[self.index_row[i]] / self.matrix[self.index_row[i]][
                self.index_column[i]]
        # print(self.matrix)
    def rank(self):
        """Compare rank(A) with rank([A|B]) and decide the solution case."""
        rank1 = 0 # rank of the coefficient matrix A
        rank2 = 0 # rank of the augmented matrix
        for row in range(0, len(self.matrix)):
            if np.amax(abs(self.matrix[row, 0:(len(self.matrix[0]) - 1)])) > 0:
                rank1 = rank1 + 1
            if np.amax(abs(self.matrix[row, 0:len(self.matrix[0])])) > 0:
                rank2 = rank2 + 1
        if rank1 < rank2:
            # rank(A) < rank([A|B]): the system has no solution
            # print("He PT vo nghiem!")
            f=open("GJ_output.txt","w")
            f.write("He PT vo nghiem!")
            f.close()
        elif rank1 < (len(self.matrix[0]) - 1):
            # rank below the number of unknowns: infinitely many solutions
            # print("He PT co vo so nghiem!")
            self.display_solutions()
        else:
            # full rank: a unique solution exists
            # print("He PT co nghiem duy nhat!")
            self.display_solutions()
        # solutions_checker()
    def display_solutions(self):
        """Write the results into ``result`` and save them to GJ_output.txt."""
        # store the results in the result matrix
        # global result
        for column in range(len(self.matrix[0]) - 1):
            if column in self.index_column:
                vt = self.index_column.index(column)
                self.result[column][0] = self.matrix[self.index_row[vt]][len(self.matrix[0]) - 1]
                for i in range(len(self.matrix[0]) - 1):
                    if i not in self.index_column:
                        self.result[column][i + 1] = -self.matrix[self.index_row[vt]][i]
            else:
                # free variable: unit coefficient on itself
                self.result[column][column + 1] = 1
        # print the result matrix to screen
        # print(result)
        # write the results to output file
        np.savetxt('GJ_output.txt', self.result, fmt='%.5f') # %.5f: keep 5 digits after the decimal point
    # Main program
    def main(self):
        """Run the full elimination, normalise the pivots and classify the system."""
        # print(matrix)
        # print("- - - - - - - - - - - - - - - - - - - -")
        # print()
        for i in range(0, min(len(self.matrix), len(self.matrix[0]))):
            self.Gauss_Jordan_method()
            # print(matrix)
            # print("- - - - - - - - - - - - - - - - - - - -")
        # print("- - - - - Chuẩn hóa hệ số - - - - -")
        self.normalize_pivot_element()
        # print("- - - - - Kết luận - - - - -")
        self.rank()
# Run the solver; on failure report it in the output file so the caller
# always finds GJ_output.txt.
try:
    RUN = Gauss_Jordan_Algorithms()
    RUN.main()
except Exception:
    # A bare `except:` would also swallow SystemExit/KeyboardInterrupt;
    # Exception keeps Ctrl-C and interpreter shutdown working.
    with open("GJ_output.txt", "w") as f:
        f.write("Da co loi xay ra!")
| [
"numpy.delete",
"numpy.loadtxt",
"numpy.savetxt",
"numpy.set_printoptions"
] | [((124, 190), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)', 'linewidth': 'np.inf', 'precision': '(10)'}), '(suppress=True, linewidth=np.inf, precision=10)\n', (143, 190), True, 'import numpy as np\n'), ((250, 291), 'numpy.loadtxt', 'np.loadtxt', (['"""GJ_input.txt"""'], {'delimiter': '""" """'}), "('GJ_input.txt', delimiter=' ')\n", (260, 291), True, 'import numpy as np\n'), ((5606, 5658), 'numpy.savetxt', 'np.savetxt', (['"""GJ_output.txt"""', 'self.result'], {'fmt': '"""%.5f"""'}), "('GJ_output.txt', self.result, fmt='%.5f')\n", (5616, 5658), True, 'import numpy as np\n'), ((819, 857), 'numpy.loadtxt', 'np.loadtxt', (['"""input.txt"""'], {'delimiter': '""" """'}), "('input.txt', delimiter=' ')\n", (829, 857), True, 'import numpy as np\n'), ((1006, 1047), 'numpy.delete', 'np.delete', (['self.result', 'np.s_[1:]'], {'axis': '(1)'}), '(self.result, np.s_[1:], axis=1)\n', (1015, 1047), True, 'import numpy as np\n')] |
import scipy
import numpy as np
import torch
"""
FUNCTION FOR THE CANONICAL BETA DISTRIBUTION
"""
def compute_constraint(alpha, beta, x):
    """Gradient of the Beta log-likelihood with respect to ``alpha``.

    Evaluates ``psi(alpha) - psi(alpha + beta) - log(x)``; the saturated
    ``alpha`` is the root of this expression in ``alpha``.

    Input:
        - alpha: float
            Parameter a (or alpha)
        - beta: float
            Parameter b (or beta)
        - x: float
            Data value
    """
    digamma = scipy.special.digamma
    return digamma(alpha) - digamma(alpha + beta) - np.log(x)
def compute_alpha(beta, x, eps=10**(-8)):
    """Solve ``compute_constraint(alpha, beta, x) == 0`` for alpha by bisection.

    Bisection runs on [0, 1e10] and stops once the constraint's magnitude
    drops below ``eps``.

    Input:
        - beta: float
            Fixed parameter b (or beta)
        - x: float
            Data value
        - eps: float
            Tolerance on the constraint value (not on alpha itself)

    Returns the saturated alpha as a float.
    """
    min_val, max_val = 0, 10**10
    alpha = (min_val + max_val) / 2
    # Evaluate the constraint once per iteration; each evaluation costs two
    # digamma calls, and the original loop evaluated it twice per iteration.
    current = compute_constraint(alpha, beta, x)
    while np.abs(current) > eps:
        if current > 0:
            max_val = (min_val + max_val) / 2
        else:
            min_val = (min_val + max_val) / 2
        alpha = (min_val + max_val) / 2
        current = compute_constraint(alpha, beta, x)
    return alpha
def compute_alpha_gene(beta, x, eps=10**(-8)):
    """
    Wrapper to compute the alpha coefficients for a full gene.

    ``x`` is indexed positionally, so any 1-d array-like exposing ``shape``
    works. ``eps`` is accepted for interface symmetry but not forwarded.
    """
    print('START ONE', flush=True)
    alphas = []
    for i in range(x.shape[0]):
        alphas.append(compute_alpha(beta, x[i]))
    return alphas
"""
FUNCTION FOR THE REPARAMETRIZATION
"""
def log_part_grad_zero_reparam(mu, nu, x):
    """Gradient condition for the (mu, nu)-reparametrised Beta distribution.

    Computes ``psi(mu*nu) - psi(nu - mu*nu) + log(1 - x) - log(x)`` with
    torch operations, so tensor inputs stay differentiable.
    """
    alpha_term = torch.digamma(mu * nu)
    beta_term = torch.digamma(nu - mu * nu)
    return alpha_term - beta_term + torch.log(1 - x) - torch.log(x)
def compute_mu(nu, x, eps=10**(-6), maxiter=1000):
    """Solve ``log_part_grad_zero_reparam(mu, nu, x) == 0`` for mu by bisection.

    Searches mu in (0, 1); stops when the gradient magnitude falls below
    ``eps`` or after ``maxiter`` iterations, in which case a warning with the
    last residual is printed.
    """
    lower, upper = 0, 1
    mu = (lower + upper) / 2
    residual = log_part_grad_zero_reparam(mu, nu, x)
    iteration = 0
    while np.abs(residual) > eps:
        if residual > 0:
            upper = (lower + upper) / 2
        else:
            lower = (lower + upper) / 2
        mu = (lower + upper) / 2
        residual = log_part_grad_zero_reparam(mu, nu, x)
        if iteration > maxiter:
            print('CONVERGENCE NOT REACHED FOR BETA REPARAM. LAST VALUE: %1.3f'%(residual))
            break
        iteration += 1
    return mu
def compute_mu_gene(nu, X, eps=10**-6, maxiter=1000):
    """Wrapper: compute the saturated mu for every observation in ``X``."""
    results = []
    for x in X:
        results.append(compute_mu(nu, x, eps=eps, maxiter=maxiter))
    return results
| [
"numpy.abs",
"scipy.special.digamma",
"torch.log",
"numpy.log",
"torch.digamma"
] | [((497, 506), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (503, 506), True, 'import numpy as np\n'), ((1299, 1311), 'torch.log', 'torch.log', (['x'], {}), '(x)\n', (1308, 1311), False, 'import torch\n'), ((1511, 1532), 'numpy.abs', 'np.abs', (['current_value'], {}), '(current_value)\n', (1517, 1532), True, 'import numpy as np\n'), ((428, 456), 'scipy.special.digamma', 'scipy.special.digamma', (['alpha'], {}), '(alpha)\n', (449, 456), False, 'import scipy\n'), ((459, 494), 'scipy.special.digamma', 'scipy.special.digamma', (['(alpha + beta)'], {}), '(alpha + beta)\n', (480, 494), False, 'import scipy\n'), ((1282, 1298), 'torch.log', 'torch.log', (['(1 - x)'], {}), '(1 - x)\n', (1291, 1298), False, 'import torch\n'), ((1227, 1249), 'torch.digamma', 'torch.digamma', (['(mu * nu)'], {}), '(mu * nu)\n', (1240, 1249), False, 'import torch\n'), ((1252, 1279), 'torch.digamma', 'torch.digamma', (['(nu - mu * nu)'], {}), '(nu - mu * nu)\n', (1265, 1279), False, 'import torch\n')] |
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from builddata_ecir import *
from capsuleNet_SEARCH17 import CapsE
# Fix the NumPy and TensorFlow seeds so runs are reproducible.
np.random.seed(1234)
tf.set_random_seed(1234)
# Parameters
# ==================================================
# NOTE(review): argparse's type=bool (used below) treats any non-empty
# string as True; passing "--useInitialization False" still yields True.
parser = ArgumentParser("CapsE", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument("--data", default="./data/", help="Data sources.")
parser.add_argument("--run_folder", default="./", help="Data sources.")
parser.add_argument("--name", default="SEARCH17", help="Name of the dataset.")
parser.add_argument("--embedding_dim", default=200, type=int, help="Dimensionality of character embedding (fixed: 200)")
parser.add_argument("--filter_size", default=1, type=int, help="Comma-separated filter sizes (default: '3,4,5')")
parser.add_argument("--num_filters", default=400, type=int, help="Number of filters per filter size (default: 128)")
parser.add_argument("--learning_rate", default=0.00001, type=float, help="Learning rate")
parser.add_argument("--batch_size", default=128, type=int, help="Batch Size")
parser.add_argument("--neg_ratio", default=1.0, help="Number of negative triples generated by positive (default: 1.0)")
parser.add_argument("--useInitialization", default=True, type=bool, help="Using the pretrained embeddings")
parser.add_argument("--num_epochs", default=100, type=int, help="Number of training epochs")
parser.add_argument("--savedEpochs", default=10, type=int, help="")
parser.add_argument("--allow_soft_placement", default=True, type=bool, help="Allow device soft device placement")
parser.add_argument("--log_device_placement", default=False, type=bool, help="Log placement of ops on devices")
parser.add_argument("--model_name", default='search17model', help="")
parser.add_argument("--useConstantInit", action='store_true')
parser.add_argument('--iter_routing', default=1, type=int, help='number of iterations in routing algorithm')
parser.add_argument('--num_outputs_secondCaps', default=1, type=int, help='')
parser.add_argument('--vec_len_secondCaps', default=10, type=int, help='')
args = parser.parse_args()
print(args)
# Load data: triples, their candidate-ranking lists and index mappings for
# queries, users and documents (helpers come from builddata_ecir).
print("Loading data...")
train_triples, train_rank_triples, train_val_triples, valid_triples, valid_rank_triples, valid_val_triples, \
test_triples, test_rank_triples, test_val_triples, query_indexes, user_indexes, doc_indexes, \
indexes_query, indexes_user, indexes_doc = build_data_ecir()
data_size = len(train_triples)
train_batch = Batch_Loader_ecir(train_triples, train_val_triples, batch_size=args.batch_size)
# The pretrained vectors below are 200-dimensional, so the embedding size
# must be a multiple of 200.
assert args.embedding_dim % 200 == 0
pretrained_query = init_dataset_ecir(args.data + args.name + '/query2vec.200.init')
pretrained_user = init_dataset_ecir(args.data + args.name + '/user2vec.200.init')
pretrained_doc = init_dataset_ecir(args.data + args.name + '/doc2vec.200.init')
print("Using pre-trained initialization.")
# Reorder the pretrained vectors to follow the dataset's index mappings.
lstEmbedQuery = assignEmbeddings(pretrained_query, query_indexes)
lstEmbedUser = assignEmbeddings(pretrained_user, user_indexes)
lstEmbedDoc = assignEmbeddings(pretrained_doc, doc_indexes)
lstEmbedQuery = np.array(lstEmbedQuery, dtype=np.float32)
lstEmbedUser = np.array(lstEmbedUser, dtype=np.float32)
lstEmbedDoc = np.array(lstEmbedDoc, dtype=np.float32)
print("Loading data... finished!")
# Training
# ==================================================
# TF1-style training: build the graph, create a session, then run epochs.
with tf.Graph().as_default():
    session_conf = tf.ConfigProto(allow_soft_placement=args.allow_soft_placement, log_device_placement=args.log_device_placement)
    session_conf.gpu_options.allow_growth = True
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        global_step = tf.Variable(0, name="global_step", trainable=False)
        # The graph batch size is 20x the training batch size; prediction
        # batches below are padded to a multiple of 20 * args.batch_size.
        capse = CapsE(sequence_length=3,
                      batch_size=20 * args.batch_size,
                      initialization=[lstEmbedQuery, lstEmbedUser, lstEmbedDoc],
                      embedding_size=200,
                      filter_size=args.filter_size,
                      num_filters=args.num_filters,
                      iter_routing=args.iter_routing,
                      num_outputs_secondCaps=args.num_outputs_secondCaps,
                      vec_len_secondCaps=args.vec_len_secondCaps,
                      useConstantInit=args.useConstantInit
                      )
        # Define Training procedure
        #optimizer = tf.contrib.opt.NadamOptimizer(1e-3)
        optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
        #optimizer = tf.train.RMSPropOptimizer(learning_rate=args.learning_rate)
        #optimizer = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
        grads_and_vars = optimizer.compute_gradients(capse.total_loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
        out_dir = os.path.abspath(os.path.join(args.run_folder, "runs_CapsE_SEARCH17", args.model_name))
        print("Writing to {}\n".format(out_dir))
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        # Initialize all variables
        sess.run(tf.global_variables_initializer())
        def train_step(x_batch, y_batch):
            """
            A single training step
            """
            feed_dict = {
                capse.input_x: x_batch,
                capse.input_y: y_batch
            }
            _, step, loss = sess.run([train_op, global_step, capse.total_loss], feed_dict)
            return loss
        # Predict function to predict scores for test data
        def predict(x_batch, y_batch):
            feed_dict = {
                capse.input_x: x_batch,
                capse.input_y: y_batch,
                capse.dropout_keep_prob: 1.0
            }
            scores = sess.run([capse.predictions], feed_dict)
            return scores
        def test_prediction(x_batch, y_batch, lstOriginalRank):
            """Score every candidate triple and return, per original list,
            the 1-based rank of the gold item (the one at position 0)."""
            new_x_batch = np.concatenate(x_batch)
            new_y_batch = np.concatenate(y_batch, axis=0)
            # Pad to a multiple of the graph batch size by repeating the
            # last example; padded scores are discarded via lstOriginalRank.
            while len(new_x_batch) % (args.batch_size * 20) != 0:
                new_x_batch = np.append(new_x_batch, np.array([new_x_batch[-1]]), axis=0)
                new_y_batch = np.append(new_y_batch, np.array([new_y_batch[-1]]), axis=0)
            results = []
            listIndexes = range(0, len(new_x_batch), 20 * args.batch_size)
            for tmpIndex in range(len(listIndexes) - 1):
                results = np.append(results,
                                    predict(new_x_batch[listIndexes[tmpIndex]:listIndexes[tmpIndex + 1]],
                                            new_y_batch[listIndexes[tmpIndex]:listIndexes[tmpIndex + 1]]))
            results = np.append(results,
                                predict(new_x_batch[listIndexes[-1]:], new_y_batch[listIndexes[-1]:]))
            lstresults = []
            _start = 0
            for tmp in lstOriginalRank:
                _end = _start + len(tmp)
                # Rank of element 0 (the original triple) in ascending score order.
                lstsorted = np.argsort(results[_start:_end])
                lstresults.append(np.where(lstsorted == 0)[0] + 1)
                _start = _end
            return lstresults
        # NOTE(review): '.cls.' + '.txt' yields a '.cls..txt' suffix -- confirm intended.
        wri = open(checkpoint_prefix + '.cls.' + '.txt', 'w')
        lstvalid_mrr = []
        lsttest_mrr = []
        num_batches_per_epoch = int((data_size - 1) / (args.batch_size)) + 1
        for epoch in range(args.num_epochs):
            for batch_num in range(num_batches_per_epoch):
                x_batch, y_batch = train_batch()
                train_step(x_batch, y_batch)
                current_step = tf.train.global_step(sess, global_step)  # NOTE(review): value unused
            # Evaluate on validation and test after every epoch.
            valid_results = test_prediction(valid_triples, valid_val_triples, valid_rank_triples)
            test_results = test_prediction(test_triples, test_val_triples, test_rank_triples)
            valid_mrr = computeMRR(valid_results)
            test_mrr = computeMRR(test_results)
            test_p1 = computeP1(test_results)
            lstvalid_mrr.append(valid_mrr)
            lsttest_mrr.append([test_mrr, test_p1])
            wri.write("epoch " + str(epoch) + ": " + str(valid_mrr) + " " + str(test_mrr) + " " + str(test_p1) + "\n")
        # Report test metrics at the epoch with the best validation MRR.
        index_valid_max = np.argmax(lstvalid_mrr)
        wri.write("\n--------------------------\n")
        wri.write("\nBest mrr in valid at epoch " + str(index_valid_max) + ": " + str(lstvalid_mrr[index_valid_max]) + "\n")
        wri.write("\nMRR and P1 in test: " + str(lsttest_mrr[index_valid_max][0]) + " " + str(lsttest_mrr[index_valid_max][1]) + "\n")
        wri.close()
| [
"numpy.argsort",
"numpy.array",
"capsuleNet_SEARCH17.CapsE",
"tensorflow.set_random_seed",
"tensorflow.Graph",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.where",
"tensorflow.Session",
"numpy.random.seed",
"numpy.concatenate",
"tensorflow.ConfigProto",
"tensorflow.train.AdamOptimizer... | [((240, 260), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (254, 260), True, 'import numpy as np\n'), ((261, 285), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1234)'], {}), '(1234)\n', (279, 285), True, 'import tensorflow as tf\n'), ((362, 464), 'argparse.ArgumentParser', 'ArgumentParser', (['"""CapsE"""'], {'formatter_class': 'ArgumentDefaultsHelpFormatter', 'conflict_handler': '"""resolve"""'}), "('CapsE', formatter_class=ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n", (376, 464), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((3256, 3297), 'numpy.array', 'np.array', (['lstEmbedQuery'], {'dtype': 'np.float32'}), '(lstEmbedQuery, dtype=np.float32)\n', (3264, 3297), True, 'import numpy as np\n'), ((3313, 3353), 'numpy.array', 'np.array', (['lstEmbedUser'], {'dtype': 'np.float32'}), '(lstEmbedUser, dtype=np.float32)\n', (3321, 3353), True, 'import numpy as np\n'), ((3368, 3407), 'numpy.array', 'np.array', (['lstEmbedDoc'], {'dtype': 'np.float32'}), '(lstEmbedDoc, dtype=np.float32)\n', (3376, 3407), True, 'import numpy as np\n'), ((3558, 3672), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'args.allow_soft_placement', 'log_device_placement': 'args.log_device_placement'}), '(allow_soft_placement=args.allow_soft_placement,\n log_device_placement=args.log_device_placement)\n', (3572, 3672), True, 'import tensorflow as tf\n'), ((3729, 3760), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (3739, 3760), True, 'import tensorflow as tf\n'), ((3811, 3862), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (3822, 3862), True, 'import tensorflow as tf\n'), ((3879, 4263), 'capsuleNet_SEARCH17.CapsE', 'CapsE', ([], {'sequence_length': '(3)', 'batch_size': '(20 * args.batch_size)', 
'initialization': '[lstEmbedQuery, lstEmbedUser, lstEmbedDoc]', 'embedding_size': '(200)', 'filter_size': 'args.filter_size', 'num_filters': 'args.num_filters', 'iter_routing': 'args.iter_routing', 'num_outputs_secondCaps': 'args.num_outputs_secondCaps', 'vec_len_secondCaps': 'args.vec_len_secondCaps', 'useConstantInit': 'args.useConstantInit'}), '(sequence_length=3, batch_size=20 * args.batch_size, initialization=[\n lstEmbedQuery, lstEmbedUser, lstEmbedDoc], embedding_size=200,\n filter_size=args.filter_size, num_filters=args.num_filters,\n iter_routing=args.iter_routing, num_outputs_secondCaps=args.\n num_outputs_secondCaps, vec_len_secondCaps=args.vec_len_secondCaps,\n useConstantInit=args.useConstantInit)\n', (3884, 4263), False, 'from capsuleNet_SEARCH17 import CapsE\n'), ((4625, 4681), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'args.learning_rate'}), '(learning_rate=args.learning_rate)\n', (4647, 4681), True, 'import tensorflow as tf\n'), ((5272, 5309), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model"""'], {}), "(checkpoint_dir, 'model')\n", (5284, 5309), False, 'import os\n'), ((8525, 8548), 'numpy.argmax', 'np.argmax', (['lstvalid_mrr'], {}), '(lstvalid_mrr)\n', (8534, 8548), True, 'import numpy as np\n'), ((3514, 3524), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3522, 3524), True, 'import tensorflow as tf\n'), ((5044, 5113), 'os.path.join', 'os.path.join', (['args.run_folder', '"""runs_CapsE_SEARCH17"""', 'args.model_name'], {}), "(args.run_folder, 'runs_CapsE_SEARCH17', args.model_name)\n", (5056, 5113), False, 'import os\n'), ((5206, 5242), 'os.path.join', 'os.path.join', (['out_dir', '"""checkpoints"""'], {}), "(out_dir, 'checkpoints')\n", (5218, 5242), False, 'import os\n'), ((5325, 5355), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (5339, 5355), False, 'import os\n'), ((5369, 5396), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), 
'(checkpoint_dir)\n', (5380, 5396), False, 'import os\n'), ((5449, 5482), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5480, 5482), True, 'import tensorflow as tf\n'), ((6273, 6296), 'numpy.concatenate', 'np.concatenate', (['x_batch'], {}), '(x_batch)\n', (6287, 6296), True, 'import numpy as np\n'), ((6323, 6354), 'numpy.concatenate', 'np.concatenate', (['y_batch'], {'axis': '(0)'}), '(y_batch, axis=0)\n', (6337, 6354), True, 'import numpy as np\n'), ((7323, 7355), 'numpy.argsort', 'np.argsort', (['results[_start:_end]'], {}), '(results[_start:_end])\n', (7333, 7355), True, 'import numpy as np\n'), ((7906, 7945), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'global_step'], {}), '(sess, global_step)\n', (7926, 7945), True, 'import tensorflow as tf\n'), ((6475, 6502), 'numpy.array', 'np.array', (['[new_x_batch[-1]]'], {}), '([new_x_batch[-1]])\n', (6483, 6502), True, 'import numpy as np\n'), ((6565, 6592), 'numpy.array', 'np.array', (['[new_y_batch[-1]]'], {}), '([new_y_batch[-1]])\n', (6573, 6592), True, 'import numpy as np\n'), ((7390, 7414), 'numpy.where', 'np.where', (['(lstsorted == 0)'], {}), '(lstsorted == 0)\n', (7398, 7414), True, 'import numpy as np\n')] |
import numpy as np
from numpy import random
from sklearn.externals import joblib
import os
import dataset
# Either regenerate the formatted dataset from the raw CSV, or reload a
# previously saved copy from disk (falling back to regeneration on failure).
CREATE_NEW_DATA = True
if CREATE_NEW_DATA:
    print('Creating data...')
    X_train, X_test, y_train, y_test, char_mapping = dataset.format_data('data/train.csv')
    print('Created data!')
else:
    try:
        X_train = np.load('formatted_data/x_train.npy')
        X_test = np.load('formatted_data/x_test.npy')
        y_train = np.load('formatted_data/y_train.npy')
        y_test = np.load('formatted_data/y_test.npy')
        char_mapping = joblib.load('char_mapping.sav')
        print('Loaded data!')
    except Exception:
        # NOTE: deliberately best-effort — any load failure (missing file,
        # corrupt pickle, ...) falls back to rebuilding the data.  A bare
        # `except:` would also have swallowed KeyboardInterrupt/SystemExit.
        print('Failed to load data, creating new data.')
        X_train, X_test, y_train, y_test, char_mapping = dataset.format_data('data/train.csv')
# Encode a letter into a vector
def encode(char_mapping, letter):
    """Return a one-hot float32 vector for *letter*.

    :param char_mapping: dict mapping each character to its integer index
    :param letter: a single character; must be a key of char_mapping
    :return: numpy float32 array of length len(char_mapping), 1.0 at the
        letter's index and 0.0 elsewhere
    """
    # isinstance is the idiomatic type check (also accepts dict subclasses)
    assert isinstance(char_mapping, dict)
    assert letter in char_mapping
    arr = np.zeros(len(char_mapping), dtype=np.float32)
    arr[char_mapping[letter]] = 1
    return arr
def generator(batch_size, X, y, char_mapping, fn_encode):
    """Yield (inputs, targets) training batches forever, sampling rows at random.

    Each character is encoded via fn_encode with the final vocabulary slot
    dropped; the same two numpy buffers are reused for every batch yielded.
    """
    vocab_size = len(char_mapping)
    seq_len = len(X[0])
    target_len = len(y[0])
    inputs = np.empty((batch_size, seq_len, vocab_size - 1), dtype=np.float32)
    targets = np.empty((batch_size, target_len), dtype=np.float32)
    while True:
        for row in range(batch_size):
            pick = random.randint(len(X))
            inputs[row] = np.array([fn_encode(char_mapping, ch)[:-1] for ch in X[pick]],
                                   dtype=np.float32)
            targets[row] = y[pick]
        yield inputs, targets
output_size = len(y_train[0])  # number of target labels per sample
max_len = len(X_train[0])  # assumes all inputs share this length — TODO confirm padding upstream
# Create the model
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense
model = Sequential()
# single LSTM layer over one-hot characters; vocabulary size minus the slot dropped by the generator
model.add(LSTM(units=300, input_shape=(max_len,len(char_mapping)-1)))
model.add(Dropout(0.2))
# independent sigmoid per output unit with binary cross-entropy (multi-label setup)
model.add(Dense(units=output_size, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
print('Built model!')
if not os.path.isdir('checkpoints'):
    os.mkdir('checkpoints')
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
filepath = 'checkpoints/weights-improvement-{epoch:02d}-{loss:.4f}.h5'
# save weights only when training loss improves
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=0, save_best_only=True, mode='min')
# stop after one epoch without validation-loss improvement
early_stopping = EarlyStopping(monitor='val_loss', patience=1)
tensorboard = TensorBoard(log_dir='tensorboard_graph', histogram_freq=0, write_graph=True, write_images=True)
callbacks_list = [checkpoint, early_stopping, tensorboard]
batch_size = 128
# train from the (infinite) generators; steps sized so each epoch roughly covers the dataset once
model.fit_generator(generator=generator(batch_size,X_train,y_train,char_mapping,encode),
                    steps_per_epoch=len(X_train)//batch_size,
                    validation_data=generator(batch_size,X_test,y_test,char_mapping,encode),
                    validation_steps=len(X_test)//batch_size,
                    epochs=20, callbacks=callbacks_list)
model.save('model.h5') | [
"keras.callbacks.ModelCheckpoint",
"sklearn.externals.joblib.load",
"keras.callbacks.TensorBoard",
"keras.models.Sequential",
"os.path.isdir",
"os.mkdir",
"keras.callbacks.EarlyStopping",
"dataset.format_data",
"keras.layers.Dense",
"numpy.load",
"keras.layers.Dropout"
] | [((1728, 1740), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1738, 1740), False, 'from keras.models import Sequential\n'), ((2205, 2294), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""loss"""', 'verbose': '(0)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filepath, monitor='loss', verbose=0, save_best_only=True,\n mode='min')\n", (2220, 2294), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard\n'), ((2308, 2353), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(1)'}), "(monitor='val_loss', patience=1)\n", (2321, 2353), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard\n'), ((2368, 2467), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""tensorboard_graph"""', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)'}), "(log_dir='tensorboard_graph', histogram_freq=0, write_graph=True,\n write_images=True)\n", (2379, 2467), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard\n'), ((234, 271), 'dataset.format_data', 'dataset.format_data', (['"""data/train.csv"""'], {}), "('data/train.csv')\n", (253, 271), False, 'import dataset\n'), ((1821, 1833), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1828, 1833), False, 'from keras.layers import LSTM, Dropout, Dense\n'), ((1845, 1891), 'keras.layers.Dense', 'Dense', ([], {'units': 'output_size', 'activation': '"""sigmoid"""'}), "(units=output_size, activation='sigmoid')\n", (1850, 1891), False, 'from keras.layers import LSTM, Dropout, Dense\n'), ((1986, 2014), 'os.path.isdir', 'os.path.isdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (1999, 2014), False, 'import os\n'), ((2020, 2043), 'os.mkdir', 'os.mkdir', (['"""checkpoints"""'], {}), "('checkpoints')\n", (2028, 2043), False, 'import os\n'), ((332, 369), 'numpy.load', 'np.load', 
(['"""formatted_data/x_train.npy"""'], {}), "('formatted_data/x_train.npy')\n", (339, 369), True, 'import numpy as np\n'), ((387, 423), 'numpy.load', 'np.load', (['"""formatted_data/x_test.npy"""'], {}), "('formatted_data/x_test.npy')\n", (394, 423), True, 'import numpy as np\n'), ((442, 479), 'numpy.load', 'np.load', (['"""formatted_data/y_train.npy"""'], {}), "('formatted_data/y_train.npy')\n", (449, 479), True, 'import numpy as np\n'), ((497, 533), 'numpy.load', 'np.load', (['"""formatted_data/y_test.npy"""'], {}), "('formatted_data/y_test.npy')\n", (504, 533), True, 'import numpy as np\n'), ((557, 588), 'sklearn.externals.joblib.load', 'joblib.load', (['"""char_mapping.sav"""'], {}), "('char_mapping.sav')\n", (568, 588), False, 'from sklearn.externals import joblib\n'), ((745, 782), 'dataset.format_data', 'dataset.format_data', (['"""data/train.csv"""'], {}), "('data/train.csv')\n", (764, 782), False, 'import dataset\n')] |
import cv2
import numpy as np
from ..openvino_base.base_model import Base
class FacialLandmarks(Base):
    """Class for the Facial Landmarks Recognition Model.

    Wraps one of two OpenVINO landmark models, distinguished by name:
    ``landmarks-regression-retail`` (5-point) or ``facial-landmarks-35-adas``
    (35-point).
    """

    def __init__(
        self,
        model_name,
        source_width=None,
        source_height=None,
        device="CPU",
        threshold=0.60,
        extensions=None,
        **kwargs
    ):
        # Infer which of the two supported landmark models this is from its name.
        self._model_type = (
            "landmarks-regression-retail"
            if "regression" in model_name
            else "facial-landmarks-35-adas"
        )
        super().__init__(
            model_name,
            source_width,
            source_height,
            device,
            threshold,
            extensions,
            **kwargs
        )

    def preprocess_output(self, inference_results, image, show_bbox=False, **kwargs):
        """Map raw landmark predictions onto the face crop.

        :param inference_results: raw network output; coordinates normalised
            to the face crop
        :param image: the face crop (numpy array) the landmarks refer to
        :param show_bbox: when True, also call draw_output on the image
        :return: dict with keys 'face_landmarks' (model-specific coordinate
            structure, plus eye crops for the 5-point model) and 'image'

        Note: the original code computed the eye coordinates twice — once via
        a local get_eye_points() helper and again inline, with the inline
        values shadowing all twelve helper results. The dead helper has been
        removed; the surviving (inline) computation is unchanged.
        """
        flattened_predictions = np.vstack(inference_results).ravel()
        results = {}
        face_h, face_w = image.shape[:2]
        if len(flattened_predictions) == 70:
            # facial-landmarks-35-adas: 70 values = 35 (x, y) pairs
            landmarks = []
            for i in range(flattened_predictions.size // 2):
                x_coord = int(face_w * flattened_predictions[2 * i])
                y_coord = int(face_h * flattened_predictions[2 * i + 1])
                landmarks.append((x_coord, y_coord))
            face_landmarks = {
                "type": self._model_type,
                "eyes_coords": landmarks[:4],
                "nose_coords": landmarks[4:8],
                "mouth_coords": landmarks[8:12],
                "face_contour": landmarks[12:],
            }
        else:
            # landmarks-regression-retail: first four values are left eye
            # (x, y) and right eye (x, y); the remainder map as named below.
            coord_mapping = dict(
                zip(
                    (
                        "left_eye_x_coord",
                        "left_eye_y_coord",
                        "right_eye_x_coord",
                        "right_eye_y_coord",
                        "nose_coord",
                        "left_mouth_coord",
                        "right_mouth_coord",
                    ),
                    flattened_predictions,
                )
            )
            eye_size = 10  # half-size in pixels of the square crop around each eye
            left_eye_x_coord = int(flattened_predictions[0] * face_w)
            # left eye offset of face
            left_eye_xmin = left_eye_x_coord - eye_size
            left_eye_xmax = left_eye_x_coord + eye_size
            left_eye_y_coord = int(flattened_predictions[1] * face_h)
            # left eye offset of face
            left_eye_ymin = left_eye_y_coord - eye_size
            left_eye_ymax = left_eye_y_coord + eye_size
            right_eye_x_coord = int(flattened_predictions[2] * face_w)
            # right eye offset of face
            right_eye_xmin = right_eye_x_coord - eye_size
            right_eye_xmax = right_eye_x_coord + eye_size
            right_eye_y_coord = int(flattened_predictions[3] * face_h)
            # right eye offset of face
            right_eye_ymin = right_eye_y_coord - eye_size
            right_eye_ymax = right_eye_y_coord + eye_size
            # nose coordinates
            # NOTE(review): this is the normalised value only (not pixel scaled) — confirm intended
            nose_coord = coord_mapping["nose_coord"]
            # mouth coordinates
            # NOTE(review): both scaled by face_w — presumably x coords of the mouth corners; verify
            left_part_mouth = coord_mapping["left_mouth_coord"] * face_w
            right_part_mouth = coord_mapping["right_mouth_coord"] * face_w
            face_landmarks = {
                "type": self._model_type,
                "eyes_coords": {
                    "left_eye_point": (left_eye_x_coord, left_eye_y_coord),
                    "right_eye_point": (right_eye_x_coord, right_eye_y_coord),
                    "left_eye_image": image[
                        left_eye_ymin:left_eye_ymax, left_eye_xmin:left_eye_xmax
                    ],
                    "right_eye_image": image[
                        right_eye_ymin:right_eye_ymax, right_eye_xmin:right_eye_xmax,
                    ],
                },
                "nose_coords": {"nose_coords": nose_coord},
                "mouth_coords": {"mouth_coords": [left_part_mouth, right_part_mouth]},
            }
        results["face_landmarks"] = face_landmarks
        results["image"] = image
        if show_bbox:
            self.draw_output(results, image, **kwargs)
        return results

    @staticmethod
    def draw_output(results, image, radius=20, color=(0, 0, 255), thickness=2, **kwargs):
        """Draw a circle around ROI"""
        pass
        # TODO: Fix this for the handle the 2 different types of landmarks.
        # for landmark in face_landmarks:
        #     if landmark == "eyes_coords":
        #         for eye, coords in face_landmarks["eyes_coords"].items():
        #             if "point" in eye:
        #                 cv2.circle(
        #                     image, (coords[0], coords[1]), radius, color, thickness,
        #                 )
| [
"numpy.vstack"
] | [((923, 951), 'numpy.vstack', 'np.vstack', (['inference_results'], {}), '(inference_results)\n', (932, 951), True, 'import numpy as np\n')] |
# grid functions module
version = '29th April 2021'
# Nexus is a registered trademark of the Halliburton Company
import logging
log = logging.getLogger(__name__)
log.debug('grid_functions.py version %s', version)
# defs:
# def infill_block_geometry(extent, depth, thickness, x, y,
# k_increase_direction = 'down', depth_zero_tolerance = 0.01,
# x_y_zero_tolerance = 0.01,
# vertical_cell_overlap_tolerance = 0.01,
# snap_to_top_and_base = True, nudge = True):
# def resequence_nexus_corp(corner_points, eight_mode = False, undo = False):
# def random_cell(corner_points, border = 0.25, max_tries = 20, tolerance = 0.003):
# def determine_corp_ijk_handedness(corner_points, xyz_is_left_handed = True):
# def determine_corp_extent(corner_points, tolerance = 0.003):
#    def translate_corp(corner_points, x_shift = None, y_shift = None, min_xy = None, preserve_digits = None):
import math as maths
import random
import numpy as np
import resqpy.olio.factors as factors
import resqpy.olio.vector_utilities as vec
##########################################################################################
# infill_block_geometry():
# scans each logically vertical column of cells,
# assigning depth and thickness values for inactive cells sandwiched between active cells
# extent is a 3 element vector: nk,nj,ni
# depth is a 3D numpy float array of size matching extent
# depth values assumed more positive with increasing depth
# zero values in depth input array indicate inactive cells
# thickness is a 3D numpy float array of size matching extent
# x and y are each a 3D numpy float array of size matching extent
# k_increase_direction is either 'up' or 'down'
# depth_zero_tolerance is maximum value for which depth is considered zero
# vertical_cell_overlap_tolerance is the maximum acceptable overlap of cells on input
# snap_to_top_and_base, when True, causes cells above topmost active and below deepest active
# to be populated with pinched out cells at the top and bottom faces respectively
# nudge causes the depth of cells with greater k to be moved to clean up overlap over pinchouts
def infill_block_geometry(extent,
                          depth,
                          thickness,
                          x,
                          y,
                          k_increase_direction = 'down',
                          depth_zero_tolerance = 0.01,
                          x_y_zero_tolerance = 0.01,
                          vertical_cell_overlap_tolerance = 0.01,
                          snap_to_top_and_base = True,
                          nudge = True):
    """Scans logically vertical columns of cells setting depth (& thickness) of inactive cells.

    arguments:
       extent: 3 element vector (nk, nj, ni)
       depth, thickness, x, y: 3D numpy float arrays of shape matching extent, modified in place;
          a zero depth marks an inactive cell; depth assumed more positive with increasing depth
       k_increase_direction: 'down' or 'up', the physical direction of increasing k
       depth_zero_tolerance: maximum absolute depth still treated as zero (inactive)
       x_y_zero_tolerance: maximum absolute x or y value still cleaned up to zero
       vertical_cell_overlap_tolerance: maximum acceptable vertical overlap of cells on input
       snap_to_top_and_base: when True, inactive cells above the topmost active cell (and below the
          deepest) are populated as pinched out cells at the top (and base) faces respectively
       nudge: when True, depths of cells with greater k are moved to clean up overlap over pinchouts

    note: uses log.warning() in place of the deprecated Logger.warn()
    """
    if k_increase_direction == 'down':
        k_dir_sign = 1.0
    elif k_increase_direction == 'up':
        k_dir_sign = -1.0
    else:
        assert (False)  # invalid k_increase_direction
    for j in range(extent[1]):
        for i in range(extent[2]):
            k_top = 0  # NB: 'top' & 'bottom' are misleading if k_increase_direction == 'up'
            while k_top < extent[0] and abs(depth[k_top, j, i]) <= depth_zero_tolerance:
                depth[k_top, j, i] = 0.0  # clean up tiny values
                thickness[k_top, j, i] = 0.0
                if abs(x[k_top, j, i]) <= x_y_zero_tolerance:
                    x[k_top, j, i] = 0.0
                if abs(y[k_top, j, i]) <= x_y_zero_tolerance:
                    y[k_top, j, i] = 0.0
                k_top += 1  # skip topmost inactive batch
            if k_top >= extent[0]:
                continue  # whole column is inactive
            if snap_to_top_and_base:
                # pinch the leading inactive cells out onto the top face of the first active cell
                snap_depth = depth[k_top, j, i] - k_dir_sign * thickness[k_top, j, i] / 2.0
                snap_x = x[k_top, j, i]
                snap_y = y[k_top, j, i]
                for k_snap in range(k_top):
                    depth[k_snap, j, i] = snap_depth
                    x[k_snap, j, i] = snap_x
                    y[k_snap, j, i] = snap_y
            while True:
                while k_top < extent[0] and abs(depth[k_top, j, i]) > depth_zero_tolerance:  # skip active layers
                    k_top += 1
                k_base = k_top + 1
                while k_base < extent[0] and abs(depth[k_base, j, i]) <= depth_zero_tolerance:
                    depth[k_base, j, i] = 0.0  # clean up tiny depth values
                    thickness[k_base, j, i] = 0.0
                    if abs(x[k_base, j, i]) <= x_y_zero_tolerance:
                        x[k_base, j, i] = 0.0
                    if abs(y[k_base, j, i]) <= x_y_zero_tolerance:
                        y[k_base, j, i] = 0.0
                    k_base += 1  # look for deeper active layer
                if k_base >= extent[0]:  # no deeper active cells found
                    if snap_to_top_and_base:
                        # pinch the trailing inactive cells out onto the base of the deepest active cell
                        snap_depth = depth[k_top - 1, j, i] + k_dir_sign * thickness[k_top - 1, j, i] / 2.0
                        snap_x = x[k_top - 1, j, i]
                        snap_y = y[k_top - 1, j, i]
                        for k_snap in range(extent[0] - k_top):
                            depth[k_top + k_snap, j, i] = snap_depth
                            x[k_top + k_snap, j, i] = snap_x
                            y[k_top + k_snap, j, i] = snap_y
                    break
                void_cell_count = k_base - k_top
                assert (void_cell_count > 0)
                # depths of the faces bounding the inactive (void) interval
                void_top_depth = depth[k_top - 1, j, i] + (thickness[k_top - 1, j, i] / 2.0) * k_dir_sign
                void_bottom_depth = depth[k_base, j, i] - (thickness[k_base, j, i] / 2.0) * k_dir_sign
                void_top_x = x[k_top - 1, j, i]
                void_top_y = y[k_top - 1, j, i]
                void_interval = k_dir_sign * (void_bottom_depth - void_top_depth)
                void_x_interval = x[k_base, j, i] - void_top_x
                void_y_interval = y[k_base, j, i] - void_top_y
                infill_cell_thickness = void_interval / void_cell_count
                if void_interval < 0.0:  # overlapping cells
                    if -void_interval < vertical_cell_overlap_tolerance:
                        if nudge:
                            nudge_count = 0  # debug
                            for k_nudge in range(extent[0] - k_base):
                                if depth[k_base + k_nudge, j, i] > depth_zero_tolerance:
                                    depth[k_base + k_nudge, j, i] += -void_interval * k_dir_sign
                                    nudge_count += 1  # debug
                            log.debug('%1d cells nudged in [ i j ] column [%1d, %1d]', nudge_count, i + 1, j + 1)
                        void_bottom_depth += -void_interval
                        void_interval = 0.0
                        infill_cell_thickness = 0.0
                    else:
                        log.warning('Cells [%1d, %1d, %1d] and [%1d, %1d, %1d] overlap ...', i + 1, j + 1, k_top,
                                    i + 1, j + 1, k_base + 1)
                        log.warning('   check k_increase_direction and tolerances')
                        log.warning('Skipping rest of i,j column')  # todo: could abort here
                        break
                assert (infill_cell_thickness >= 0.0)
                # distribute the void interval equally among the inactive cells, interpolating x & y
                for void_k in range(void_cell_count):
                    depth[k_top + void_k, j, i] = void_top_depth + (0.5 + void_k) * infill_cell_thickness * k_dir_sign
                    thickness[k_top + void_k, j, i] = infill_cell_thickness
                    x[k_top + void_k, j, i] = void_top_x + (0.5 + void_k) * void_x_interval / void_cell_count
                    y[k_top + void_k, j, i] = void_top_y + (0.5 + void_k) * void_y_interval / void_cell_count
                k_top = k_base
# end of def infill_block_geometry()
##########################################################################################
##########################################################################################
# def resequence_nexus_corp():
def resequence_nexus_corp(corner_points, eight_mode = False, undo = False):
    """Reorders corner point data in situ, to handle bizarre nexus orderings."""
    # undo False for corp to internal; undo True for internal to corp; only relevant in eight_mode
    assert (corner_points.ndim == 7)
    if not eight_mode:
        # symmetric swap of the two jp = 1 point slices: the same operation in both directions
        held = corner_points[:, :, :, :, 1, 0, :].copy()
        corner_points[:, :, :, :, 1, 0, :] = corner_points[:, :, :, :, 1, 1, :]
        corner_points[:, :, :, :, 1, 1, :] = held
        return
    nk, nj, ni = corner_points.shape[:3]
    for k in range(nk):
        for j in range(nj):
            for i in range(ni):
                if undo:
                    # gather the 8 corners back into (3, 8) xyz-major order
                    flat = np.zeros((3, 8))
                    for c, (kp, jp, ip) in enumerate(np.ndindex(2, 2, 2)):
                        flat[:, c] = corner_points[k, j, i, kp, jp, ip, :]
                    corner_points[k, j, i] = flat.reshape((2, 2, 2, 3))
                else:
                    # reinterpret the cell's 24 values as (3, 8) then scatter per corner
                    flat = corner_points[k, j, i].reshape((3, 8)).copy()
                    for c, (kp, jp, ip) in enumerate(np.ndindex(2, 2, 2)):
                        corner_points[k, j, i, kp, jp, ip, :] = flat[:, c]
# end of def resequence_nexus_corp()
##########################################################################################
##########################################################################################
# def random_cell():
def random_cell(corner_points, border = 0.25, max_tries = 20, tolerance = 0.003):
    """Returns a random cell's (k,j,i) tuple for a cell with non-zero lengths on all 3 primary edges."""
    assert (corner_points.ndim == 7)
    assert (0.0 <= border < 0.5)
    assert (max_tries > 0)
    kji_extent = np.array(corner_points.shape, dtype = 'int')[:3]
    # restrict sampling to an interior window, leaving a fractional border on each axis
    low = np.zeros(3, dtype = 'int')
    high = np.zeros(3, dtype = 'int')
    for axis in range(3):
        low[axis] = int(float(kji_extent[axis]) * border)
        high[axis] = max(kji_extent[axis] - low[axis] - 1, low[axis])
    pick = np.empty(3, dtype = 'int')
    for _ in range(max_tries):
        for axis in range(3):
            pick[axis] = 0 if kji_extent[axis] == 1 else random.randint(low[axis], high[axis])
        cell_cp = corner_points[tuple(pick)]
        assert (cell_cp.shape == (2, 2, 2, 3))
        # reject cells with a degenerate (near zero length) K, J or I primary edge
        degenerate = any(
            vec.manhatten_distance(cell_cp[0, 0, 0], cell_cp[kp, jp, ip]) < tolerance
            for kp, jp, ip in ((1, 0, 0), (0, 1, 0), (0, 0, 1)))
        if not degenerate:
            return tuple(pick)
    log.warning('failed to find random voluminous cell')
    return None
# end of def random_cell()
##########################################################################################
##########################################################################################
# def determine_corp_ijk_handedness():
def determine_corp_ijk_handedness(corner_points, xyz_is_left_handed = True):
    """Determine true ijk handedness from corner point data in pagoda style 7D array; returns 'right' or 'left'."""
    assert (corner_points.ndim == 7)
    cell_kji = random_cell(corner_points)
    assert (cell_kji is not None)
    log.debug('using cell ijk0 [{}, {}, {}] to determine ijk handedness'.format(cell_kji[2], cell_kji[1], cell_kji[0]))
    cell_cp = corner_points[cell_kji]
    base = cell_cp[0, 0, 0]
    # the sign of the determinant of the I, J & K edge vectors fixes the cell's orientation
    det = vec.determinant(cell_cp[0, 0, 1] - base, cell_cp[0, 1, 0] - base, cell_cp[1, 0, 0] - base)  # NB. IJK ordering
    if det == 0.0:
        log.warning('indeterminate handedness in cell ijk0 [{}, {}, {}]'.format(cell_kji[2], cell_kji[1], cell_kji[0]))
        return None
    is_left = xyz_is_left_handed if det > 0.0 else not xyz_is_left_handed
    return 'left' if is_left else 'right'
# end of def determine_corp_ijk_handedness()
##########################################################################################
##########################################################################################
# def determine_corp_extent():
def determine_corp_extent(corner_points, tolerance = 0.003):
    """Returns extent of grid derived from 7D corner points with all cells temporarily in I.

    arguments:
       corner_points: numpy float array of shape (1, 1, cell_count, 2, 2, 2, 3) — all cells
          flattened into the I axis (enforced by the assertion below)
       tolerance: maximum Manhattan distance for two corner points to be treated as coincident

    returns:
       [nk, nj, ni] list of ints, or None if a consistent extent could not be determined

    note: the search is probabilistic — candidate extents (factor combinations of cell_count)
       are confirmed by testing face coincidence at randomly chosen cells.
    """
    def neighbours(corner_points, sextuple_cell_a_p1, sextuple_cell_a_p2, sextuple_cell_b_p1, sextuple_cell_b_p2,
                   tolerance):
        # allows for reversal of points (or not) in neighbouring cell
        if ((vec.manhatten_distance(corner_points[sextuple_cell_a_p1], corner_points[sextuple_cell_b_p1]) <= tolerance)
                and (vec.manhatten_distance(corner_points[sextuple_cell_a_p2], corner_points[sextuple_cell_b_p2]) <=
                     tolerance)):
            return True
        if ((vec.manhatten_distance(corner_points[sextuple_cell_a_p1], corner_points[sextuple_cell_b_p2]) <= tolerance)
                and (vec.manhatten_distance(corner_points[sextuple_cell_a_p2], corner_points[sextuple_cell_b_p1]) <=
                     tolerance)):
            return True
        return False
    assert (corner_points.ndim == 7 and corner_points.shape[:2] == (1, 1))
    confirmation = 3  # number of identical results needed for each of NI and NJ
    max_failures = 100  # maximum number of failed random cells for each of NI and NJ
    min_cell_length = 10.0 * tolerance
    cell_count = corner_points.shape[2]
    # candidate extents are products of subsets of the prime factors of cell_count
    prime_factorization = factors.factorize(cell_count)
    log.debug('cell count is ' + str(cell_count) + '; prime factorization: ' + str(prime_factorization))
    possible_extents = factors.all_factors_from_primes(prime_factorization)
    log.debug('possible extents are: ' + str(possible_extents))
    ni = None
    redundancy = confirmation
    remaining_attempts = max_failures
    # determine NI: find the stride e at which a random cell's J+ face meets another cell's J- face
    while redundancy:
        kji_cell = random_cell(corner_points, tolerance = min_cell_length)
        found = False
        for e in possible_extents:
            candidate = kji_cell[2] + e
            if candidate >= cell_count:
                continue
            if neighbours(corner_points, (0, 0, kji_cell[2], 0, 1, 0), (0, 0, kji_cell[2], 0, 1, 1),
                          (0, 0, candidate, 0, 0, 0), (0, 0, candidate, 0, 0, 1), tolerance):
                if ni is not None and ni != e:
                    log.error('inconsistent NI values of {} and {} determined from corner points'.format(ni, e))
                    return None
                found = True
                ni = e
                redundancy -= 1
                break
        if not found:
            remaining_attempts -= 1
            if remaining_attempts <= 0:
                log.error('failed to determine NI from corner points (out of tries)')  # could assume NJ = 1 here
                return None
    log.info('NI determined from corner points to be ' + str(ni))
    if ni > 1:
        # NJ * NK must account for the remaining prime factors once NI's are removed
        ni_prime_factors = factors.factorize(ni)
        factors.remove_subset(prime_factorization, ni_prime_factors)
    log.debug('remaining prime factors after accounting for NI are: ' + str(prime_factorization))
    possible_extents = factors.all_factors_from_primes(prime_factorization)
    log.debug('possible extents for NJ & NK are: ' + str(possible_extents))
    nj = None
    redundancy = confirmation
    remaining_attempts = max_failures
    # determine NJ: find the stride e * ni at which a cell's K+ corner meets another cell's K- corner
    while redundancy:
        kji_cell = random_cell(corner_points)
        found = False
        for e in possible_extents:
            candidate = kji_cell[2] + (e * ni)
            if candidate >= cell_count:
                continue
            if vec.manhatten_distance(corner_points[0, 0, kji_cell[2], 1, 0, 0], corner_points[0, 0, candidate, 0, 0,
                                                                                              0]) <= tolerance:
                if nj is not None and nj != e:
                    log.error('inconsistent NJ values of {} and {} determined from corner points'.format(nj, e))
                    return None
                found = True
                nj = e
                redundancy -= 1
                break
        if not found:
            remaining_attempts -= 1
            if remaining_attempts <= 0:
                log.error(
                    'failed to determine NJ from corner points (out of tries)')  # could assume or check if NK = 1 here
                return None
    log.info('NJ determined from corner points to be ' + str(nj))
    # NK is whatever is left once NI and NJ divide the cell count
    nk, remainder = divmod(cell_count, ni * nj)
    assert (remainder == 0)
    log.info('NK determined from corner points to be ' + str(nk))
    assert (nk in possible_extents)
    return [nk, nj, ni]
# end def determine_corp_extent():
##########################################################################################
##########################################################################################
# def translate_corp():
def translate_corp(corner_points, x_shift = None, y_shift = None, min_xy = None, preserve_digits = None):
    """Adjusts x and y values of corner points by a constant offset.

    When a shift is not given explicitly, it is derived so that the minimum x (or y)
    value ends up at min_xy (default 0.0). If preserve_digits is set, the derived
    shifts are rounded down to a multiple of 10^preserve_digits.
    """
    assert (corner_points.ndim == 7)
    floor_xy = 0.0 if min_xy is None else min_xy
    x_sub = (np.min(corner_points[..., 0]) - floor_xy) if x_shift is None else -x_shift
    y_sub = (np.min(corner_points[..., 1]) - floor_xy) if y_shift is None else -y_shift
    if preserve_digits is not None:
        # snap the subtracted amounts down to a multiple of 10^preserve_digits
        quantum = maths.pow(10.0, preserve_digits)
        x_sub = quantum * maths.floor(x_sub / quantum)
        y_sub = quantum * maths.floor(y_sub / quantum)
    log.info('translating corner points by %3.1f in x and %3.1f in y', -x_sub, -y_sub)
    corner_points[..., 0] -= x_sub
    corner_points[..., 1] -= y_sub
# end of def translate_corp()
##########################################################################################
def triangles_for_cell_faces(cp):
    """Returns numpy array of shape (3, 2, 4, 3, 3) with axes being kji, -+, triangle within face, triangle corner, xyz.

    args:
       cp (numpy float array of shape (2, 2, 2, 3)): single cell corner point array in pagoda protocol

    returns:
       numpy float array of shape (3, 2, 4, 3, 3) holding triangle corner coordinates for cell faces represented with
       quad triangles

    note:
       resqpy.surface also contains methods for working with cell faces as triangulated sets
    """
    tri = np.empty((3, 2, 4, 3, 3))
    # cycle of the four face corners, indexed in the two point axes other than the face axis
    cycle = ((0, 0), (0, 1), (1, 1), (1, 0))
    mean_axes = ((1, 2), (0, 2), (0, 1))
    for face_axis in range(3):
        # the centre of each face pair becomes the shared apex vertex of its 4 quad triangles
        tri[face_axis, :, :, 0] = np.mean(cp, axis = mean_axes[face_axis]).reshape((2, 1, 3))
        spun = np.moveaxis(cp, face_axis, 0)  # bring the face axis to the front
        for quad in range(4):
            a = cycle[quad]
            b = cycle[(quad + 1) % 4]
            tri[face_axis, :, quad, 1] = spun[:, a[0], a[1]]
            tri[face_axis, :, quad, 2] = spun[:, b[0], b[1]]
    return tri
# end of grid_functions module
##########################################################################################
def actual_pillar_shape(pillar_points, tolerance = 0.001):
    """Returns 'curved', 'straight' or 'vertical' for shape of fully defined points array of shape (nk + k_gaps + 1,
    ..., 3)."""
    assert pillar_points.ndim >= 3 and pillar_points.shape[-1] == 3
    points = pillar_points.reshape((pillar_points.shape[0], -1, 3))
    deviation = points - points[0]  # displacement of every node from the top node of its pillar
    # Manhattan xy distance is a cheap proxy for true lateral distance
    lateral = np.abs(deviation[:, :, 0]) + np.abs(deviation[:, :, 1])
    if np.max(lateral) <= tolerance:
        return 'vertical'
    if np.max(lateral[-1]) <= tolerance:
        return 'curved'  # top & bottom vertically aligned yet drift in between, so curved
    # pillars with negligible total z extent are treated as straight rather than interpolated
    null_mask = (abs(deviation[-1, :, 2]) <= tolerance)
    deviation[-1, :, 2] = np.where(null_mask, tolerance, deviation[-1, :, 2])  # avoid divide by zero
    z_fraction = deviation[:, :, 2] / deviation[-1, :, 2]
    # lateral departure of each node from the straight chord joining pillar top to bottom
    drift = deviation[:, :, :2] - z_fraction.reshape((points.shape[0], points.shape[1], 1)) * deviation[-1, :, :2].reshape(
        (1, points.shape[1], 2))
    straight = (np.max(np.sum(np.abs(drift), axis = -1), axis = 0) <= tolerance)
    if np.all(np.where(null_mask, True, straight)):
        return 'straight'
    return 'curved'
##########################################################################################
def columns_to_nearest_split_face(grid):
    """Returns a numpy integer array of shape (NJ, NI) being number of cells to nearest split edge (Manhattan
    distance)."""
    if not grid.has_split_coordinate_lines:
        return None
    j_split, i_split = grid.split_column_faces()
    # mark every column that touches a split face on either side
    touching = np.zeros((grid.nj, grid.ni), dtype = bool)
    touching[:-1, :] |= j_split
    touching[1:, :] |= j_split
    touching[:, :-1] |= i_split
    touching[:, 1:] |= i_split
    # iterative relaxation of Manhattan distance, with a sentinel border around the grid
    cap = grid.nj + grid.ni
    dist = np.full((grid.nj + 2, grid.ni + 2), cap, dtype = int)
    dist[1:-1, 1:-1] = np.where(touching, 0, cap)
    while True:
        bumped = dist + 1
        relaxed = np.minimum(dist[1:-1, 1:-1], bumped[:-2, 1:-1])
        relaxed = np.minimum(relaxed, bumped[2:, 1:-1])
        relaxed = np.minimum(relaxed, bumped[1:-1, :-2])
        relaxed = np.minimum(relaxed, bumped[1:-1, 2:])
        if np.all(relaxed == dist[1:-1, 1:-1]):
            return dist[1:-1, 1:-1]
        dist[1:-1, 1:-1] = relaxed
##########################################################################################
def left_right_foursome(full_pillar_list, p_index):
    """Returns (2, 2) bool numpy array indicating which columns around a primary pillar are to the right of a line."""
    # truth table keyed on (entry step, exit step) through the pillar at p_index
    T, F = True, False
    table = {
        ((0, 1), (-1, 0)): ((F, T), (T, T)),
        ((0, 1), (0, 1)): ((F, F), (T, T)),
        ((0, 1), (1, 0)): ((F, F), (T, F)),
        ((0, -1), (-1, 0)): ((F, T), (F, F)),
        ((0, -1), (0, -1)): ((T, T), (F, F)),
        ((0, -1), (1, 0)): ((T, T), (T, F)),
        ((1, 0), (0, -1)): ((T, F), (F, F)),
        ((1, 0), (1, 0)): ((T, F), (T, F)),
        ((1, 0), (0, 1)): ((T, F), (T, T)),
        ((-1, 0), (0, -1)): ((T, T), (F, T)),
        ((-1, 0), (-1, 0)): ((F, T), (F, T)),
        ((-1, 0), (0, 1)): ((F, F), (F, T)),
    }
    assert 0 < p_index < len(full_pillar_list) - 1
    here = np.array(full_pillar_list[p_index], dtype = int)
    entry = tuple(here - np.array(full_pillar_list[p_index - 1], dtype = int))
    exit = tuple(np.array(full_pillar_list[p_index + 1], dtype = int) - here)
    if entry not in ((0, 1), (0, -1), (1, 0), (-1, 0)):
        log.debug(f'entry pair: {entry}')
        raise Exception('code failure whilst taking entry sides from dubious full pillar list')
    if (entry, exit) not in table:
        raise Exception('code failure whilst taking exit sides from dubious full pillar list')
    return np.array(table[(entry, exit)], dtype = bool)
##########################################################################################
| [
"logging.getLogger",
"math.floor",
"numpy.array",
"resqpy.olio.vector_utilities.determinant",
"numpy.mean",
"numpy.where",
"resqpy.olio.factors.remove_subset",
"numpy.max",
"numpy.empty",
"resqpy.olio.vector_utilities.manhatten_distance",
"numpy.min",
"random.randint",
"numpy.abs",
"resqpy... | [((138, 165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (155, 165), False, 'import logging\n'), ((8306, 8348), 'numpy.array', 'np.array', (['corner_points.shape'], {'dtype': '"""int"""'}), "(corner_points.shape, dtype='int')\n", (8314, 8348), True, 'import numpy as np\n'), ((10155, 10197), 'numpy.array', 'np.array', (['corner_points.shape'], {'dtype': '"""int"""'}), "(corner_points.shape, dtype='int')\n", (10163, 10197), True, 'import numpy as np\n'), ((10245, 10269), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': '"""int"""'}), "(3, dtype='int')\n", (10253, 10269), True, 'import numpy as np\n'), ((10288, 10312), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': '"""int"""'}), "(3, dtype='int')\n", (10296, 10312), True, 'import numpy as np\n'), ((10582, 10606), 'numpy.empty', 'np.empty', (['(3)'], {'dtype': '"""int"""'}), "(3, dtype='int')\n", (10590, 10606), True, 'import numpy as np\n'), ((12152, 12253), 'resqpy.olio.vector_utilities.determinant', 'vec.determinant', (['(cell_cp[0, 0, 1] - origin)', '(cell_cp[0, 1, 0] - origin)', '(cell_cp[1, 0, 0] - origin)'], {}), '(cell_cp[0, 0, 1] - origin, cell_cp[0, 1, 0] - origin, \n cell_cp[1, 0, 0] - origin)\n', (12167, 12253), True, 'import resqpy.olio.vector_utilities as vec\n'), ((14247, 14276), 'resqpy.olio.factors.factorize', 'factors.factorize', (['cell_count'], {}), '(cell_count)\n', (14264, 14276), True, 'import resqpy.olio.factors as factors\n'), ((14405, 14457), 'resqpy.olio.factors.all_factors_from_primes', 'factors.all_factors_from_primes', (['prime_factorization'], {}), '(prime_factorization)\n', (14436, 14457), True, 'import resqpy.olio.factors as factors\n'), ((19315, 19340), 'numpy.empty', 'np.empty', (['(3, 2, 4, 3, 3)'], {}), '((3, 2, 4, 3, 3))\n', (19323, 19340), True, 'import numpy as np\n'), ((21774, 21831), 'numpy.where', 'np.where', (['null_pillar_mask', 'tolerance', 'from_top[-1, :, 2]'], {}), '(null_pillar_mask, tolerance, from_top[-1, :, 2])\n', 
(21782, 21831), True, 'import numpy as np\n'), ((22171, 22213), 'numpy.where', 'np.where', (['null_pillar_mask', '(True)', 'straight'], {}), '(null_pillar_mask, True, straight)\n', (22179, 22213), True, 'import numpy as np\n'), ((22221, 22244), 'numpy.all', 'np.all', (['masked_straight'], {}), '(masked_straight)\n', (22227, 22244), True, 'import numpy as np\n'), ((22706, 22746), 'numpy.zeros', 'np.zeros', (['(grid.nj, grid.ni)'], {'dtype': 'bool'}), '((grid.nj, grid.ni), dtype=bool)\n', (22714, 22746), True, 'import numpy as np\n'), ((22812, 22861), 'numpy.logical_or', 'np.logical_or', (['abutting[1:, :]', 'j_col_faces_split'], {}), '(abutting[1:, :], j_col_faces_split)\n', (22825, 22861), True, 'import numpy as np\n'), ((22885, 22935), 'numpy.logical_or', 'np.logical_or', (['abutting[:, :-1]', 'i_col_faces_split'], {}), '(abutting[:, :-1], i_col_faces_split)\n', (22898, 22935), True, 'import numpy as np\n'), ((22958, 23007), 'numpy.logical_or', 'np.logical_or', (['abutting[:, 1:]', 'i_col_faces_split'], {}), '(abutting[:, 1:], i_col_faces_split)\n', (22971, 23007), True, 'import numpy as np\n'), ((23021, 23086), 'numpy.full', 'np.full', (['(grid.nj + 2, grid.ni + 2)', '(grid.nj + grid.ni)'], {'dtype': 'int'}), '((grid.nj + 2, grid.ni + 2), grid.nj + grid.ni, dtype=int)\n', (23028, 23086), True, 'import numpy as np\n'), ((23114, 23154), 'numpy.where', 'np.where', (['abutting', '(0)', '(grid.nj + grid.ni)'], {}), '(abutting, 0, grid.nj + grid.ni)\n', (23122, 23154), True, 'import numpy as np\n'), ((23921, 23967), 'numpy.array', 'np.array', (['full_pillar_list[p_index]'], {'dtype': 'int'}), '(full_pillar_list[p_index], dtype=int)\n', (23929, 23967), True, 'import numpy as np\n'), ((23985, 24035), 'numpy.array', 'np.array', (['full_pillar_list[p_index - 1]'], {'dtype': 'int'}), '(full_pillar_list[p_index - 1], dtype=int)\n', (23993, 24035), True, 'import numpy as np\n'), ((24049, 24099), 'numpy.array', 'np.array', (['full_pillar_list[p_index + 1]'], {'dtype': 'int'}), 
'(full_pillar_list[p_index + 1], dtype=int)\n', (24057, 24099), True, 'import numpy as np\n'), ((15707, 15728), 'resqpy.olio.factors.factorize', 'factors.factorize', (['ni'], {}), '(ni)\n', (15724, 15728), True, 'import resqpy.olio.factors as factors\n'), ((15737, 15797), 'resqpy.olio.factors.remove_subset', 'factors.remove_subset', (['prime_factorization', 'ni_prime_factors'], {}), '(prime_factorization, ni_prime_factors)\n', (15758, 15797), True, 'import resqpy.olio.factors as factors\n'), ((15927, 15979), 'resqpy.olio.factors.all_factors_from_primes', 'factors.all_factors_from_primes', (['prime_factorization'], {}), '(prime_factorization)\n', (15958, 15979), True, 'import resqpy.olio.factors as factors\n'), ((18317, 18349), 'math.pow', 'maths.pow', (['(10.0)', 'preserve_digits'], {}), '(10.0, preserve_digits)\n', (18326, 18349), True, 'import math as maths\n'), ((21203, 21228), 'numpy.abs', 'np.abs', (['from_top[:, :, 0]'], {}), '(from_top[:, :, 0])\n', (21209, 21228), True, 'import numpy as np\n'), ((21231, 21256), 'numpy.abs', 'np.abs', (['from_top[:, :, 1]'], {}), '(from_top[:, :, 1])\n', (21237, 21256), True, 'import numpy as np\n'), ((21332, 21348), 'numpy.max', 'np.max', (['xy_drift'], {}), '(xy_drift)\n', (21338, 21348), True, 'import numpy as np\n'), ((21396, 21416), 'numpy.max', 'np.max', (['xy_drift[-1]'], {}), '(xy_drift[-1])\n', (21402, 21416), True, 'import numpy as np\n'), ((23220, 23271), 'numpy.minimum', 'np.minimum', (['framed[1:-1, 1:-1]', 'plus_one[:-2, 1:-1]'], {}), '(framed[1:-1, 1:-1], plus_one[:-2, 1:-1])\n', (23230, 23271), True, 'import numpy as np\n'), ((23293, 23332), 'numpy.minimum', 'np.minimum', (['updated', 'plus_one[2:, 1:-1]'], {}), '(updated, plus_one[2:, 1:-1])\n', (23303, 23332), True, 'import numpy as np\n'), ((23354, 23394), 'numpy.minimum', 'np.minimum', (['updated', 'plus_one[1:-1, :-2]'], {}), '(updated, plus_one[1:-1, :-2])\n', (23364, 23394), True, 'import numpy as np\n'), ((23416, 23455), 'numpy.minimum', 'np.minimum', 
(['updated', 'plus_one[1:-1, 2:]'], {}), '(updated, plus_one[1:-1, 2:])\n', (23426, 23455), True, 'import numpy as np\n'), ((23467, 23504), 'numpy.all', 'np.all', (['(updated == framed[1:-1, 1:-1])'], {}), '(updated == framed[1:-1, 1:-1])\n', (23473, 23504), True, 'import numpy as np\n'), ((10988, 11046), 'resqpy.olio.vector_utilities.manhatten_distance', 'vec.manhatten_distance', (['cell_cp[0, 0, 0]', 'cell_cp[1, 0, 0]'], {}), '(cell_cp[0, 0, 0], cell_cp[1, 0, 0])\n', (11010, 11046), True, 'import resqpy.olio.vector_utilities as vec\n'), ((11092, 11150), 'resqpy.olio.vector_utilities.manhatten_distance', 'vec.manhatten_distance', (['cell_cp[0, 0, 0]', 'cell_cp[0, 1, 0]'], {}), '(cell_cp[0, 0, 0], cell_cp[0, 1, 0])\n', (11114, 11150), True, 'import resqpy.olio.vector_utilities as vec\n'), ((11196, 11254), 'resqpy.olio.vector_utilities.manhatten_distance', 'vec.manhatten_distance', (['cell_cp[0, 0, 0]', 'cell_cp[0, 0, 1]'], {}), '(cell_cp[0, 0, 0], cell_cp[0, 0, 1])\n', (11218, 11254), True, 'import resqpy.olio.vector_utilities as vec\n'), ((18041, 18083), 'numpy.min', 'np.min', (['corner_points[:, :, :, :, :, :, 0]'], {}), '(corner_points[:, :, :, :, :, :, 0])\n', (18047, 18083), True, 'import numpy as np\n'), ((18172, 18214), 'numpy.min', 'np.min', (['corner_points[:, :, :, :, :, :, 1]'], {}), '(corner_points[:, :, :, :, :, :, 1])\n', (18178, 18214), True, 'import numpy as np\n'), ((18376, 18404), 'math.floor', 'maths.floor', (['(x_sub / divisor)'], {}), '(x_sub / divisor)\n', (18387, 18404), True, 'import math as maths\n'), ((18431, 18459), 'math.floor', 'maths.floor', (['(y_sub / divisor)'], {}), '(y_sub / divisor)\n', (18442, 18459), True, 'import math as maths\n'), ((24238, 24289), 'numpy.array', 'np.array', (['[[False, True], [True, True]]'], {'dtype': 'bool'}), '([[False, True], [True, True]], dtype=bool)\n', (24246, 24289), True, 'import numpy as np\n'), ((10831, 10880), 'random.randint', 'random.randint', (['kji_border[axis]', 'kji_upper[axis]'], {}), 
'(kji_border[axis], kji_upper[axis])\n', (10845, 10880), False, 'import random\n'), ((13299, 13396), 'resqpy.olio.vector_utilities.manhatten_distance', 'vec.manhatten_distance', (['corner_points[sextuple_cell_a_p1]', 'corner_points[sextuple_cell_b_p1]'], {}), '(corner_points[sextuple_cell_a_p1], corner_points[\n sextuple_cell_b_p1])\n', (13321, 13396), True, 'import resqpy.olio.vector_utilities as vec\n'), ((13427, 13524), 'resqpy.olio.vector_utilities.manhatten_distance', 'vec.manhatten_distance', (['corner_points[sextuple_cell_a_p2]', 'corner_points[sextuple_cell_b_p2]'], {}), '(corner_points[sextuple_cell_a_p2], corner_points[\n sextuple_cell_b_p2])\n', (13449, 13524), True, 'import resqpy.olio.vector_utilities as vec\n'), ((13594, 13691), 'resqpy.olio.vector_utilities.manhatten_distance', 'vec.manhatten_distance', (['corner_points[sextuple_cell_a_p1]', 'corner_points[sextuple_cell_b_p2]'], {}), '(corner_points[sextuple_cell_a_p1], corner_points[\n sextuple_cell_b_p2])\n', (13616, 13691), True, 'import resqpy.olio.vector_utilities as vec\n'), ((13722, 13819), 'resqpy.olio.vector_utilities.manhatten_distance', 'vec.manhatten_distance', (['corner_points[sextuple_cell_a_p2]', 'corner_points[sextuple_cell_b_p1]'], {}), '(corner_points[sextuple_cell_a_p2], corner_points[\n sextuple_cell_b_p1])\n', (13744, 13819), True, 'import resqpy.olio.vector_utilities as vec\n'), ((16395, 16505), 'resqpy.olio.vector_utilities.manhatten_distance', 'vec.manhatten_distance', (['corner_points[0, 0, kji_cell[2], 1, 0, 0]', 'corner_points[0, 0, candidate, 0, 0, 0]'], {}), '(corner_points[0, 0, kji_cell[2], 1, 0, 0],\n corner_points[0, 0, candidate, 0, 0, 0])\n', (16417, 16505), True, 'import resqpy.olio.vector_utilities as vec\n'), ((22095, 22111), 'numpy.abs', 'np.abs', (['xy_drift'], {}), '(xy_drift)\n', (22101, 22111), True, 'import numpy as np\n'), ((24340, 24392), 'numpy.array', 'np.array', (['[[False, False], [True, True]]'], {'dtype': 'bool'}), '([[False, False], [True, True]], 
dtype=bool)\n', (24348, 24392), True, 'import numpy as np\n'), ((24686, 24739), 'numpy.array', 'np.array', (['[[False, True], [False, False]]'], {'dtype': 'bool'}), '([[False, True], [False, False]], dtype=bool)\n', (24694, 24739), True, 'import numpy as np\n'), ((24443, 24496), 'numpy.array', 'np.array', (['[[False, False], [True, False]]'], {'dtype': 'bool'}), '([[False, False], [True, False]], dtype=bool)\n', (24451, 24496), True, 'import numpy as np\n'), ((24791, 24843), 'numpy.array', 'np.array', (['[[True, True], [False, False]]'], {'dtype': 'bool'}), '([[True, True], [False, False]], dtype=bool)\n', (24799, 24843), True, 'import numpy as np\n'), ((25134, 25187), 'numpy.array', 'np.array', (['[[True, False], [False, False]]'], {'dtype': 'bool'}), '([[True, False], [False, False]], dtype=bool)\n', (25142, 25187), True, 'import numpy as np\n'), ((8546, 8562), 'numpy.zeros', 'np.zeros', (['(3, 8)'], {}), '((3, 8))\n', (8554, 8562), True, 'import numpy as np\n'), ((24894, 24945), 'numpy.array', 'np.array', (['[[True, True], [True, False]]'], {'dtype': 'bool'}), '([[True, True], [True, False]], dtype=bool)\n', (24902, 24945), True, 'import numpy as np\n'), ((25238, 25290), 'numpy.array', 'np.array', (['[[True, False], [True, False]]'], {'dtype': 'bool'}), '([[True, False], [True, False]], dtype=bool)\n', (25246, 25290), True, 'import numpy as np\n'), ((25582, 25633), 'numpy.array', 'np.array', (['[[True, True], [False, True]]'], {'dtype': 'bool'}), '([[True, True], [False, True]], dtype=bool)\n', (25590, 25633), True, 'import numpy as np\n'), ((19452, 19476), 'numpy.mean', 'np.mean', (['cp'], {'axis': '(1, 2)'}), '(cp, axis=(1, 2))\n', (19459, 19476), True, 'import numpy as np\n'), ((19586, 19610), 'numpy.mean', 'np.mean', (['cp'], {'axis': '(0, 2)'}), '(cp, axis=(0, 2))\n', (19593, 19610), True, 'import numpy as np\n'), ((19720, 19744), 'numpy.mean', 'np.mean', (['cp'], {'axis': '(0, 1)'}), '(cp, axis=(0, 1))\n', (19727, 19744), True, 'import numpy as np\n'), 
((25341, 25392), 'numpy.array', 'np.array', (['[[True, False], [True, True]]'], {'dtype': 'bool'}), '([[True, False], [True, True]], dtype=bool)\n', (25349, 25392), True, 'import numpy as np\n'), ((25685, 25737), 'numpy.array', 'np.array', (['[[False, True], [False, True]]'], {'dtype': 'bool'}), '([[False, True], [False, True]], dtype=bool)\n', (25693, 25737), True, 'import numpy as np\n'), ((25788, 25841), 'numpy.array', 'np.array', (['[[False, False], [False, True]]'], {'dtype': 'bool'}), '([[False, False], [False, True]], dtype=bool)\n', (25796, 25841), True, 'import numpy as np\n')] |
import pytz
import pickle
from base64 import b64encode
from datetime import datetime
import numpy as np
from .base import db
from sqlalchemy.orm import relationship
# Timezone kept for localising record timestamps (currently only referenced
# by commented-out code in Record.f_time — TODO confirm it is still needed).
paris = pytz.timezone("Europe/Paris")
# Limit numpy printing to 4 digits of precision; this affects str(array)
# and therefore the LaTeX output produced by format_array below.
np.set_printoptions(4)
class Record(db.Model):
    """One submission stored for a quiz session.

    The raw payload is kept in the ``data`` BLOB column; ``type`` tells
    how to decode it (``list``/``dict``/``ndarray`` are pickled,
    ``str``/``code`` are UTF-8 text, ``image`` is raw bytes).
    """
    __tablename__ = "records"
    id = db.Column(db.Integer, primary_key=True)
    session_id = db.Column(db.Integer, db.ForeignKey("sessions.id"))
    part_id = db.Column(db.Integer, db.ForeignKey("session_parts.id"))
    sender_name = db.Column(db.String(100))
    sender_uuid = db.Column(db.String(100))
    sender_ip = db.Column(db.String(15))
    question_nb = db.Column(db.Integer)
    # Stored as naive UTC; f_time converts it for the client.
    time = db.Column(db.DateTime, default=datetime.utcnow)
    # NOTE: `type` shadows the builtin, but renaming would change the DB
    # column and the serialised API field, so it is kept as-is.
    type = db.Column(db.String(20))
    data = db.Column(db.BLOB)
    session = relationship("Session", foreign_keys=session_id, back_populates="records")
    part = relationship("SessionPart", foreign_keys=part_id, back_populates="records")

    def to_dict(self):
        """Serialise the record into a plain dict for JSON responses."""
        fields = [
            "id",
            "session_id",
            "sender_name",
            "sender_uuid",
            "sender_ip",
            "question_nb",
            "f_time",
            "type",
            "f_data",
        ]
        payload = {}
        for name in fields:
            payload[name] = getattr(self, name)
        return payload

    def format_data(self):
        """Decode the raw BLOB according to ``self.type``.

        Returns "Not Supported" for unknown types and
        "Data could not be loaded" when decoding raises.
        """
        decoders = {
            "list": pickle.loads,
            "dict": pickle.loads,
            "ndarray": lambda raw: format_array(pickle.loads(raw)),
            "str": lambda raw: raw.decode("utf-8"),
            "code": lambda raw: raw.decode("utf-8"),
            "image": lambda raw: b64encode(raw).decode(),
        }
        result = "Not Supported"
        try:
            decoder = decoders.get(self.type)
            if decoder is not None:
                result = decoder(self.data)
        except Exception as exc:
            result = "Data could not be loaded"
            print(exc)
        return result

    @property
    def f_data(self):
        """Human/JSON friendly view of the stored payload."""
        return self.format_data()

    @property
    def f_time(self):
        """Record time as epoch milliseconds (what the frontend expects)."""
        return self.time.timestamp() * 1000
def format_array(a):
    """Render a numpy array as a LaTeX ``bmatrix`` string.

    Scalars become plain numbers, 1-D/2-D arrays a single bmatrix, and
    higher-rank arrays a bmatrix of recursively formatted sub-arrays.
    """
    if a.size == 1:
        return str(a.item())
    if a.ndim > 2:
        inner = "\n".join([format_array(sub) + r"\\" for sub in a])
        return r"\begin{bmatrix}" + inner + r"\end{bmatrix}"
    # Strip numpy's brackets, then join each row's entries with '&'.
    rows = str(a).replace("[", "").replace("]", "").splitlines()
    body = [r"\begin{bmatrix}"]
    body.extend("  " + " & ".join(row.split()) + r"\\" for row in rows)
    body.append(r"\end{bmatrix}")
    return "\n".join(body)
def format_array_old(a):
    """Legacy regex-based LaTeX bmatrix formatter (kept for reference)."""
    import re

    def merge_row(match):
        # Collapse runs of spaces, then join the numbers with '&'.
        collapsed = re.sub(" +", " ", match.group())
        return " & ".join(collapsed.split(" "))

    joined = re.sub(r"(\d+(?:\.\d+)?)( +(\d+(?:\.\d+)?))*", merge_row, str(a))
    joined = joined.replace("[", r"\begin{bmatrix} ")
    joined = joined.replace("]", r" \end{bmatrix} \\ ")
    return joined
| [
"sqlalchemy.orm.relationship",
"pytz.timezone",
"base64.b64encode",
"pickle.loads",
"numpy.set_printoptions"
] | [((176, 205), 'pytz.timezone', 'pytz.timezone', (['"""Europe/Paris"""'], {}), "('Europe/Paris')\n", (189, 205), False, 'import pytz\n'), ((206, 228), 'numpy.set_printoptions', 'np.set_printoptions', (['(4)'], {}), '(4)\n', (225, 228), True, 'import numpy as np\n'), ((833, 907), 'sqlalchemy.orm.relationship', 'relationship', (['"""Session"""'], {'foreign_keys': 'session_id', 'back_populates': '"""records"""'}), "('Session', foreign_keys=session_id, back_populates='records')\n", (845, 907), False, 'from sqlalchemy.orm import relationship\n'), ((919, 994), 'sqlalchemy.orm.relationship', 'relationship', (['"""SessionPart"""'], {'foreign_keys': 'part_id', 'back_populates': '"""records"""'}), "('SessionPart', foreign_keys=part_id, back_populates='records')\n", (931, 994), False, 'from sqlalchemy.orm import relationship\n'), ((1574, 1597), 'pickle.loads', 'pickle.loads', (['self.data'], {}), '(self.data)\n', (1586, 1597), False, 'import pickle\n'), ((1659, 1682), 'pickle.loads', 'pickle.loads', (['self.data'], {}), '(self.data)\n', (1671, 1682), False, 'import pickle\n'), ((1760, 1783), 'pickle.loads', 'pickle.loads', (['self.data'], {}), '(self.data)\n', (1772, 1783), False, 'import pickle\n'), ((2020, 2040), 'base64.b64encode', 'b64encode', (['self.data'], {}), '(self.data)\n', (2029, 2040), False, 'from base64 import b64encode\n')] |
# Code adapted from https://github.com/DLR-RM/rl-baselines3-zoo
# it requires stable-baselines3 to be installed
# Colab Notebook: https://colab.research.google.com/github/Stable-Baselines-Team/rl-colab-notebooks/blob/sb3/pybullet.ipynb
# You can run it using: python -m pybullet_envs.stable_baselines.enjoy --algo td3 --env HalfCheetahBulletEnv-v0
# Author: <NAME>
# MIT License
import os
import time
import argparse
import gym
import numpy as np
import pybullet_envs
from stable_baselines3 import SAC, TD3
if __name__ == "__main__":
    # Command-line interface: pick the algorithm/environment, then replay
    # a saved Stable-Baselines3 policy for a few episodes.
    parser = argparse.ArgumentParser(
        "Enjoy an RL agent trained using Stable Baselines3"
    )
    parser.add_argument(
        "--algo",
        help="RL Algorithm (Soft Actor-Critic by default)",
        default="sac",
        type=str,
        required=False,
        choices=["sac", "td3"],
    )
    parser.add_argument(
        "--env", type=str, default="HalfCheetahBulletEnv-v0", help="environment ID"
    )
    parser.add_argument(
        "-n", "--n-episodes", help="Number of episodes", default=5, type=int
    )
    parser.add_argument(
        "--no-render",
        action="store_true",
        default=False,
        help="Do not render the environment",
    )
    parser.add_argument(
        "--load-best",
        action="store_true",
        default=False,
        help="Load best model instead of last model if available",
    )
    args = parser.parse_args()

    env_id = args.env

    # Create an env similar to the training env
    env = gym.make(env_id)

    # Enable GUI (pybullet requires render() to be called before reset
    # for the GUI window to appear)
    if not args.no_render:
        env.render(mode="human")

    # Map the CLI choice to the SB3 algorithm class.
    algo = {
        "sac": SAC,
        "td3": TD3,
    }[args.algo]

    # We assume that the saved model is in the same folder
    save_path = f"{args.algo}_{env_id}.zip"

    if not os.path.isfile(save_path) or args.load_best:
        print("Loading best model")
        # Try to load best model (saved by EvalCallback during training)
        save_path = os.path.join(f"{args.algo}_{env_id}", "best_model.zip")

    # Load the saved model
    model = algo.load(save_path, env=env)

    try:
        # Use deterministic actions for evaluation
        episode_rewards, episode_lengths = [], []
        for _ in range(args.n_episodes):
            obs = env.reset()
            done = False
            episode_reward = 0.0
            episode_length = 0
            while not done:
                action, _ = model.predict(obs, deterministic=True)
                obs, reward, done, _info = env.step(action)
                episode_reward += reward

                episode_length += 1
                if not args.no_render:
                    env.render(mode="human")
                    # Slow the loop down to real time (pybullet runs at 240 Hz)
                    dt = 1.0 / 240.0
                    time.sleep(dt)
            episode_rewards.append(episode_reward)
            episode_lengths.append(episode_length)
            print(
                f"Episode {len(episode_rewards)} reward={episode_reward}, length={episode_length}"
            )

        # Aggregate statistics over all evaluated episodes.
        mean_reward = np.mean(episode_rewards)
        std_reward = np.std(episode_rewards)

        mean_len, std_len = np.mean(episode_lengths), np.std(episode_lengths)

        print("==== Results ====")
        print(f"Episode_reward={mean_reward:.2f} +/- {std_reward:.2f}")
        print(f"Episode_length={mean_len:.2f} +/- {std_len:.2f}")
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop evaluation early without a traceback.
        pass

    # Close process
    env.close()
| [
"numpy.mean",
"argparse.ArgumentParser",
"os.path.join",
"time.sleep",
"os.path.isfile",
"numpy.std",
"gym.make"
] | [((551, 627), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Enjoy an RL agent trained using Stable Baselines3"""'], {}), "('Enjoy an RL agent trained using Stable Baselines3')\n", (574, 627), False, 'import argparse\n'), ((1508, 1524), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (1516, 1524), False, 'import gym\n'), ((1924, 1979), 'os.path.join', 'os.path.join', (['f"""{args.algo}_{env_id}"""', '"""best_model.zip"""'], {}), "(f'{args.algo}_{env_id}', 'best_model.zip')\n", (1936, 1979), False, 'import os\n'), ((2967, 2991), 'numpy.mean', 'np.mean', (['episode_rewards'], {}), '(episode_rewards)\n', (2974, 2991), True, 'import numpy as np\n'), ((3013, 3036), 'numpy.std', 'np.std', (['episode_rewards'], {}), '(episode_rewards)\n', (3019, 3036), True, 'import numpy as np\n'), ((1790, 1815), 'os.path.isfile', 'os.path.isfile', (['save_path'], {}), '(save_path)\n', (1804, 1815), False, 'import os\n'), ((3066, 3090), 'numpy.mean', 'np.mean', (['episode_lengths'], {}), '(episode_lengths)\n', (3073, 3090), True, 'import numpy as np\n'), ((3092, 3115), 'numpy.std', 'np.std', (['episode_lengths'], {}), '(episode_lengths)\n', (3098, 3115), True, 'import numpy as np\n'), ((2695, 2709), 'time.sleep', 'time.sleep', (['dt'], {}), '(dt)\n', (2705, 2709), False, 'import time\n')] |
from datetime import date, datetime
import numpy as np
def get_season(now):
    """Return the season name ('winter'/'spring'/'summer'/'autumn')
    for a given date or datetime."""
    ref_year = 2000  # dummy leap year so that Feb 29 inputs stay valid
    boundaries = (
        ('winter', date(ref_year, 1, 1), date(ref_year, 3, 20)),
        ('spring', date(ref_year, 3, 21), date(ref_year, 6, 20)),
        ('summer', date(ref_year, 6, 21), date(ref_year, 9, 22)),
        ('autumn', date(ref_year, 9, 23), date(ref_year, 12, 20)),
        ('winter', date(ref_year, 12, 21), date(ref_year, 12, 31)),
    )
    if isinstance(now, datetime):
        now = now.date()
    # Project the input onto the reference year and look up its interval.
    probe = now.replace(year=ref_year)
    for name, start, end in boundaries:
        if start <= probe <= end:
            return name
def findZeroTempDay(arr):
    """Find the first run of five consecutive sub-zero days in the
    second half of *arr* and return the index where that run starts.

    NOTE(review): if no such run exists the function falls through and
    implicitly returns None, although the internal "not found" sentinel
    is np.nan — callers should handle both; confirm intended behaviour.
    """
    csd = 0  # consecutive subzero days seen so far
    fsdos = np.nan  # first subzero day of the current sequence
    # Only the second half of the series is inspected.
    for t in range(int(np.round(arr.shape[0]/2)), arr.shape[0]):
        if arr[t] < 0:
            if csd == 0:
                fsdos = t  # remember where this cold streak began
            csd += 1
        else:
            # Streak broken: reset the counter and the start marker.
            fsdos = np.nan
            csd = 0
        if csd >= 5:
            return fsdos | [
"datetime.date",
"numpy.round"
] | [((779, 805), 'numpy.round', 'np.round', (['(arr.shape[0] / 2)'], {}), '(arr.shape[0] / 2)\n', (787, 805), True, 'import numpy as np\n'), ((169, 182), 'datetime.date', 'date', (['Y', '(1)', '(1)'], {}), '(Y, 1, 1)\n', (173, 182), False, 'from datetime import date, datetime\n'), ((187, 201), 'datetime.date', 'date', (['Y', '(3)', '(20)'], {}), '(Y, 3, 20)\n', (191, 201), False, 'from datetime import date, datetime\n'), ((233, 247), 'datetime.date', 'date', (['Y', '(3)', '(21)'], {}), '(Y, 3, 21)\n', (237, 247), False, 'from datetime import date, datetime\n'), ((251, 265), 'datetime.date', 'date', (['Y', '(6)', '(20)'], {}), '(Y, 6, 20)\n', (255, 265), False, 'from datetime import date, datetime\n'), ((297, 311), 'datetime.date', 'date', (['Y', '(6)', '(21)'], {}), '(Y, 6, 21)\n', (301, 311), False, 'from datetime import date, datetime\n'), ((315, 329), 'datetime.date', 'date', (['Y', '(9)', '(22)'], {}), '(Y, 9, 22)\n', (319, 329), False, 'from datetime import date, datetime\n'), ((361, 375), 'datetime.date', 'date', (['Y', '(9)', '(23)'], {}), '(Y, 9, 23)\n', (365, 375), False, 'from datetime import date, datetime\n'), ((379, 394), 'datetime.date', 'date', (['Y', '(12)', '(20)'], {}), '(Y, 12, 20)\n', (383, 394), False, 'from datetime import date, datetime\n'), ((425, 440), 'datetime.date', 'date', (['Y', '(12)', '(21)'], {}), '(Y, 12, 21)\n', (429, 440), False, 'from datetime import date, datetime\n'), ((443, 458), 'datetime.date', 'date', (['Y', '(12)', '(31)'], {}), '(Y, 12, 31)\n', (447, 458), False, 'from datetime import date, datetime\n')] |
import argparse
import datetime
import numpy as np
import pandas as pd
import matplotlib
# Select an interactive backend; must run before pyplot is imported.
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
# Project-wide figure styling (relative path — run from the repo root).
plt.style.use("./Styles/Scientific.mplstyle")
from typing import Dict, List
import data
import filters
import utilities
import utm
def filter_aps(data_config: data.DataConfiguration,
        filter_config: filters.FilterConfiguration):
    """Low-pass filter APS position data with a FIR filter.

    Reads the CSV pointed to by ``data_config.input``, filters the UTM
    northing/easting/depth channels, compensates the FIR group delay,
    recomputes latitude/longitude and formatted datetimes, and — when
    ``data_config.save_output`` is set — writes ``<output>/ROV-APS.csv``.
    """
    # FIX: the local frame used to be named `data`, shadowing the imported
    # `data` module (also used in this function's annotations); renamed.
    aps = pd.read_csv(data_config.input)

    # Extract relevant channels for filtering.
    aps_time = aps["Epoch"].to_numpy()
    aps_data = np.stack([ aps["UTM Northing"], aps["UTM Easting"], \
        aps["Depth"] ])

    # Sample frequency estimated from the mean epoch spacing.
    filter_config.sample_frequency = \
        1 / np.mean(aps_time[1:] - aps_time[0:-1])

    # Pad the ends to reduce FIR edge effects.
    filtered_aps_data = filters.add_appendage(aps_data.copy(), \
        filter_config)

    # Filter data and account for time delay.
    filtered_aps_data, filter_delay = filters.FIR_filter( \
        filtered_aps_data, filter_config, axis=1)
    filtered_aps_time = aps_time - filter_delay

    print("\nAPS:")
    print(" - Sampling time: {0:.4f}".format( \
        1 / filter_config.sample_frequency))
    print(" - Sampling frequency: {0:.4f}".format( \
        filter_config.sample_frequency))
    print(" - Filter time delay: {0:.4f}".format(filter_delay))

    # Remove the padding again.
    filtered_aps_data = filters.remove_appendage(filtered_aps_data, \
        filter_config)

    filtered_data = pd.DataFrame()
    filtered_data["Epoch"] = filtered_aps_time
    filtered_data["UTM Northing"] = filtered_aps_data[0]
    filtered_data["UTM Easting"] = filtered_aps_data[1]
    filtered_data["Depth"] = filtered_aps_data[2]
    filtered_data["UTM Zone"] = aps["UTM Zone"]
    filtered_data["UTM Hemisphere"] = aps["UTM Hemisphere"]

    # Latitude / longitude calculations.
    latitudes, longitudes = [], []
    for northing, easting, zone, hemisphere in \
        zip(filtered_data["UTM Northing"], filtered_data["UTM Easting"], \
        filtered_data["UTM Zone"], filtered_data["UTM Hemisphere"]):
        latitude, longitude = utm.UtmToLatLon(easting, northing, zone, \
            hemisphere)
        latitudes.append(latitude)
        longitudes.append(longitude)

    filtered_data["Latitude"] = np.array(latitudes, dtype=float)
    filtered_data["Longitude"] = np.array(longitudes, dtype=float)

    # Datetime calculations.
    times = [datetime.datetime.fromtimestamp(epoch).strftime(
        data_config.datetime_format) for epoch in filtered_data["Epoch"]]
    filtered_data["Datetime"] = np.array(times, dtype=str)

    if data_config.save_output:
        # FIX: dropped redundant pd.DataFrame(...) re-wrap — filtered_data
        # is already a DataFrame.
        filtered_data.to_csv(data_config.output + "ROV-APS.csv", sep=',')
def main():
    """CLI entry point: parse arguments, build the configurations, and
    run the APS filtering pipeline."""
    # Parse arguments.
    arg_parser = argparse.ArgumentParser( \
        description="Filter APS data with a FIR lowpass filter.")
    arg_parser.add_argument("input", type=str, help="Input file path.")
    arg_parser.add_argument("output", type=str, help="Output directory path.")
    arg_parser.add_argument("order", type=int, help="Filter order.")
    arg_parser.add_argument("cutoff", type=float, help="Filter cutoff.")
    arg_parser.add_argument("appendage", type=int, help="Filter appendage.")
    arg_parser.add_argument('--show_figures', type=bool, default=False, \
        help= "Show figures.", action=argparse.BooleanOptionalAction)
    arg_parser.add_argument('--save_figures', type=bool, default=False, \
        help= "Save figures.", action=argparse.BooleanOptionalAction)
    arg_parser.add_argument('--save_output', type=bool, default=False, \
        help= "Save output.", action=argparse.BooleanOptionalAction)
    cli_args = arg_parser.parse_args()

    # Data configuration.
    data_config = data.DataConfiguration(cli_args.input, cli_args.output, \
        cli_args.show_figures, cli_args.save_figures, cli_args.save_output)

    # Filter configuration.
    filter_config = filters.FilterConfiguration(cli_args.order, \
        cli_args.cutoff, cli_args.appendage)

    # Filter data.
    filter_aps(data_config, filter_config)
if __name__ == "__main__":
    # Run the CLI entry point only when executed as a script.
    main()
| [
"data.DataConfiguration",
"numpy.mean",
"filters.FilterConfiguration",
"utm.UtmToLatLon",
"datetime.datetime.fromtimestamp",
"filters.FIR_filter",
"pandas.read_csv",
"matplotlib.use",
"filters.remove_appendage",
"argparse.ArgumentParser",
"matplotlib.pyplot.style.use",
"numpy.stack",
"numpy.... | [((89, 112), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (103, 112), False, 'import matplotlib\n'), ((145, 190), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""./Styles/Scientific.mplstyle"""'], {}), "('./Styles/Scientific.mplstyle')\n", (158, 190), True, 'import matplotlib.pyplot as plt\n'), ((409, 439), 'pandas.read_csv', 'pd.read_csv', (['data_config.input'], {}), '(data_config.input)\n', (420, 439), True, 'import pandas as pd\n'), ((539, 607), 'numpy.stack', 'np.stack', (["[data['UTM Northing'], data['UTM Easting'], data['Depth']]"], {}), "([data['UTM Northing'], data['UTM Easting'], data['Depth']])\n", (547, 607), True, 'import numpy as np\n'), ((906, 966), 'filters.FIR_filter', 'filters.FIR_filter', (['filtered_aps_data', 'filter_config'], {'axis': '(1)'}), '(filtered_aps_data, filter_config, axis=1)\n', (924, 966), False, 'import filters\n'), ((1354, 1412), 'filters.remove_appendage', 'filters.remove_appendage', (['filtered_aps_data', 'filter_config'], {}), '(filtered_aps_data, filter_config)\n', (1378, 1412), False, 'import filters\n'), ((1444, 1458), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1456, 1458), True, 'import pandas as pd\n'), ((2256, 2288), 'numpy.array', 'np.array', (['latitudes'], {'dtype': 'float'}), '(latitudes, dtype=float)\n', (2264, 2288), True, 'import numpy as np\n'), ((2322, 2355), 'numpy.array', 'np.array', (['longitudes'], {'dtype': 'float'}), '(longitudes, dtype=float)\n', (2330, 2355), True, 'import numpy as np\n'), ((2611, 2637), 'numpy.array', 'np.array', (['times'], {'dtype': 'str'}), '(times, dtype=str)\n', (2619, 2637), True, 'import numpy as np\n'), ((2846, 2932), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Filter APS data with a FIR lowpass filter."""'}), "(description=\n 'Filter APS data with a FIR lowpass filter.')\n", (2869, 2932), False, 'import argparse\n'), ((3783, 3891), 'data.DataConfiguration', 'data.DataConfiguration', 
(['args.input', 'args.output', 'args.show_figures', 'args.save_figures', 'args.save_output'], {}), '(args.input, args.output, args.show_figures, args.\n save_figures, args.save_output)\n', (3805, 3891), False, 'import data\n'), ((3946, 4014), 'filters.FilterConfiguration', 'filters.FilterConfiguration', (['args.order', 'args.cutoff', 'args.appendage'], {}), '(args.order, args.cutoff, args.appendage)\n', (3973, 4014), False, 'import filters\n'), ((671, 709), 'numpy.mean', 'np.mean', (['(aps_time[1:] - aps_time[0:-1])'], {}), '(aps_time[1:] - aps_time[0:-1])\n', (678, 709), True, 'import numpy as np\n'), ((2083, 2135), 'utm.UtmToLatLon', 'utm.UtmToLatLon', (['easting', 'northing', 'zone', 'hemisphere'], {}), '(easting, northing, zone, hemisphere)\n', (2098, 2135), False, 'import utm\n'), ((2695, 2722), 'pandas.DataFrame', 'pd.DataFrame', (['filtered_data'], {}), '(filtered_data)\n', (2707, 2722), True, 'import pandas as pd\n'), ((2461, 2499), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['epoch'], {}), '(epoch)\n', (2492, 2499), False, 'import datetime\n')] |
import keras
import numpy as np
class ActivationLogger(keras.callbacks.Callback):
    """Keras callback that, at the end of every epoch, computes the
    activations of all layers for the first validation sample and saves
    them to ``activations_at_epoch_<epoch>.npz``.
    """

    def set_model(self, model):
        """Build a side model whose outputs are every layer's activations."""
        self.model = model
        layer_outputs = [layer.output for layer in model.layers]
        self.activations_model = keras.models.Model(model.input,
                                                   layer_outputs)

    def on_epoch_end(self, epoch, logs=None):
        """Dump activations for the first validation sample to disk.

        Raises RuntimeError when no validation data is available.
        NOTE(review): relies on ``self.validation_data``, which newer
        Keras versions no longer populate on callbacks — confirm the
        Keras version in use.
        """
        if self.validation_data is None:
            raise RuntimeError('Requires validation_data.')
        validation_sample = self.validation_data[0][0:1]
        activations = self.activations_model.predict(validation_sample)
        # BUG FIX: np.savez writes binary data, so the file must be opened
        # in binary mode ('wb') — text mode fails on Python 3. The context
        # manager also guarantees the handle is closed even on error.
        with open('activations_at_epoch_' + str(epoch) + '.npz', 'wb') as f:
            np.savez(f, activations)
| [
"numpy.savez",
"keras.models.Model"
] | [((240, 286), 'keras.models.Model', 'keras.models.Model', (['model.input', 'layer_outputs'], {}), '(model.input, layer_outputs)\n', (258, 286), False, 'import keras\n'), ((693, 717), 'numpy.savez', 'np.savez', (['f', 'activations'], {}), '(f, activations)\n', (701, 717), True, 'import numpy as np\n')] |
import numpy as np
import os
from IPython import embed
import subprocess
from tqdm import tqdm, trange
import sys
import json
from PIL import Image
import argparse
from collections import namedtuple
import cv2
# Location of the DAVIS 2017 source frames (1080p split).
davis_path = "../databases/DAVIS2017/JPEGImages/1080p"
# gt_path = '../databases/DAVIS2017/Annotations/1080p'
# resolution = "480p"
resolution = "1080p"
# challenge_path = "../databases/test-challenge/"+resolution
# np_masks = 'results/np_masks_davis/'
# pred_path = 'results/png_images_davis/'
# pred_path = 'results/png_images_test_challenge_'+resolution+'/'
# np_masks = 'results/np_masks_test_challenge/'+resolution
# Input: per-sequence numpy mask stacks; output: palettised PNG predictions.
np_masks = 'results/np_masks_test_dev/'+resolution
pred_path = 'results/png_images_test_dev_'+resolution+'/'
# img_dir = '/content/drive/My Drive/MASTER/TFM/RVOS/DAVIS_IMGS/davis_imgs/'
frame = 0
# Color palette (flat RGB triplets) used for the indexed "P"-mode PNGs;
# index 0 is background (black), indices 1+ are instance colors.
PALETTE = [0, 0, 0, 128, 0, 0, 0, 128, 0, 128, 128, 0, 0, 0, 128, 128, 0, 128, 0, 128, 128, 128, 128, 128, 64,
           0, 0, 191, 0, 0, 64, 128, 0, 191, 128, 0, 64, 0, 128]
def intersection_over_unit(target, prediction):
    """Intersection-over-union (Jaccard index) of two binary masks."""
    overlap = np.logical_and(target, prediction)
    combined = np.logical_or(target, prediction)
    return np.sum(overlap) / np.sum(combined)
def intersection_over_object(target, prediction):
    """Fraction of the prediction mask that overlaps the target mask."""
    overlap = np.logical_and(target, prediction)
    # Normalise by the area of the prediction mask, not the union.
    return np.sum(overlap) / np.sum(prediction == 1)
def NMS(masks):
    """Greedy non-maximum suppression over instance masks.

    Keeps a mask only if it overlaps every previously kept mask with
    IoU <= 0.5 and intersection-over-object <= 0.5. The first mask is
    always kept. Returns the kept masks in their original order.
    """
    kept = []
    for index, candidate in enumerate(tqdm(masks)):
        if index == 0:
            kept.append(candidate)
            continue
        overlaps = any(
            intersection_over_unit(saved, candidate) > 0.5
            or intersection_over_object(saved, candidate) > 0.5
            for saved in kept
        )
        if not overlaps:
            kept.append(candidate)
    return kept
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # NOTE(review): --gs is parsed but never used below — confirm intent.
    parser.add_argument("--gs", action="store_true", default=False, help="Enable to compute global statistics")
    args = parser.parse_args()
    z = 0  # processed-sequence counter
    # for seq in tqdm(sorted(os.listdir(davis_path))):
    for seq in tqdm(sorted(os.listdir(np_masks))):
        # seq = "blackswan"
        # if z == 1:
        #     break
        # masks = top.get_field('mask')
        # Load the per-sequence mask stack and drop the singleton channel axis.
        masks = np.load(os.path.join(np_masks, seq, 'mask.npy'))
        masks = masks.squeeze(1)
        k, h, w = masks.shape
        # import matplotlib.pyplot as plt
        # mask_pred = (np.squeeze(masks[1, :, :]))
        # mask_pred = np.reshape(mask_pred, (h, w))
        # print(mask_pred.shape)
        # print(np.unique(mask_pred))
        # plt.imsave('test.png', mask_pred)
        # Suppress heavily overlapping instance masks.
        masks = NMS(masks)
        # Paint the surviving masks into one label image; iterating in
        # reverse lets earlier (higher-priority) masks overwrite later ones.
        prediction_png = np.zeros((h, w))
        for i in reversed(range(len(masks))):
            # set the current k value
            current_k = i+1
            # change ones for actual k value
            prediction_png[np.array(masks[i]) == 1] = current_k
        # aux_path = os.path.join('/content/drive/My Drive/MASTER/TFM/RVOS/DAVIS_IMGS/palette_preds/', seq)
        aux_path = os.path.join(pred_path, seq)
        os.makedirs(aux_path, exist_ok=True)
        # Save as a palettised ("P"-mode) PNG, as required by DAVIS tooling.
        a = Image.fromarray(prediction_png.astype(np.uint8), mode="P")
        a.putpalette(PALETTE)
        a.save(os.path.join(aux_path, '00000.png'))
        z += 1
| [
"os.listdir",
"argparse.ArgumentParser",
"numpy.logical_and",
"os.makedirs",
"tqdm.tqdm",
"os.path.join",
"numpy.logical_or",
"numpy.sum",
"numpy.zeros",
"numpy.array"
] | [((1151, 1185), 'numpy.logical_and', 'np.logical_and', (['target', 'prediction'], {}), '(target, prediction)\n', (1165, 1185), True, 'import numpy as np\n'), ((1198, 1231), 'numpy.logical_or', 'np.logical_or', (['target', 'prediction'], {}), '(target, prediction)\n', (1211, 1231), True, 'import numpy as np\n'), ((1497, 1531), 'numpy.logical_and', 'np.logical_and', (['target', 'prediction'], {}), '(target, prediction)\n', (1511, 1531), True, 'import numpy as np\n'), ((2500, 2525), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2523, 2525), False, 'import argparse\n'), ((1248, 1268), 'numpy.sum', 'np.sum', (['intersection'], {}), '(intersection)\n', (1254, 1268), True, 'import numpy as np\n'), ((1271, 1284), 'numpy.sum', 'np.sum', (['union'], {}), '(union)\n', (1277, 1284), True, 'import numpy as np\n'), ((1596, 1616), 'numpy.sum', 'np.sum', (['intersection'], {}), '(intersection)\n', (1602, 1616), True, 'import numpy as np\n'), ((1619, 1642), 'numpy.sum', 'np.sum', (['(prediction == 1)'], {}), '(prediction == 1)\n', (1625, 1642), True, 'import numpy as np\n'), ((1868, 1879), 'tqdm.tqdm', 'tqdm', (['masks'], {}), '(masks)\n', (1872, 1879), False, 'from tqdm import tqdm, trange\n'), ((3337, 3353), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (3345, 3353), True, 'import numpy as np\n'), ((3703, 3731), 'os.path.join', 'os.path.join', (['pred_path', 'seq'], {}), '(pred_path, seq)\n', (3715, 3731), False, 'import os\n'), ((3740, 3776), 'os.makedirs', 'os.makedirs', (['aux_path'], {'exist_ok': '(True)'}), '(aux_path, exist_ok=True)\n', (3751, 3776), False, 'import os\n'), ((2762, 2782), 'os.listdir', 'os.listdir', (['np_masks'], {}), '(np_masks)\n', (2772, 2782), False, 'import os\n'), ((2919, 2958), 'os.path.join', 'os.path.join', (['np_masks', 'seq', '"""mask.npy"""'], {}), "(np_masks, seq, 'mask.npy')\n", (2931, 2958), False, 'import os\n'), ((3894, 3929), 'os.path.join', 'os.path.join', (['aux_path', '"""00000.png"""'], {}), 
"(aux_path, '00000.png')\n", (3906, 3929), False, 'import os\n'), ((3538, 3556), 'numpy.array', 'np.array', (['masks[i]'], {}), '(masks[i])\n', (3546, 3556), True, 'import numpy as np\n')] |
# Rao-Blackwellised particle filtering for jump markov linear systems
# Based on: https://github.com/probml/pmtk3/blob/master/demos/rbpfManeuverDemo.m
# Author: <NAME> (@gerdm)
# !pip install matplotlib==3.4.2
import jax
import numpy as np
import jax.numpy as jnp
import seaborn as sns
import matplotlib.pyplot as plt
import mixture_kalman_filter_lib as kflib
import pyprobml_utils as pml
from jax import random
from mpl_toolkits.mplot3d import Axes3D
from functools import partial
from sklearn.preprocessing import OneHotEncoder
from jax.scipy.special import logit
from numpy import linalg
# Globally hide the top/right axis spines so all figures share a cleaner look.
plt.rcParams["axes.spines.right"] = False
plt.rcParams["axes.spines.top"] = False
def plot_3d_belief_state(mu_hist, dim, ax, skip=3, npoints=2000, azimuth=-30, elevation=30):
    """Render the filtered marginal density of one state dimension over time.

    Every `skip`-th timestep contributes one KDE curve of the particle means
    for dimension `dim`, drawn at its time coordinate on the 3D axis `ax`.
    """
    n_steps = len(mu_hist)
    values = mu_hist[..., dim]
    # Common evaluation grid spanning the whole history of this dimension.
    grid = jnp.linspace(values.min(), values.max(), npoints).reshape(-1, 1)
    # Per-timestep kernel density estimate (bandwidth 0.5) of the particle means.
    kde_all = np.apply_along_axis(lambda X: pml.kdeg(grid, X[..., None], 0.5), 1, mu_hist)
    density_t = kde_all[..., dim]
    for t in range(0, n_steps, skip):
        ax.plot(np.ones(npoints) * t, grid, density_t[t], c="tab:blue", linewidth=1)
    ax.set_zlim(0, 1)
    pml.style3d(ax, 1.8, 1.2, 0.7, 0.8)
    ax.view_init(elevation, azimuth)
    ax.set_xlabel(r"$t$", fontsize=13)
    ax.set_ylabel(f"$x_{{d={dim},t}}$", fontsize=13)
    ax.set_zlabel(r"$p(x_{d, t} \vert y_{1:t})$", fontsize=13)
# Sampling period of the maneuvering-target model.
TT = 0.1
# Constant-velocity transition matrix over state (x, vx, y, vy).
A = jnp.array([
    [1, TT, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, TT],
    [0, 0, 0, 1],
])
# One control/offset vector per discrete maneuver mode (straight, left, right).
B1 = jnp.array([0, 0, 0, 0])
B2 = jnp.array([-1.225, -0.35, 1.225, 0.35])
B3 = jnp.array([1.225, 0.35, -1.225, -0.35])
B = jnp.stack([B1, B2, B3], axis=0)
# Process noise, observation noise, and identity observation model.
Q = jnp.eye(4) * 0.2
R = jnp.diag(jnp.array([2, 1, 2, 1])) * 10
C = jnp.eye(4)
# Markov chain over the 3 maneuver modes.
# BUG FIX: this matrix was assigned twice in a row; the first (stickier,
# 0.9-diagonal) assignment was dead code immediately overwritten by the
# 0.8-diagonal one below, which is the only value ever used.
transition_matrix = jnp.array([
    [0.8, 0.1, 0.1],
    [0.1, 0.8, 0.1],
    [0.1, 0.1, 0.8]
])
# Bundle the jump-Markov linear system parameters for the RBPF.
params = kflib.RBPFParamsDiscrete(A, B, C, Q, R, transition_matrix)

nparticles = 1000
nsteps = 100
key = random.PRNGKey(1)
keys = random.split(key, nsteps)
# Initial condition: mode 1 with a standard-normal continuous state.
x0 = (1, random.multivariate_normal(key, jnp.zeros(4), jnp.eye(4)))
draw_state_fixed = partial(kflib.draw_state, params=params)

# Create target dataset
_, (latent_hist, state_hist, obs_hist) = jax.lax.scan(draw_state_fixed, x0, keys)

# Perform filtering
key_base = random.PRNGKey(31)
key_mean_init, key_sample, key_state, key_next = random.split(key_base, 4)
p_init = jnp.array([0.0, 1.0, 0.0])

# Initial filter configuration
# BUG FIX: mu_0 was assigned twice with the identical expression; the
# duplicate line was dead code and has been removed.
mu_0 = 0.01 * random.normal(key_mean_init, (nparticles, 4))
Sigma_0 = jnp.zeros((nparticles, 4, 4))
s0 = random.categorical(key_state, logit(p_init), shape=(nparticles,))
weights_0 = jnp.ones(nparticles) / nparticles
init_config = (key_next, mu_0, Sigma_0, weights_0, s0)

rbpf_optimal_part = partial(kflib.rbpf_optimal, params=params, nparticles=nparticles)
_, (mu_hist, Sigma_hist, weights_hist, s_hist, Ptk) = jax.lax.scan(rbpf_optimal_part, init_config, obs_hist)
# Posterior-mean trajectory: weight-average the particle means per timestep.
mu_hist_post_mean = jnp.einsum("ts,tsm->tm", weights_hist, mu_hist)

# Plot target dataset (positions are dims 0 and 2 of the state vector)
color_dict = {0: "tab:green", 1: "tab:red", 2: "tab:blue"}
fig, ax = plt.subplots()
color_states_org = [color_dict[state] for state in latent_hist]
ax.scatter(*state_hist[:, [0, 2]].T, c="none", edgecolors=color_states_org, s=10)
ax.scatter(*obs_hist[:, [0, 2]].T, s=5, c="black", alpha=0.6)
ax.set_title("Data")
pml.savefig("rbpf-maneuver-data.pdf")

# Plot filtered dataset; MSE is computed over the two position coordinates only
fig, ax = plt.subplots()
rbpf_mse = ((mu_hist_post_mean - state_hist)[:, [0, 2]] ** 2).mean(axis=0).sum()
# MAP estimate of the discrete mode: average mode posterior across particles
latent_hist_est = Ptk.mean(axis=1).argmax(axis=1)
color_states_est = [color_dict[state] for state in latent_hist_est]
ax.scatter(*mu_hist_post_mean[:, [0, 2]].T, c="none", edgecolors=color_states_est, s=10)
ax.set_title(f"RBPF MSE: {rbpf_mse:.2f}")
pml.savefig("rbpf-maneuver-trace.pdf")

# Plot belief state of discrete system
p_terms = Ptk.mean(axis=1)
rbpf_error_rate = (latent_hist != p_terms.argmax(axis=1)).mean()
fig, ax = plt.subplots(figsize=(2.5, 5))
sns.heatmap(p_terms, cmap="viridis", cbar=False)
plt.title(f"RBPF, error rate: {rbpf_error_rate:0.3}")
pml.savefig("rbpf-maneuver-discrete-belief.pdf")

# Plot ground truth and MAP estimate
# NOTE(review): `sparse=False` is deprecated in scikit-learn >= 1.2 and removed
# in 1.4 in favour of `sparse_output=False` — confirm the pinned sklearn version.
ohe = OneHotEncoder(sparse=False)
latent_hmap = ohe.fit_transform(latent_hist[:, None])
latent_hmap_est = ohe.fit_transform(p_terms.argmax(axis=1)[:, None])

fig, ax = plt.subplots(figsize=(2.5, 5))
sns.heatmap(latent_hmap, cmap="viridis", cbar=False, ax=ax)
ax.set_title("Data")
pml.savefig("rbpf-maneuver-discrete-ground-truth.pdf")

fig, ax = plt.subplots(figsize=(2.5, 5))
sns.heatmap(latent_hmap_est, cmap="viridis", cbar=False, ax=ax)
ax.set_title(f"MAP (error rate: {rbpf_error_rate:0.4f})")
pml.savefig("rbpf-maneuver-discrete-map.pdf")

# Plot belief for state space (positions only: dims 0 and 2)
dims = [0, 2]
for dim in dims:
    fig = plt.figure()
    ax = plt.axes(projection="3d")
    plot_3d_belief_state(mu_hist, dim, ax)
    pml.savefig(f"rbpf-maneuver-belief-stated-dim{dim}.pdf", pad_inches=0, bbox_inches="tight")
plt.show()
| [
"jax.random.split",
"jax.random.PRNGKey",
"jax.numpy.eye",
"jax.random.normal",
"numpy.ones",
"jax.numpy.einsum",
"seaborn.heatmap",
"jax.lax.scan",
"matplotlib.pyplot.axes",
"jax.numpy.ones",
"matplotlib.pyplot.title",
"jax.numpy.stack",
"mixture_kalman_filter_lib.RBPFParamsDiscrete",
"ma... | [((1476, 1545), 'jax.numpy.array', 'jnp.array', (['[[1, TT, 0, 0], [0, 1, 0, 0], [0, 0, 1, TT], [0, 0, 0, 1]]'], {}), '([[1, TT, 0, 0], [0, 1, 0, 0], [0, 0, 1, TT], [0, 0, 0, 1]])\n', (1485, 1545), True, 'import jax.numpy as jnp\n'), ((1598, 1621), 'jax.numpy.array', 'jnp.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (1607, 1621), True, 'import jax.numpy as jnp\n'), ((1627, 1666), 'jax.numpy.array', 'jnp.array', (['[-1.225, -0.35, 1.225, 0.35]'], {}), '([-1.225, -0.35, 1.225, 0.35])\n', (1636, 1666), True, 'import jax.numpy as jnp\n'), ((1672, 1711), 'jax.numpy.array', 'jnp.array', (['[1.225, 0.35, -1.225, -0.35]'], {}), '([1.225, 0.35, -1.225, -0.35])\n', (1681, 1711), True, 'import jax.numpy as jnp\n'), ((1718, 1749), 'jax.numpy.stack', 'jnp.stack', (['[B1, B2, B3]'], {'axis': '(0)'}), '([B1, B2, B3], axis=0)\n', (1727, 1749), True, 'import jax.numpy as jnp\n'), ((1819, 1829), 'jax.numpy.eye', 'jnp.eye', (['(4)'], {}), '(4)\n', (1826, 1829), True, 'import jax.numpy as jnp\n'), ((1851, 1919), 'jax.numpy.array', 'jnp.array', (['[[0.9, 0.05, 0.05], [0.05, 0.9, 0.05], [0.05, 0.05, 0.9]]'], {}), '([[0.9, 0.05, 0.05], [0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])\n', (1860, 1919), True, 'import jax.numpy as jnp\n'), ((1955, 2017), 'jax.numpy.array', 'jnp.array', (['[[0.8, 0.1, 0.1], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8]]'], {}), '([[0.8, 0.1, 0.1], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8]])\n', (1964, 2017), True, 'import jax.numpy as jnp\n'), ((2042, 2100), 'mixture_kalman_filter_lib.RBPFParamsDiscrete', 'kflib.RBPFParamsDiscrete', (['A', 'B', 'C', 'Q', 'R', 'transition_matrix'], {}), '(A, B, C, Q, R, transition_matrix)\n', (2066, 2100), True, 'import mixture_kalman_filter_lib as kflib\n'), ((2139, 2156), 'jax.random.PRNGKey', 'random.PRNGKey', (['(1)'], {}), '(1)\n', (2153, 2156), False, 'from jax import random\n'), ((2164, 2189), 'jax.random.split', 'random.split', (['key', 'nsteps'], {}), '(key, nsteps)\n', (2176, 2189), False, 'from jax import random\n'), ((2278, 
2318), 'functools.partial', 'partial', (['kflib.draw_state'], {'params': 'params'}), '(kflib.draw_state, params=params)\n', (2285, 2318), False, 'from functools import partial\n'), ((2385, 2425), 'jax.lax.scan', 'jax.lax.scan', (['draw_state_fixed', 'x0', 'keys'], {}), '(draw_state_fixed, x0, keys)\n', (2397, 2425), False, 'import jax\n'), ((2458, 2476), 'jax.random.PRNGKey', 'random.PRNGKey', (['(31)'], {}), '(31)\n', (2472, 2476), False, 'from jax import random\n'), ((2526, 2551), 'jax.random.split', 'random.split', (['key_base', '(4)'], {}), '(key_base, 4)\n', (2538, 2551), False, 'from jax import random\n'), ((2561, 2587), 'jax.numpy.array', 'jnp.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (2570, 2587), True, 'import jax.numpy as jnp\n'), ((2750, 2779), 'jax.numpy.zeros', 'jnp.zeros', (['(nparticles, 4, 4)'], {}), '((nparticles, 4, 4))\n', (2759, 2779), True, 'import jax.numpy as jnp\n'), ((2972, 3037), 'functools.partial', 'partial', (['kflib.rbpf_optimal'], {'params': 'params', 'nparticles': 'nparticles'}), '(kflib.rbpf_optimal, params=params, nparticles=nparticles)\n', (2979, 3037), False, 'from functools import partial\n'), ((3092, 3146), 'jax.lax.scan', 'jax.lax.scan', (['rbpf_optimal_part', 'init_config', 'obs_hist'], {}), '(rbpf_optimal_part, init_config, obs_hist)\n', (3104, 3146), False, 'import jax\n'), ((3167, 3214), 'jax.numpy.einsum', 'jnp.einsum', (['"""ts,tsm->tm"""', 'weights_hist', 'mu_hist'], {}), "('ts,tsm->tm', weights_hist, mu_hist)\n", (3177, 3214), True, 'import jax.numpy as jnp\n'), ((3308, 3322), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3320, 3322), True, 'import matplotlib.pyplot as plt\n'), ((3552, 3589), 'pyprobml_utils.savefig', 'pml.savefig', (['"""rbpf-maneuver-data.pdf"""'], {}), "('rbpf-maneuver-data.pdf')\n", (3563, 3589), True, 'import pyprobml_utils as pml\n'), ((3625, 3639), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3637, 3639), True, 'import matplotlib.pyplot as 
plt\n'), ((3970, 4008), 'pyprobml_utils.savefig', 'pml.savefig', (['"""rbpf-maneuver-trace.pdf"""'], {}), "('rbpf-maneuver-trace.pdf')\n", (3981, 4008), True, 'import pyprobml_utils as pml\n'), ((4151, 4181), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2.5, 5)'}), '(figsize=(2.5, 5))\n', (4163, 4181), True, 'import matplotlib.pyplot as plt\n'), ((4182, 4230), 'seaborn.heatmap', 'sns.heatmap', (['p_terms'], {'cmap': '"""viridis"""', 'cbar': '(False)'}), "(p_terms, cmap='viridis', cbar=False)\n", (4193, 4230), True, 'import seaborn as sns\n'), ((4231, 4284), 'matplotlib.pyplot.title', 'plt.title', (['f"""RBPF, error rate: {rbpf_error_rate:0.3}"""'], {}), "(f'RBPF, error rate: {rbpf_error_rate:0.3}')\n", (4240, 4284), True, 'import matplotlib.pyplot as plt\n'), ((4285, 4333), 'pyprobml_utils.savefig', 'pml.savefig', (['"""rbpf-maneuver-discrete-belief.pdf"""'], {}), "('rbpf-maneuver-discrete-belief.pdf')\n", (4296, 4333), True, 'import pyprobml_utils as pml\n'), ((4378, 4405), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (4391, 4405), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((4540, 4570), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2.5, 5)'}), '(figsize=(2.5, 5))\n', (4552, 4570), True, 'import matplotlib.pyplot as plt\n'), ((4571, 4630), 'seaborn.heatmap', 'sns.heatmap', (['latent_hmap'], {'cmap': '"""viridis"""', 'cbar': '(False)', 'ax': 'ax'}), "(latent_hmap, cmap='viridis', cbar=False, ax=ax)\n", (4582, 4630), True, 'import seaborn as sns\n'), ((4652, 4706), 'pyprobml_utils.savefig', 'pml.savefig', (['"""rbpf-maneuver-discrete-ground-truth.pdf"""'], {}), "('rbpf-maneuver-discrete-ground-truth.pdf')\n", (4663, 4706), True, 'import pyprobml_utils as pml\n'), ((4718, 4748), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2.5, 5)'}), '(figsize=(2.5, 5))\n', (4730, 4748), True, 'import matplotlib.pyplot as plt\n'), ((4749, 4812), 
'seaborn.heatmap', 'sns.heatmap', (['latent_hmap_est'], {'cmap': '"""viridis"""', 'cbar': '(False)', 'ax': 'ax'}), "(latent_hmap_est, cmap='viridis', cbar=False, ax=ax)\n", (4760, 4812), True, 'import seaborn as sns\n'), ((4871, 4916), 'pyprobml_utils.savefig', 'pml.savefig', (['"""rbpf-maneuver-discrete-map.pdf"""'], {}), "('rbpf-maneuver-discrete-map.pdf')\n", (4882, 4916), True, 'import pyprobml_utils as pml\n'), ((5177, 5187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5185, 5187), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1265), 'pyprobml_utils.style3d', 'pml.style3d', (['ax', '(1.8)', '(1.2)', '(0.7)', '(0.8)'], {}), '(ax, 1.8, 1.2, 0.7, 0.8)\n', (1241, 1265), True, 'import pyprobml_utils as pml\n'), ((1761, 1771), 'jax.numpy.eye', 'jnp.eye', (['(4)'], {}), '(4)\n', (1768, 1771), True, 'import jax.numpy as jnp\n'), ((2634, 2679), 'jax.random.normal', 'random.normal', (['key_mean_init', '(nparticles, 4)'], {}), '(key_mean_init, (nparticles, 4))\n', (2647, 2679), False, 'from jax import random\n'), ((2694, 2739), 'jax.random.normal', 'random.normal', (['key_mean_init', '(nparticles, 4)'], {}), '(key_mean_init, (nparticles, 4))\n', (2707, 2739), False, 'from jax import random\n'), ((2814, 2827), 'jax.scipy.special.logit', 'logit', (['p_init'], {}), '(p_init)\n', (2819, 2827), False, 'from jax.scipy.special import logit\n'), ((2862, 2882), 'jax.numpy.ones', 'jnp.ones', (['nparticles'], {}), '(nparticles)\n', (2870, 2882), True, 'import jax.numpy as jnp\n'), ((4989, 5001), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4999, 5001), True, 'import matplotlib.pyplot as plt\n'), ((5011, 5036), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (5019, 5036), True, 'import matplotlib.pyplot as plt\n'), ((5084, 5179), 'pyprobml_utils.savefig', 'pml.savefig', (['f"""rbpf-maneuver-belief-stated-dim{dim}.pdf"""'], {'pad_inches': '(0)', 'bbox_inches': '"""tight"""'}), 
"(f'rbpf-maneuver-belief-stated-dim{dim}.pdf', pad_inches=0,\n bbox_inches='tight')\n", (5095, 5179), True, 'import pyprobml_utils as pml\n'), ((1790, 1813), 'jax.numpy.array', 'jnp.array', (['[2, 1, 2, 1]'], {}), '([2, 1, 2, 1])\n', (1799, 1813), True, 'import jax.numpy as jnp\n'), ((2232, 2244), 'jax.numpy.zeros', 'jnp.zeros', (['(4)'], {}), '(4)\n', (2241, 2244), True, 'import jax.numpy as jnp\n'), ((2246, 2256), 'jax.numpy.eye', 'jnp.eye', (['(4)'], {}), '(4)\n', (2253, 2256), True, 'import jax.numpy as jnp\n'), ((876, 909), 'jax.numpy.linspace', 'jnp.linspace', (['xmin', 'xmax', 'npoints'], {}), '(xmin, xmax, npoints)\n', (888, 909), True, 'import jax.numpy as jnp\n'), ((965, 1000), 'pyprobml_utils.kdeg', 'pml.kdeg', (['xrange', 'X[..., None]', '(0.5)'], {}), '(xrange, X[..., None], 0.5)\n', (973, 1000), True, 'import pyprobml_utils as pml\n'), ((1100, 1116), 'numpy.ones', 'np.ones', (['npoints'], {}), '(npoints)\n', (1107, 1116), True, 'import numpy as np\n')] |
import sys
import argparse
import random
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import copy
from models import bb_net_rl, dropout_rl
import trainer
import processing
from samplers import Sampler
import torch
# Command-line configuration; defaults reproduce the mushroom-bandit setup.
parser = argparse.ArgumentParser(description='Grid search')
parser.add_argument('-sn', default=1000, type=int, help='number of epochs')
parser.add_argument('-c', default='sgld', type=str, help='type of algorithms')
parser.add_argument('-lr', default=1e-6, type=float, help='learning rate')
parser.add_argument('-T', default=0, type=float, help='temperature')
parser.add_argument('-sz', default=0, type=float, help='stepsize for stochastic approximation')
parser.add_argument('-zeta', default=0, type=float, help='zeta')
parser.add_argument('-reg', default=1e-3, type=float, help='regularizer')
parser.add_argument('-wdecay', default=1e-2, type=float, help='L2 penalty')
parser.add_argument('-part', default=200, type=int, help='The number of partitions')
parser.add_argument('-div', default=2, type=float, help='Divide energy: divisor to calculate partition index')
parser.add_argument('-chains', default=1, type=int, help='Total number of chains')
parser.add_argument('-repeat', default=5, type=int, help='Total number of repeats')
parser.add_argument('-hidden', default=100, type=int, help='Number of hidden nodes')
parser.add_argument('-init', default=1024, type=int, help='burn in number of mushrooms')
parser.add_argument('-buffers', default=4096, type=int, help='buffer size')
parser.add_argument('-batch', default=512, type=int, help='Training batch size')
parser.add_argument('-trains', default=16, type=int, help='Train iterations')
parser.add_argument('-pull', default=20, type=int, help='Number of pulls')
""" hyperparameters for preconditioned SGLD """
parser.add_argument('-precondition', default=0, type=int, help='set preconditioner or not')
parser.add_argument('-preg', default=1e-3, type=float, help='regularizer for preconditioner')
parser.add_argument('-alpha', default=0.01, type=float, help='stepsize for preconditioner')
""" hyperparameters for dropout """
parser.add_argument('-rate', default=0, type=float, help='dropout rate')
parser.add_argument('-samples', default=1, type=int, help='repeating samples')
""" hyperparameters for RMSProp """
parser.add_argument('-decay', default=1.0, type=float, help='LR decay')
parser.add_argument('-epsilon', default=0, type=float, help='epsilon greedy')
""" hyperparameters for BayesBackProp """
parser.add_argument('-sigma1', default=1, type=float, help='')
parser.add_argument('-sigma2', default=1e-6, type=float, help='')
parser.add_argument('-sigma', default=0.02, type=float, help='')
parser.add_argument('-pi', default=0.5, type=float, help='')
parser.add_argument('-warm', default=0.1, type=float, help='warm up for CSGLD')
# BUG FIX: random.randint requires integer endpoints. The previous float
# literal 1e4 raises TypeError on Python >= 3.11 (DeprecationWarning on 3.10).
parser.add_argument('-seed', default=random.randint(1, 10000), type=int, help='Random Seed')
parser.add_argument('-gpu', default=-1, type=int, help='Default GPU')

pars = parser.parse_args()
print(pars)
# Compact numpy printing for the regret tables printed below.
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)

""" set random seeds """
torch.manual_seed(pars.seed)
torch.cuda.manual_seed(pars.seed)
np.random.seed(pars.seed)
random.seed(pars.seed)
torch.backends.cudnn.deterministic = True

try:
    torch.cuda.set_device(pars.gpu)
except Exception:  # in case the device has only one GPU (or the id is invalid)
    # BUG FIX: the original used a bare `except:` (which also swallows
    # KeyboardInterrupt/SystemExit) and unconditionally retried device 0,
    # which crashes on CPU-only machines. Only fall back when CUDA exists.
    if torch.cuda.is_available():
        torch.cuda.set_device(0)
CUDA_EXISTS = torch.cuda.is_available() and pars.gpu >= 0
print('Using GPU: {}'.format(CUDA_EXISTS))
# Load the UCI mushroom dataset; X are contexts, y are edible/poisonous labels.
train_X, train_y = processing.get_mushroom()
dim_input = train_X.shape[1]
dim_action_space = 2  # actions: eat / don't eat
# unsqueeze adds a length-1 axis; presumably the per-sample "sequence" dim
# the trainer expects — TODO confirm against trainer.*RLReg.
X_train_tensor = torch.from_numpy(train_X.copy()).float().unsqueeze(dim=1)
y_train_tensor = torch.from_numpy(train_y.copy()).float()
if CUDA_EXISTS:
    X_train_tensor, y_train_tensor = X_train_tensor.cuda(), y_train_tensor.cuda()
# One regret curve (of length sn+1) per epsilon setting, averaged over repeats.
epsilons = [pars.epsilon]
regrets = np.zeros((len(epsilons), pars.sn + 1))
regrets_std = np.zeros_like(regrets)
# Main experiment: for every epsilon, run `repeat` independent bandit runs and
# record the cumulative-regret trajectory of each.
for i, epsilon in enumerate(epsilons):
    regs = np.zeros((pars.repeat, pars.sn + 1))
    for run in range(pars.repeat):
        print('epsilon greedy {:.3f} sample {} / {}'.format(epsilon, run + 1, pars.repeat))
        nets, samplers = [], []
        # Build one network per chain; architecture depends on the algorithm.
        for _ in range(pars.chains):
            if pars.c in ('sgd', 'sgld', 'csgld'):
                net = trainer.DeterministicRLNet(dim_input, pars.hidden, dim_action_space)
            elif pars.c == 'dropout':
                net = trainer.DropoutNet(dim_input, pars.hidden, dim_action_space, p=pars.rate)
            elif pars.c == 'bayesbackprop':
                prior_parameters = {'sigma1': pars.sigma1, 'sigma2': pars.sigma2, 'pi': pars.pi}
                net = trainer.BayesBackpropRLNet(dim_input, pars.hidden, dim_action_space, prior_parameters, sigma=pars.sigma)
            if CUDA_EXISTS:
                net = net.cuda()
            nets.append(net)
        """ ensemble net for predictions of actions """
        # Agent + regret tracker pair matching the chosen algorithm family.
        if pars.c in ('sgd', 'sgld', 'csgld'):
            agents = trainer.AgentGreedy(nets, epsilon)
            rl_reg = trainer.DeterministicRLReg(X_train_tensor, y_train_tensor, agents, \
                    buffer_size=pars.buffers, minibatch_size=pars.batch, burn_in=pars.init)
        elif pars.c == 'dropout':
            agents = trainer.AgentDropout(nets, sample=pars.samples)
            rl_reg = trainer.DropoutRLReg(X_train_tensor, y_train_tensor, agents, \
                    buffer_size=pars.buffers, minibatch_size=pars.batch, burn_in=pars.init)
        elif pars.c == 'bayesbackprop':
            agents = trainer.AgentBayesBackprop(nets, sample=pars.samples)
            rl_reg = trainer.BayesRLReg(X_train_tensor, y_train_tensor, agents, \
                    buffer_size=pars.buffers, minibatch_size=pars.batch, burn_in=pars.init)
        # One optimizer/sampler per chain.
        for idx in range(pars.chains):
            if pars.c == 'sgd':
                sampler = torch.optim.SGD(nets[idx].parameters(), lr=pars.lr, weight_decay=pars.wdecay)
            elif pars.c in ('sgld', 'csgld', 'dropout', 'bayesbackprop'):
                sampler = Sampler(nets[idx], pars, CUDA_EXISTS)
            else:
                sys.exit('Unknown algorithms.')
            samplers.append(sampler)
        rl_reg.train(pars.sn, pars.pull, pars.trains, pars.buffers, pars.sz, pars.warm, pars.decay, samplers, CUDA_EXISTS)
        regs[run] = copy.copy(rl_reg.hist['regret'])
        if pars.c == 'csgld':
            print('print G function (related to PDF in energy or density of states)')
            print(samplers[0].G.numpy())
    # Aggregate the repeats into a mean curve and its standard deviation.
    regrets[i] = regs.mean(axis=0)
    regrets_std[i] = regs.std(axis=0)

# Commented-out plotting code kept for reference (writes the regret figure).
'''
plt.figure(figsize=(6, 4))
for epsilon, regs, regs_std in zip(epsilons, regrets, regrets_std):
    plt.plot(regs, label=r"$\epsilon$ = {}".format(epsilon))
    plt.fill_between(np.arange(pars.sn + 1), regs - regs_std, regs + regs_std, alpha=0.3)
plt.legend()
plt.xlabel('Step')
plt.ylabel('Cumulative regret')
plt.show()
plt.savefig('./figures/mushroom_' + pars.c + '_chains_' + str(pars.chains) + '_node_' + str(pars.hidden) + '_lr_' + str(pars.lr) + '_sz_' + str(pars.sz) \
    + '_T_' + str(pars.T) + '_l2_' + str(pars.wdecay) + '_zeta_' + str(pars.zeta) + '_sn_' + str(pars.sn) + '_pull_' + str(pars.pull) \
    + '_trains_' + str(pars.trains) + '_div_' + str(pars.div) + '_part_' + str(pars.part) + '_repeat_' + str(pars.repeat) + '_seed_' + str(pars.seed) + '.png', dpi=200)
'''
| [
"trainer.DropoutRLReg",
"processing.get_mushroom",
"torch.cuda.is_available",
"sys.exit",
"copy.copy",
"argparse.ArgumentParser",
"trainer.AgentGreedy",
"numpy.random.seed",
"trainer.AgentDropout",
"random.randint",
"trainer.DeterministicRLReg",
"samplers.Sampler",
"trainer.BayesBackpropRLNe... | [((253, 303), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Grid search"""'}), "(description='Grid search')\n", (276, 303), False, 'import argparse\n'), ((3047, 3079), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (3066, 3079), True, 'import numpy as np\n'), ((3080, 3114), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (3099, 3114), True, 'import numpy as np\n'), ((3141, 3169), 'torch.manual_seed', 'torch.manual_seed', (['pars.seed'], {}), '(pars.seed)\n', (3158, 3169), False, 'import torch\n'), ((3170, 3203), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['pars.seed'], {}), '(pars.seed)\n', (3192, 3203), False, 'import torch\n'), ((3204, 3229), 'numpy.random.seed', 'np.random.seed', (['pars.seed'], {}), '(pars.seed)\n', (3218, 3229), True, 'import numpy as np\n'), ((3230, 3252), 'random.seed', 'random.seed', (['pars.seed'], {}), '(pars.seed)\n', (3241, 3252), False, 'import random\n'), ((3532, 3557), 'processing.get_mushroom', 'processing.get_mushroom', ([], {}), '()\n', (3555, 3557), False, 'import processing\n'), ((3933, 3955), 'numpy.zeros_like', 'np.zeros_like', (['regrets'], {}), '(regrets)\n', (3946, 3955), True, 'import numpy as np\n'), ((3303, 3334), 'torch.cuda.set_device', 'torch.cuda.set_device', (['pars.gpu'], {}), '(pars.gpu)\n', (3324, 3334), False, 'import torch\n'), ((3426, 3451), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3449, 3451), False, 'import torch\n'), ((4006, 4042), 'numpy.zeros', 'np.zeros', (['(pars.repeat, pars.sn + 1)'], {}), '((pars.repeat, pars.sn + 1))\n', (4014, 4042), True, 'import numpy as np\n'), ((2884, 2910), 'random.randint', 'random.randint', (['(1)', '(10000.0)'], {}), '(1, 10000.0)\n', (2898, 2910), False, 'import random\n'), ((3385, 3409), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(0)'], {}), '(0)\n', (3406, 
3409), False, 'import torch\n'), ((6355, 6387), 'copy.copy', 'copy.copy', (["rl_reg.hist['regret']"], {}), "(rl_reg.hist['regret'])\n", (6364, 6387), False, 'import copy\n'), ((4998, 5032), 'trainer.AgentGreedy', 'trainer.AgentGreedy', (['nets', 'epsilon'], {}), '(nets, epsilon)\n', (5017, 5032), False, 'import trainer\n'), ((5054, 5196), 'trainer.DeterministicRLReg', 'trainer.DeterministicRLReg', (['X_train_tensor', 'y_train_tensor', 'agents'], {'buffer_size': 'pars.buffers', 'minibatch_size': 'pars.batch', 'burn_in': 'pars.init'}), '(X_train_tensor, y_train_tensor, agents,\n buffer_size=pars.buffers, minibatch_size=pars.batch, burn_in=pars.init)\n', (5080, 5196), False, 'import trainer\n'), ((4312, 4380), 'trainer.DeterministicRLNet', 'trainer.DeterministicRLNet', (['dim_input', 'pars.hidden', 'dim_action_space'], {}), '(dim_input, pars.hidden, dim_action_space)\n', (4338, 4380), False, 'import trainer\n'), ((5274, 5321), 'trainer.AgentDropout', 'trainer.AgentDropout', (['nets'], {'sample': 'pars.samples'}), '(nets, sample=pars.samples)\n', (5294, 5321), False, 'import trainer\n'), ((5343, 5480), 'trainer.DropoutRLReg', 'trainer.DropoutRLReg', (['X_train_tensor', 'y_train_tensor', 'agents'], {'buffer_size': 'pars.buffers', 'minibatch_size': 'pars.batch', 'burn_in': 'pars.init'}), '(X_train_tensor, y_train_tensor, agents, buffer_size=\n pars.buffers, minibatch_size=pars.batch, burn_in=pars.init)\n', (5363, 5480), False, 'import trainer\n'), ((4441, 4514), 'trainer.DropoutNet', 'trainer.DropoutNet', (['dim_input', 'pars.hidden', 'dim_action_space'], {'p': 'pars.rate'}), '(dim_input, pars.hidden, dim_action_space, p=pars.rate)\n', (4459, 4514), False, 'import trainer\n'), ((5563, 5616), 'trainer.AgentBayesBackprop', 'trainer.AgentBayesBackprop', (['nets'], {'sample': 'pars.samples'}), '(nets, sample=pars.samples)\n', (5589, 5616), False, 'import trainer\n'), ((5638, 5773), 'trainer.BayesRLReg', 'trainer.BayesRLReg', (['X_train_tensor', 'y_train_tensor', 'agents'], 
{'buffer_size': 'pars.buffers', 'minibatch_size': 'pars.batch', 'burn_in': 'pars.init'}), '(X_train_tensor, y_train_tensor, agents, buffer_size=pars\n .buffers, minibatch_size=pars.batch, burn_in=pars.init)\n', (5656, 5773), False, 'import trainer\n'), ((6071, 6108), 'samplers.Sampler', 'Sampler', (['nets[idx]', 'pars', 'CUDA_EXISTS'], {}), '(nets[idx], pars, CUDA_EXISTS)\n', (6078, 6108), False, 'from samplers import Sampler\n'), ((6143, 6174), 'sys.exit', 'sys.exit', (['"""Unknown algorithms."""'], {}), "('Unknown algorithms.')\n", (6151, 6174), False, 'import sys\n'), ((4678, 4786), 'trainer.BayesBackpropRLNet', 'trainer.BayesBackpropRLNet', (['dim_input', 'pars.hidden', 'dim_action_space', 'prior_parameters'], {'sigma': 'pars.sigma'}), '(dim_input, pars.hidden, dim_action_space,\n prior_parameters, sigma=pars.sigma)\n', (4704, 4786), False, 'import trainer\n')] |
# Source : https://github.com/GuessWhatGame/generic/tree/master/data_provider
import os
from PIL import Image
import numpy as np
import h5py
from data_provider.image_preprocessors import resize_image, scaled_crop_and_pad
# Why doing an image builder/loader?
# Well, there are two reasons:
# - first you want to abstract the kind of image you are using (raw/conv/feature) when you are loading the dataset/batch.
# One may just want... to load an image!
# - One must optimize when to load the image for multiprocessing.
# You do not want to serialize a 2Go of fc8 features when you create a process
# You do not want to load 50Go of images at start
#
# The Builder enables to abstract the kind of image you want to load. It will be used while loading the dataset.
# The Loader enables to load/process the image when you need it. It will be used when you create the batch
#
# Enjoy design patterns, it may **this** page of code complex but the it makes the whole project easier! Act Local, Think Global :P
#
class AbstractImgBuilder(object):
    """Base class for image builders.

    A builder abstracts *which* image representation (raw pixels, conv maps,
    fc features, ...) will be produced, and hands out one loader per image
    via ``build``.
    """

    def __init__(self, img_dir, is_raw, require_process=False):
        # Root directory from which loaders resolve image files.
        self.img_dir = img_dir
        self.is_raw = is_raw
        self.require_process = require_process

    def build(self, image_id, filename, **kwargs):
        """Return a loader for one image; by default the builder is the loader."""
        return self

    def is_raw_image(self):
        """True when this builder yields raw pixel data."""
        return self.is_raw

    def require_multiprocess(self):
        """True when per-image processing is heavy enough to warrant workers."""
        return self.require_process
class AbstractImgLoader(object):
    """Base class for image loaders: fetches one image when requested."""

    def __init__(self, img_path):
        self.img_path = img_path

    def get_image(self, **kwargs):
        """Return the image data; the base implementation is a no-op."""
        pass
class DummyImgBuilder(AbstractImgBuilder, AbstractImgLoader):
    """Placeholder builder/loader returning an all-zero feature vector.

    Useful to ablate the image channel without touching the rest of a model.
    """

    def __init__(self, img_dir, size=1000):
        AbstractImgBuilder.__init__(self, img_dir, is_raw=False)
        # Length of the constant feature vector handed out by get_image.
        self.size = size

    def build(self, image_id, filename, **kwargs):
        return self

    def get_image(self, **kwargs):
        return np.zeros(self.size)
class ErrorImgLoader(AbstractImgLoader):
    """Loader placeholder for images absent from the feature file.

    Any attempt to actually load the image fails loudly.
    """

    def __init__(self, img_path):
        AbstractImgLoader.__init__(self, img_path)

    def get_image(self, **kwargs):
        # BUG FIX: this used `assert False, ...`, which is stripped when
        # Python runs with -O — get_image would then silently return None.
        # Raise an explicit exception instead.
        raise RuntimeError("The image/crop is not available in file: {}".format(self.img_path))
# Layout of the hdf5 feature files:
h5_basename="features.h5"  # default filename (optionally prefixed by split name)
h5_feature_key="features"  # dataset holding the feature matrix
h5_idx_key="idx2img"       # optional dataset mapping h5 row -> image id
class h5FeatureBuilder(AbstractImgBuilder):
    """Builder for pre-computed image features stored in hdf5 files.

    Keeps one open file handle per hdf5 file plus a per-file mapping from
    image_id to h5 row index, so repeated builds reuse the same handle.
    """

    def __init__(self, img_dir, bufferize):
        AbstractImgBuilder.__init__(self, img_dir, is_raw=False)
        # True: copy features into memory at dataset-load time (h5FeatureBufloader);
        # False: read them lazily at batch-creation time (h5FeatureLoader).
        self.bufferize = bufferize
        self.h5files = dict()   # h5 filename -> open h5py.File handle
        self.img2idx = dict()   # h5 filename -> {image_id: h5 row index}

    def build(self, image_id, filename, optional=True, which_set=None,**kwargs):
        # Is the h5 features split into train/val/etc. files or gathered into a single file
        if which_set is not None:
            h5filename = which_set + "_" + h5_basename
        else:
            h5filename = h5_basename
        # Build full path
        h5filepath = os.path.join(self.img_dir,h5filename)
        # Retrieve (and cache) the file handle and the id mapping
        if h5filename not in self.h5files:
            # Load file pointer to h5
            h5file = h5py.File(h5filepath, 'r')
            # hd5 requires continuous id while image_id can be very diverse.
            # We then need a mapping between both of them
            if h5_idx_key in h5file:
                # Retrieve id mapping from file
                img2idx = {id_img : id_h5 for id_h5, id_img in enumerate(h5file[h5_idx_key])}
            else:
                # Assume there is a perfect identity between image_id and h5_id
                no_images = h5file[h5_feature_key].shape[0]
                img2idx = {k : k for k in range(no_images) }
            self.h5files[h5filename] = h5file
            self.img2idx[h5filename] = img2idx
        else:
            h5file = self.h5files[h5filename]
            img2idx = self.img2idx[h5filename]
        if self.bufferize:
            # Optional images missing from the file get an ErrorImgLoader
            # instead of raising here.
            if (optional and image_id in img2idx) or (not optional):
                return h5FeatureBufloader(h5filepath, h5file=h5file, id=img2idx[image_id])
            else:
                return ErrorImgLoader(h5filepath)
        else:
            return h5FeatureLoader(h5filepath, h5file=h5file, id=img2idx[image_id])
# Lazy loader: the feature row is read from the hdf5 file at batch-creation time.
class h5FeatureLoader(AbstractImgLoader):
    """Fetch a single feature row from an open hdf5 file on demand."""

    def __init__(self, img_path, h5file, id):
        AbstractImgLoader.__init__(self, img_path)
        self.h5file = h5file
        self.id = id

    def get_image(self, **kwargs):
        # Index into the features dataset only when the image is requested.
        features = self.h5file[h5_feature_key]
        return features[self.id]
# Eager loader: the feature row is copied into memory while the dataset is
# being built (fast at batch time, memory hungry up front).
class h5FeatureBufloader(AbstractImgLoader):
    """Hold a single pre-fetched feature row in memory."""

    def __init__(self, img_path, h5file, id):
        AbstractImgLoader.__init__(self, img_path)
        # Materialize the row immediately so batch creation is a no-op read.
        self.data = h5file[h5_feature_key][id]

    def get_image(self, **kwargs):
        return self.data
class RawImageBuilder(AbstractImgBuilder):
    """Builder producing loaders that read and resize raw images from disk."""

    def __init__(self, img_dir, width, height, channel=None):
        # Raw pixels are expensive to decode, hence require_process=True.
        AbstractImgBuilder.__init__(self, img_dir, is_raw=True, require_process=True)
        self.width = width
        self.height = height
        # Optional per-channel mean subtracted at load time.
        self.channel = channel

    def build(self, image_id, filename, **kwargs):
        full_path = os.path.join(self.img_dir, filename)
        return RawImageLoader(full_path, self.width, self.height, channel=self.channel)
class RawImageLoader(AbstractImgLoader):
    """Load a raw image from disk, resize it and (optionally) center channels."""

    def __init__(self, img_path, width, height, channel):
        AbstractImgLoader.__init__(self, img_path)
        self.width = width
        self.height = height
        self.channel = channel

    def get_image(self, **kwargs):
        rgb = Image.open(self.img_path).convert('RGB')
        resized = resize_image(rgb, self.width, self.height)
        pixels = np.array(resized, dtype=np.float32)
        if self.channel is not None:
            # Broadcast-subtract the per-channel mean over every pixel.
            pixels -= self.channel[None, None, :]
        return pixels
class RawCropBuilder(AbstractImgBuilder):
    """Builder producing loaders that crop a bounding box out of a raw image."""

    def __init__(self, data_dir, width, height, scale, channel=None):
        AbstractImgBuilder.__init__(self, data_dir, is_raw=True, require_process=True)
        self.width = width
        self.height = height
        # Optional per-channel mean subtracted at load time.
        self.channel = channel
        # Scale factor applied around the bounding box when cropping.
        self.scale = scale

    def build(self, object_id, filename, **kwargs):
        # A bounding box is mandatory for crops (KeyError if missing).
        bbox = kwargs["bbox"]
        full_path = os.path.join(self.img_dir, filename)
        return RawCropLoader(full_path, self.width, self.height, scale=self.scale, bbox=bbox, channel=self.channel)
class RawCropLoader(AbstractImgLoader):
    """Crop a (scaled) bounding box out of a raw image, resize and center it."""

    def __init__(self, img_path, width, height, scale, bbox, channel):
        AbstractImgLoader.__init__(self, img_path)
        self.width = width
        self.height = height
        self.channel = channel
        self.bbox = bbox
        self.scale = scale

    def get_image(self, **kwargs):
        rgb = Image.open(self.img_path).convert('RGB')
        cropped = scaled_crop_and_pad(raw_img=rgb, bbox=self.bbox, scale=self.scale)
        resized = resize_image(cropped, self.width, self.height)
        pixels = np.array(resized, dtype=np.float32)
        if self.channel is not None:
            # Broadcast-subtract the per-channel mean over every pixel.
            pixels -= self.channel[None, None, :]
        return pixels
def get_img_builder(config, image_dir, is_crop=False, bufferize=None):
    """Select and construct the image builder matching config["image_input"].

    Parameters
    ----------
    config : dict
        Must contain "image_input" ("fc8", "fc7", "conv", "raw_h5" or "raw");
        for "raw" it must also provide "dim" ([height, width], plus "scale"
        for crops) and may provide "channel" (per-channel mean to subtract).
    image_dir : str
        Directory holding the images or h5 feature files.
    is_crop : bool
        For "raw" input, build a crop loader instead of a full-image loader.
    bufferize : bool or None
        Overrides the per-input default for buffering h5 features in memory.

    Returns
    -------
    A builder instance (h5FeatureBuilder, RawCropBuilder or RawImageBuilder).

    Raises
    ------
    ValueError
        If "image_input" is not one of the supported values.
    """
    image_input = config["image_input"]
    if image_input in ["fc8", "fc7"]:
        # Small FC features: buffer in memory by default.
        bufferize = bufferize if bufferize is not None else True
        loader = h5FeatureBuilder(image_dir, bufferize=bufferize)
    elif image_input in ["conv", "raw_h5"]:
        # Large conv maps / raw tensors: stream from disk by default.
        bufferize = bufferize if bufferize is not None else False
        loader = h5FeatureBuilder(image_dir, bufferize=bufferize)
    elif image_input == "raw":
        if is_crop:
            loader = RawCropBuilder(image_dir,
                                    height=config["dim"][0],
                                    width=config["dim"][1],
                                    scale=config["scale"],
                                    channel=config.get("channel", None))
        else:
            loader = RawImageBuilder(image_dir,
                                     height=config["dim"][0],
                                     width=config["dim"][1],
                                     channel=config.get("channel", None))
    else:
        # BUG FIX: was `assert False, ...`, which is silently stripped under
        # `python -O`; raise an explicit error instead.
        raise ValueError("incorrect image input: {}".format(image_input))
    return loader
"PIL.Image.open",
"os.path.join",
"data_provider.image_preprocessors.resize_image",
"h5py.File",
"numpy.array",
"numpy.zeros",
"data_provider.image_preprocessors.scaled_crop_and_pad"
] | [((1907, 1926), 'numpy.zeros', 'np.zeros', (['self.size'], {}), '(self.size)\n', (1915, 1926), True, 'import numpy as np\n'), ((2864, 2902), 'os.path.join', 'os.path.join', (['self.img_dir', 'h5filename'], {}), '(self.img_dir, h5filename)\n', (2876, 2902), False, 'import os\n'), ((5103, 5139), 'os.path.join', 'os.path.join', (['self.img_dir', 'filename'], {}), '(self.img_dir, filename)\n', (5115, 5139), False, 'import os\n'), ((5572, 5614), 'data_provider.image_preprocessors.resize_image', 'resize_image', (['img', 'self.width', 'self.height'], {}), '(img, self.width, self.height)\n', (5584, 5614), False, 'from data_provider.image_preprocessors import resize_image, scaled_crop_and_pad\n'), ((5630, 5661), 'numpy.array', 'np.array', (['img'], {'dtype': 'np.float32'}), '(img, dtype=np.float32)\n', (5638, 5661), True, 'import numpy as np\n'), ((6183, 6219), 'os.path.join', 'os.path.join', (['self.img_dir', 'filename'], {}), '(self.img_dir, filename)\n', (6195, 6219), False, 'import os\n'), ((6745, 6811), 'data_provider.image_preprocessors.scaled_crop_and_pad', 'scaled_crop_and_pad', ([], {'raw_img': 'img', 'bbox': 'self.bbox', 'scale': 'self.scale'}), '(raw_img=img, bbox=self.bbox, scale=self.scale)\n', (6764, 6811), False, 'from data_provider.image_preprocessors import resize_image, scaled_crop_and_pad\n'), ((6827, 6870), 'data_provider.image_preprocessors.resize_image', 'resize_image', (['crop', 'self.width', 'self.height'], {}), '(crop, self.width, self.height)\n', (6839, 6870), False, 'from data_provider.image_preprocessors import resize_image, scaled_crop_and_pad\n'), ((6887, 6919), 'numpy.array', 'np.array', (['crop'], {'dtype': 'np.float32'}), '(crop, dtype=np.float32)\n', (6895, 6919), True, 'import numpy as np\n'), ((3024, 3050), 'h5py.File', 'h5py.File', (['h5filepath', '"""r"""'], {}), "(h5filepath, 'r')\n", (3033, 3050), False, 'import h5py\n'), ((5516, 5541), 'PIL.Image.open', 'Image.open', (['self.img_path'], {}), '(self.img_path)\n', (5526, 5541), 
False, 'from PIL import Image\n'), ((6688, 6713), 'PIL.Image.open', 'Image.open', (['self.img_path'], {}), '(self.img_path)\n', (6698, 6713), False, 'from PIL import Image\n')] |
from matplotlib.path import Path
import pytz
from datetime import datetime
from dateutil.parser import parse
from dateutil.utils import default_tzinfo
from dateutil import tz
import numpy as np
from scipy import interpolate
from scipy.io import loadmat
# ISO-8601-style timestamp format (UTC "Z" suffix) for serializing datetimes.
DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
# Sentinel datetimes used for 'default' correction-factor / length-scale rows;
# they bracket any realistic data timestamp so defaults match last.
JAN_FIRST = datetime(2000, 1, 1, 0, 0, 0, 0, pytz.timezone('UTC'))
JAN_LAST = datetime(2100, 1, 1, 0, 0, 0, 0, pytz.timezone('UTC'))
def getAreaModelBounds(area_model):
    """Extract a lat/lon bounding rectangle from an area model.

    Returns a dict with keys 'lat_hi', 'lon_hi', 'lat_lo', 'lon_lo', or None
    when the extracted corners do not form a valid rectangle.
    """
    corners = area_model['boundingbox']
    bounds = {
        'lat_hi': corners[0][1],
        'lon_hi': corners[1][2],
        'lat_lo': corners[2][1],
        'lon_lo': corners[0][2],
    }
    # NOTE(review): latitude uses <= while longitude uses < — presumably
    # intentional (degenerate zero-height boxes rejected); confirm.
    degenerate = (bounds['lat_hi'] <= bounds['lat_lo']) or (bounds['lon_hi'] < bounds['lon_lo'])
    return None if degenerate else bounds
def parseDateString(datetime_string, dflt_tz_string=None):
    """Parse a date/time string, optionally applying a default timezone to
    naive results.

    Parameters
    ----------
    datetime_string : str
        Any format accepted by dateutil.parser.parse.
    dflt_tz_string : str or None
        Timezone name applied when the parsed datetime has no tzinfo.

    Returns
    -------
    datetime or None
        None when the string cannot be parsed.
    """
    # Fixed: `== None` -> `is None`; bare `except:` narrowed to the errors
    # dateutil.parser.parse documents (ParserError is a ValueError subclass).
    if dflt_tz_string is None:
        try:
            return parse(datetime_string)
        except (ValueError, OverflowError, TypeError):
            return None
    dflt_tz = tz.gettz(dflt_tz_string)
    try:
        return default_tzinfo(parse(datetime_string), dflt_tz)
    except (ValueError, OverflowError, TypeError):
        return None
def loadBoundingBox(bbox_info):
    """Convert raw bounding-box rows into (index, lat, lon) vertex tuples.

    Parameters
    ----------
    bbox_info : iterable of dict
        Rows with string 'Latitude' and 'Longitude' fields.

    Returns
    -------
    list of (int, float, float)
        One tuple per row, indexed in input order.
    """
    # Idiom fix: enumerate instead of zip(rows, range(len(rows))).
    return [(index, float(row['Latitude']), float(row['Longitude']))
            for index, row in enumerate(bbox_info)]
def loadCorrectionFactors(cfactor_info, dflt_tz_string=None):
    """Build per-sensor-type lists of correction-factor entries.

    Rows with explicit start/end times come first; 'default' rows are
    appended last (spanning JAN_FIRST..JAN_LAST) so the lookup uses whichever
    entry matches first.

    Parameters
    ----------
    cfactor_info : dict
        Maps sensor type -> iterable of rows with 'starttime', 'endtime',
        'slope', 'intercept', 'note'.
    dflt_tz_string : str or None
        Default timezone used when parsing naive datetimes.

    Returns
    -------
    dict
        Maps sensor type -> list of parsed factor dicts.
    """
    cfactors = {}
    for sensor_type in cfactor_info:
        sensorDict = []
        for row in cfactor_info[sensor_type]:
            if row['starttime'] == 'default':
                continue
            sensorDict.append({
                'starttime': parseDateString(row['starttime'], dflt_tz_string),
                'endtime': parseDateString(row['endtime'], dflt_tz_string),
                'slope': float(row['slope']),
                'intercept': float(row['intercept']),
                'note': row['note'],
            })
        # Default rows go last so explicit date ranges match first.
        # BUG FIX: the original reused (and mutated) the last `new_row` dict
        # already appended above, corrupting it and appending it twice.
        for row in cfactor_info[sensor_type]:
            if row['starttime'] == 'default':
                sensorDict.append({
                    'starttime': JAN_FIRST,
                    'endtime': JAN_LAST,
                    'slope': float(row['slope']),
                    'intercept': float(row['intercept']),
                    'note': row['note'],
                })
        cfactors[sensor_type] = sensorDict
    return cfactors
def loadLengthScales(length_info, dflt_tz_string=None):
    """Build the list of length-scale entries from raw rows.

    Rows with explicit start/end times come first; 'default' rows are
    appended last (spanning JAN_FIRST..JAN_LAST) so the lookup uses whichever
    entry matches first.

    Parameters
    ----------
    length_info : iterable of dict
        Rows with 'starttime', 'endtime', 'Space', 'Time', 'Elevation'.
    dflt_tz_string : str or None
        Default timezone used when parsing naive datetimes.

    Returns
    -------
    list of dict
        Entries with parsed 'starttime'/'endtime' and float scale values.
    """
    lengthScaleArray = []
    for row in length_info:
        if row['starttime'] == 'default':
            continue
        lengthScaleArray.append({
            'starttime': parseDateString(row['starttime'], dflt_tz_string),
            'endtime': parseDateString(row['endtime'], dflt_tz_string),
            'Space': float(row['Space']),
            'Time': float(row['Time']),
            'Elevation': float(row['Elevation']),
        })
    # Default rows go last so explicit date ranges match first.
    # BUG FIX: the original reused (and mutated) the last dict already
    # appended above, corrupting it and appending it a second time.
    for row in length_info:
        if row['starttime'] == 'default':
            lengthScaleArray.append({
                'starttime': JAN_FIRST,
                'endtime': JAN_LAST,
                'Space': float(row['Space']),
                'Time': float(row['Time']),
                'Elevation': float(row['Elevation']),
            })
    return lengthScaleArray
def isQueryInBoundingBox(bounding_box_vertices, query_lat, query_lon):
    """Return True when (query_lat, query_lon) lies inside the polygon given
    by (index, lat, lon) vertex tuples."""
    verts = [(0, 0)] * len(bounding_box_vertices)
    for index, v_lat, v_lon in bounding_box_vertices:
        # matplotlib paths use (x, y) == (lon, lat) ordering.
        verts[index] = (v_lon, v_lat)
    # Repeat the first vertex so the path closes properly.
    verts.append(verts[0])
    codes = [Path.MOVETO] + [Path.LINETO] * (len(verts) - 2) + [Path.CLOSEPOLY]
    polygon = Path(verts, codes)
    return polygon.contains_point((query_lon, query_lat))
def getAreaModelByLocation(area_models, lat=0.0, lon=0.0, string=None):
    """Find the area model containing a location, or look it up by key.

    Parameters
    ----------
    area_models : dict
        Maps area key -> area model dict (each with a 'boundingbox').
    lat, lon : float
        Query location, used when `string` is None.
    string : str or None
        When given, look the model up directly by key instead.

    Returns
    -------
    dict or None
        The matching area model, or None when nothing matches.
    """
    if string is None:
        for key in area_models:
            if (isQueryInBoundingBox(area_models[key]['boundingbox'], lat, lon)):
                print(f'Using area_model for {key}')
                return area_models[key]
    else:
        try:
            return area_models[string]
        # Fixed: bare `except:` narrowed to the KeyError a dict lookup raises.
        except KeyError:
            print("Got bad request for area by string: " + str(string))
    print("Query location "+str(lat)+ "," + str(lon) + " not in any known model area")
    return None
def buildAreaModelsFromJson(json_data):
    """Construct the in-memory area-model dict for every area in json_data."""
    area_models = {}
    for key, raw in json_data.items():
        model = {}
        model['shortname'] = raw['shortname']
        model['timezone'] = raw['Timezone']
        model['idstring'] = raw['ID String']
        model['elevationfile'] = raw['Elevation File']
        model['note'] = raw['Note']
        # Elevation interpolators are expensive; built elsewhere on demand.
        model['elevationinterpolator'] = None
        model['boundingbox'] = loadBoundingBox(raw['Boundingbox'])
        model['correctionfactors'] = loadCorrectionFactors(raw['Correction Factors'], raw['Timezone'])
        model['lengthscales'] = loadLengthScales(raw['Length Scales'], raw['Timezone'])
        # Optional field: only present for some areas.
        if 'Source table map' in raw:
            model['sourcetablemap'] = raw['Source table map']
        area_models[key] = model
    return area_models
def applyCorrectionFactor(factors, data_timestamp, data, sensor_type, status=False):
    """Apply the first matching calibration factor to sensor data.

    Parameters
    ----------
    factors : dict
        Maps sensor type -> list of factor dicts with 'starttime', 'endtime',
        'slope', 'intercept', 'note'.
    data_timestamp :
        Timestamp compared against each factor's [starttime, endtime) range.
    data : scalar or array-like
        Raw measurement(s); corrected values are clipped at 0.
    sensor_type : str
        Key selecting the factor list.
    status : bool
        When True, also return the note of the factor applied (or
        "no correction").

    Returns
    -------
    Corrected data, or (corrected data, note) when status is True. If no
    factor matches, the data is returned unchanged (identity correction).
    """
    # Idiom fix: direct key lookup + direct iteration instead of scanning all
    # dict keys and indexing with range(len(...)). Behavior is unchanged.
    for row in factors.get(sensor_type, []):
        if row['starttime'] <= data_timestamp and row['endtime'] > data_timestamp:
            corrected = np.maximum(data * row['slope'] + row['intercept'], 0.0)
            if not status:
                return corrected
            return corrected, row['note']
    # No matching correction factor: identity.
    if not status:
        return data
    return data, "no correction"
def getLengthScalesForTime(length_scales_array, datetime):
    """Return the (Space, Time, Elevation) scales active at a given time.

    Parameters
    ----------
    length_scales_array : list of dict
        Entries with 'starttime', 'endtime', 'Space', 'Time', 'Elevation'.
    datetime :
        Query time (note: parameter name shadows the datetime import; kept
        for caller compatibility).

    Returns
    -------
    (Space, Time, Elevation) of the first entry whose [starttime, endtime)
    range contains the query time, or (None, None, None) if none matches.
    """
    # Idiom fix: iterate entries directly instead of range(len(...)).
    for row in length_scales_array:
        if row['starttime'] <= datetime and row['endtime'] > datetime:
            return row['Space'], row['Time'], row['Elevation']
    print("failure to find length scale in area model")
    return None, None, None
def buildAreaElevationInterpolator(filename):
    """Build a bilinear elevation interpolator from a MATLAB .mat file.

    The file must contain 'elevs' (2-D elevation grid), 'lons' and 'lats'
    (grid coordinates). Points outside the grid interpolate to 0.0.
    """
    data = loadmat(filename)
    elevation_grid = data['elevs']
    gridLongs = data['lons']
    gridLats = data['lats']
    # Rough memory estimate assuming 8-byte (float64) elements, in MB.
    sz = ((elevation_grid.size + gridLats.size + gridLongs.size) * 8) / 1e6
    print(f'Elevation map file size: {sz} MB')
    # NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    # SciPy 1.14; migrating to RegularGridInterpolator changes the call
    # signature — confirm the project's scipy version pin before upgrading.
    return interpolate.interp2d(gridLongs, gridLats, elevation_grid, kind='linear', fill_value=0.0)
| [
"pytz.timezone",
"matplotlib.path.Path",
"dateutil.parser.parse",
"dateutil.tz.gettz",
"scipy.io.loadmat",
"numpy.maximum",
"scipy.interpolate.interp2d"
] | [((339, 359), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (352, 359), False, 'import pytz\n'), ((405, 425), 'pytz.timezone', 'pytz.timezone', (['"""UTC"""'], {}), "('UTC')\n", (418, 425), False, 'import pytz\n'), ((4111, 4129), 'matplotlib.path.Path', 'Path', (['verts', 'codes'], {}), '(verts, codes)\n', (4115, 4129), False, 'from matplotlib.path import Path\n'), ((7358, 7375), 'scipy.io.loadmat', 'loadmat', (['filename'], {}), '(filename)\n', (7365, 7375), False, 'from scipy.io import loadmat\n'), ((7602, 7694), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['gridLongs', 'gridLats', 'elevation_grid'], {'kind': '"""linear"""', 'fill_value': '(0.0)'}), "(gridLongs, gridLats, elevation_grid, kind='linear',\n fill_value=0.0)\n", (7622, 7694), False, 'from scipy import interpolate\n'), ((1046, 1070), 'dateutil.tz.gettz', 'tz.gettz', (['dflt_tz_string'], {}), '(dflt_tz_string)\n', (1054, 1070), False, 'from dateutil import tz\n'), ((955, 977), 'dateutil.parser.parse', 'parse', (['datetime_string'], {}), '(datetime_string)\n', (960, 977), False, 'from dateutil.parser import parse\n'), ((1118, 1140), 'dateutil.parser.parse', 'parse', (['datetime_string'], {}), '(datetime_string)\n', (1123, 1140), False, 'from dateutil.parser import parse\n'), ((6324, 6424), 'numpy.maximum', 'np.maximum', (["(data * factors[factor_type][i]['slope'] + factors[factor_type][i]['intercept']\n )", '(0.0)'], {}), "(data * factors[factor_type][i]['slope'] + factors[factor_type][i\n ]['intercept'], 0.0)\n", (6334, 6424), True, 'import numpy as np\n'), ((6612, 6712), 'numpy.maximum', 'np.maximum', (["(data * factors[factor_type][i]['slope'] + factors[factor_type][i]['intercept']\n )", '(0.0)'], {}), "(data * factors[factor_type][i]['slope'] + factors[factor_type][i\n ]['intercept'], 0.0)\n", (6622, 6712), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from collections import Counter
import re
from nltk.stem.porter import PorterStemmer
import numpy as np, scipy.stats as st
import itertools
import sys
stemmer = PorterStemmer()
class Rouge(object):
    """Compute ROUGE-1, ROUGE-2 and ROUGE-L between reference and system
    (generated) summaries.

    Each score is reported as a (mean, standard error, (ci_lo, ci_hi)) triple;
    the 95% confidence interval is only computed when there are at least 50
    sample pairs (otherwise it is (0, (0, 0))).
    """
    def __init__(self, stem=True, use_ngram_buf=False):
        # Highest n-gram order computed (2 -> unigrams and bigrams).
        self.N = 2
        # Whether tokens are Porter-stemmed before matching.
        self.stem = stem
        # Optional memoization of per-sentence n-gram tables.
        self.use_ngram_buf = use_ngram_buf
        self.ngram_buf = {}
    @staticmethod
    def _format_sentence(sentence):
        """Lowercase, replace non-alphanumerics with spaces, collapse runs of
        whitespace and strip the ends."""
        s = sentence.lower()
        s = re.sub(r"[^0-9a-z]", " ", s)
        s = re.sub(r"\s+", " ", s)
        s = s.strip()
        return s
    def _create_n_gram(self, raw_sentence, n, stem):
        """Return {order: Counter} n-gram count tables (orders 0..n-1, i.e.
        1-grams..n-grams) for a single sentence."""
        if self.use_ngram_buf:
            if raw_sentence in self.ngram_buf:
                return self.ngram_buf[raw_sentence]
        res = {}
        sentence = Rouge._format_sentence(raw_sentence)
        tokens = sentence.split(' ')
        if stem:
            # try: # TODO older NLTK has a bug in Porter Stemmer
            tokens = [stemmer.stem(t) for t in tokens]
            # except:
            #     pass
        sent_len = len(tokens)
        for _n in range(n):
            buf = Counter()
            for idx, token in enumerate(tokens):
                if idx + _n >= sent_len:
                    break
                ngram = ' '.join(tokens[idx: idx + _n + 1])
                buf[ngram] += 1
            res[_n] = buf
        if self.use_ngram_buf:
            self.ngram_buf[raw_sentence] = res
        return res
    def get_ngram(self, sents, N, stem=False):
        """N-gram tables for one sentence (str) or, for a list, tables merged
        (counts summed) over all sentences."""
        if isinstance(sents, list):
            res = {}
            for _n in range(N):
                res[_n] = Counter()
            for sent in sents:
                ngrams = self._create_n_gram(sent, N, stem)
                for this_n, counter in ngrams.items():
                    # res[this_n] = res[this_n] + counter
                    self_counter = res[this_n]
                    for elem, count in counter.items():
                        if elem not in self_counter:
                            self_counter[elem] = count
                        else:
                            self_counter[elem] += count
            return res
        elif isinstance(sents, str):
            return self._create_n_gram(sents, N, stem)
        else:
            raise ValueError
    def find_lcseque(self,s1, s2):
        """Longest common subsequence of two token sequences, returned as a
        space-joined string (standard O(len(s1)*len(s2)) DP)."""
        # m[i][j]: LCS length of s1[:i], s2[:j]; d records the backtrack move.
        m = [[0 for x in range(len(s2) + 1)] for y in range(len(s1) + 1)]
        d = [[None for x in range(len(s2) + 1)] for y in range(len(s1) + 1)]
        for p1 in range(len(s1)):
            for p2 in range(len(s2)):
                if s1[p1] == s2[p2]:
                    m[p1 + 1][p2 + 1] = m[p1][p2] + 1
                    d[p1 + 1][p2 + 1] = 'ok'
                elif m[p1 + 1][p2] > m[p1][p2 + 1]:
                    m[p1 + 1][p2 + 1] = m[p1 + 1][p2]
                    d[p1 + 1][p2 + 1] = 'left'
                else:
                    m[p1 + 1][p2 + 1] = m[p1][p2 + 1]
                    d[p1 + 1][p2 + 1] = 'up'
        (p1, p2) = (len(s1), len(s2))
        s = []
        # Backtrack from the bottom-right corner to recover the subsequence.
        while m[p1][p2]:
            c = d[p1][p2]
            if c == 'ok':
                s.append(s1[p1 - 1])
                p1 -= 1
                p2 -= 1
            if c == 'left':
                p2 -= 1
            if c == 'up':
                p1 -= 1
        s.reverse()
        return ' '.join(s)
    def get_mean_sd_internal(self, x):
        """Return (mean, standard error, 95% Student-t confidence interval)."""
        mean = np.mean(x)
        sd = st.sem(x)
        res = st.t.interval(0.95, len(x) - 1, loc=mean, scale=sd)
        return (mean, sd, res)
    def compute_rouge(self, references, systems):
        """Compute ROUGE-1/2/L over paired reference/system summaries.

        references is a sequence of reference-sentence lists; systems is a
        parallel sequence of generated summaries (str). Returns a dict keyed
        'rouge-1', 'rouge-2', 'rouge-L', each mapping 'p'/'r'/'f' to a
        (mean, sd, (ci_lo, ci_hi)) triple.
        """
        assert (len(references) == len(systems))
        # NOTE(review): peer_count is computed but never used.
        peer_count = len(references)
        result_buf = {}
        for n in range(self.N):
            result_buf[n] = {'p': [], 'r': [], 'f': []}
        result_buf['L'] = {'p': [], 'r': [], 'f': []}
        # First pass: n-gram precision/recall/F1 per summary pair.
        for ref_sent, sys_sent in zip(references, systems):
            ref_ngrams = self.get_ngram(ref_sent, self.N, self.stem)
            sys_ngrams = self.get_ngram(sys_sent, self.N, self.stem)
            for n in range(self.N):
                ref_ngram = ref_ngrams[n]
                sys_ngram = sys_ngrams[n]
                ref_count = sum(ref_ngram.values())
                sys_count = sum(sys_ngram.values())
                match_count = 0
                for k, v in sys_ngram.items():
                    if k in ref_ngram:
                        match_count += min(v, ref_ngram[k])
                p = match_count / sys_count if sys_count != 0 else 0
                r = match_count / ref_count if ref_count != 0 else 0
                f = 0 if (p == 0 or r == 0) else 2 * p * r / (p + r)
                result_buf[n]['p'].append(p)
                result_buf[n]['r'].append(r)
                result_buf[n]['f'].append(f)
        res = {}
        for n in range(self.N):
            n_key = 'rouge-{0}'.format(n + 1)
            res[n_key] = {}
            if len(result_buf[n]['p']) >= 50:
                res[n_key]['p'] = self.get_mean_sd_internal(result_buf[n]['p'])
                res[n_key]['r'] = self.get_mean_sd_internal(result_buf[n]['r'])
                res[n_key]['f'] = self.get_mean_sd_internal(result_buf[n]['f'])
            else:
                # not enough samples to calculate confidence interval
                res[n_key]['p'] = (np.mean(np.array(result_buf[n]['p'])), 0, (0, 0))
                res[n_key]['r'] = (np.mean(np.array(result_buf[n]['r'])), 0, (0, 0))
                res[n_key]['f'] = (np.mean(np.array(result_buf[n]['f'])), 0, (0, 0))
        # Second pass: ROUGE-L via longest common subsequence of tokens,
        # accumulated over all reference sentences of each pair.
        for ref_sent, sys_sent in zip(references, systems):
            alllcs = 0
            alls = 0
            allr = 0
            for ref_sents in ref_sent:
                sys_sent = sys_sent.replace('<unknown>', 'unk')
                ref_sents = ref_sents.replace('<unknown>', 'unk')
                ref_sent_token = Rouge._format_sentence(ref_sents).split()
                sys_sent_token = Rouge._format_sentence(sys_sent).split()
                if self.stem:
                    ref_sent_token = [stemmer.stem(t) for t in ref_sent_token]
                    sys_sent_token = [stemmer.stem(t) for t in sys_sent_token]
                lcs=self.find_lcseque(ref_sent_token,sys_sent_token)
                alllcs += len(lcs.split())
                alls += len(sys_sent_token)
                allr += len(ref_sent_token)
            # print(alllcs, alls, allr)
            p = alllcs / alls if alls != 0 else 0
            r = alllcs / allr if allr != 0 else 0
            f = 0 if (p == 0 or r == 0) else 2 * p * r / (p + r)
            result_buf['L']['p'].append(p)
            result_buf['L']['r'].append(r)
            result_buf['L']['f'].append(f)
        n_key = 'rouge-L'
        res[n_key] = {}
        if len(result_buf['L']['f']) >= 50:
            res[n_key]['p'] = self.get_mean_sd_internal(result_buf['L']['p'])
            res[n_key]['r'] = self.get_mean_sd_internal(result_buf['L']['r'])
            res[n_key]['f'] = self.get_mean_sd_internal(result_buf['L']['f'])
        else:
            # not enough samples to calculate confidence interval
            res[n_key]['p'] = (np.mean(np.array(result_buf['L']['p'])), 0, (0, 0))
            res[n_key]['r'] = (np.mean(np.array(result_buf['L']['r'])), 0, (0, 0))
            res[n_key]['f'] = (np.mean(np.array(result_buf['L']['f'])), 0, (0, 0))
        return res
    def print_score(self, references, systems):
        """Read reference/generated files (one summary per line, paired by
        line number) and print compact ROUGE-1/2/L scores.

        Note the argument order: `systems` is the generated file path and
        `references` the reference file path.
        """
        gen = open(systems, 'r', encoding='utf-8')
        ref = open(references, 'r', encoding='utf-8')
        gen_corpus = []
        ref_corpus = []
        for g, r in zip(gen, ref):
            gen_corpus.append(g.strip())
            ref_corpus.append([r.strip()])
        scores = self.compute_rouge(ref_corpus, gen_corpus)
        print("Samples: %4d" %len(gen_corpus))
        print("rouge-1 F1(R/P): %02.2f (%02.2f/%02.2f)" \
            %(scores['rouge-1']['f'][0]*100,\
            scores['rouge-1']['r'][0]*100,\
            scores['rouge-1']['p'][0]*100))
        print("rouge-2 F1(R/P): %02.2f (%02.2f/%02.2f)" \
            %(scores['rouge-2']['f'][0]*100,\
            scores['rouge-2']['r'][0]*100,\
            scores['rouge-2']['p'][0]*100))
        print("rouge-L F1(R/P): %02.2f (%02.2f/%02.2f)" \
            %(scores['rouge-L']['f'][0]*100,\
            scores['rouge-L']['r'][0]*100,\
            scores['rouge-L']['p'][0]*100))
        gen.close()
        ref.close()
    def print_all(self, references, systems):
        """Like print_score, but prints means together with 95% confidence
        intervals in the official ROUGE report style."""
        gen = open(systems, 'r', encoding='utf-8')
        ref = open(references, 'r', encoding='utf-8')
        gen_corpus = []
        ref_corpus = []
        for g, r in zip(gen, ref):
            gen_corpus.append(g.strip())
            ref_corpus.append([r.strip()])
        scores = self.compute_rouge(ref_corpus, gen_corpus)
        print("Sample #: %4d" %len(gen_corpus))
        print("------------------------------------------")
        print("Rouge-1 Average_F: %02.3f (95-conf.int. %02.3f - %02.3f)" \
            %(scores['rouge-1']['f'][0]*100,\
            scores['rouge-1']['f'][2][0]*100,\
            scores['rouge-1']['f'][2][1]*100))
        print("Rouge-1 Average_R: %02.3f (95-conf.int. %02.3f - %02.3f)" \
            %(scores['rouge-1']['r'][0]*100,\
            scores['rouge-1']['r'][2][0]*100,\
            scores['rouge-1']['r'][2][1]*100))
        print("Rouge-1 Average_P: %02.3f (95-conf.int. %02.3f - %02.3f)" \
            %(scores['rouge-1']['p'][0]*100,\
            scores['rouge-1']['p'][2][0]*100,\
            scores['rouge-1']['p'][2][1]*100))
        print("------------------------------------------")
        print("Rouge-2 Average_F: %02.3f (95-conf.int. %02.3f - %02.3f)" \
            %(scores['rouge-2']['f'][0]*100,\
            scores['rouge-2']['f'][2][0]*100,\
            scores['rouge-2']['f'][2][1]*100))
        print("Rouge-2 Average_R: %02.3f (95-conf.int. %02.3f - %02.3f)" \
            %(scores['rouge-2']['r'][0]*100,\
            scores['rouge-2']['r'][2][0]*100,\
            scores['rouge-2']['r'][2][1]*100))
        print("Rouge-2 Average_P: %02.3f (95-conf.int. %02.3f - %02.3f)" \
            %(scores['rouge-2']['p'][0]*100,\
            scores['rouge-2']['p'][2][0]*100,\
            scores['rouge-2']['p'][2][1]*100))
        print("------------------------------------------")
        print("Rouge-L Average_F: %02.3f (95-conf.int. %02.3f - %02.3f)" \
            %(scores['rouge-L']['f'][0]*100,\
            scores['rouge-L']['f'][2][0]*100,\
            scores['rouge-L']['f'][2][1]*100))
        print("Rouge-L Average_R: %02.3f (95-conf.int. %02.3f - %02.3f)" \
            %(scores['rouge-L']['r'][0]*100,\
            scores['rouge-L']['r'][2][0]*100,\
            scores['rouge-L']['r'][2][1]*100))
        print("Rouge-L Average_P: %02.3f (95-conf.int. %02.3f - %02.3f)" \
            %(scores['rouge-L']['p'][0]*100,\
            scores['rouge-L']['p'][2][0]*100,\
            scores['rouge-L']['p'][2][1]*100))
        print("------------------------------------------")
        gen.close()
        ref.close()
if __name__ == "__main__":
    # CLI entry point: score a file of generated summaries against a file of
    # references (one summary per line, paired by line number).
    rouge = Rouge()
    gen = open(sys.argv[1], 'r', encoding='utf-8')
    ref = open(sys.argv[2], 'r', encoding='utf-8')
    gen_corpus, ref_corpus = [], []
    for g, r in zip(gen, ref):
        gen_corpus.append(g.strip())
        ref_corpus.append([r.strip()])
    print("Samples: %4d" %len(gen_corpus))
    scores = rouge.compute_rouge(ref_corpus, gen_corpus)
    print("rouge-1 F1(R/P): %02.2f (%02.2f/%02.2f)" %(scores['rouge-1']['f'][0]*100, scores['rouge-1']['r'][0]*100, scores['rouge-1']['p'][0]*100))
    print("rouge-2 F1(R/P): %02.2f (%02.2f/%02.2f)" %(scores['rouge-2']['f'][0]*100, scores['rouge-2']['r'][0]*100, scores['rouge-2']['p'][0]*100))
    print("rouge-L F1(R/P): %02.2f (%02.2f/%02.2f)" %(scores['rouge-L']['f'][0]*100, scores['rouge-L']['r'][0]*100, scores['rouge-L']['p'][0]*100))
    gen.close()
    ref.close()
| [
"numpy.mean",
"collections.Counter",
"numpy.array",
"nltk.stem.porter.PorterStemmer",
"scipy.stats.sem",
"re.sub"
] | [((268, 283), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (281, 283), False, 'from nltk.stem.porter import PorterStemmer\n'), ((574, 601), 're.sub', 're.sub', (['"""[^0-9a-z]"""', '""" """', 's'], {}), "('[^0-9a-z]', ' ', s)\n", (580, 601), False, 'import re\n'), ((615, 637), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 's'], {}), "('\\\\s+', ' ', s)\n", (621, 637), False, 'import re\n'), ((3493, 3503), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (3500, 3503), True, 'import numpy as np, scipy.stats as st\n'), ((3517, 3526), 'scipy.stats.sem', 'st.sem', (['x'], {}), '(x)\n', (3523, 3526), True, 'import numpy as np, scipy.stats as st\n'), ((1231, 1240), 'collections.Counter', 'Counter', ([], {}), '()\n', (1238, 1240), False, 'from collections import Counter\n'), ((1735, 1744), 'collections.Counter', 'Counter', ([], {}), '()\n', (1742, 1744), False, 'from collections import Counter\n'), ((7283, 7313), 'numpy.array', 'np.array', (["result_buf['L']['p']"], {}), "(result_buf['L']['p'])\n", (7291, 7313), True, 'import numpy as np, scipy.stats as st\n'), ((7366, 7396), 'numpy.array', 'np.array', (["result_buf['L']['r']"], {}), "(result_buf['L']['r'])\n", (7374, 7396), True, 'import numpy as np, scipy.stats as st\n'), ((7449, 7479), 'numpy.array', 'np.array', (["result_buf['L']['f']"], {}), "(result_buf['L']['f'])\n", (7457, 7479), True, 'import numpy as np, scipy.stats as st\n'), ((5414, 5442), 'numpy.array', 'np.array', (["result_buf[n]['p']"], {}), "(result_buf[n]['p'])\n", (5422, 5442), True, 'import numpy as np, scipy.stats as st\n'), ((5499, 5527), 'numpy.array', 'np.array', (["result_buf[n]['r']"], {}), "(result_buf[n]['r'])\n", (5507, 5527), True, 'import numpy as np, scipy.stats as st\n'), ((5584, 5612), 'numpy.array', 'np.array', (["result_buf[n]['f']"], {}), "(result_buf[n]['f'])\n", (5592, 5612), True, 'import numpy as np, scipy.stats as st\n')] |
# -*- coding: utf-8 -*-
""" Time-Varying Functional Connectivity Graphs
Time-varying functional connectivity graphs (TVFCGs) (Dimitriadis2010_, Falani2008_) introduce the
idea of processing overlapping segments of neuroelectric signals by defining a
frequency-dependent time window in which the synchronization is estimated;
and then tabulating the results as adjacency matrices. These matrices have a
natural graph-based representation called “functional connectivity graphs”
(FCGs).
An important aspect of the TVFCGs is the “cycle-criterion” (CC) (Cohen2008_).
It regulates the amount of the oscillation cycles that will be considered in
measuring the phase synchrony. In the original proposal :math:`CC = 2.0` was
introduced, resulting into a time-window with width twice the lower period.
TVFCGs on the other, consider the given lower frequency that correspond to the
possibly synchronized oscillations of each brain rhythm and the sampling
frequency. This newly defined frequency-depedent time-window is sliding over
the time series and the network connectivity is estimated. The overlapping
is determined by an arbitrary step parameter.
Given a multi-channel recording data matrix :math:`X^{m \\times n}` of
size :math:`m \\times n` (with :math:`m` channels, and :math:`n` samples), a
frequency range with :math:`F_{up}` and :math:`F_{lo}` the upper and lower
limits, :math:`fs` the sampling frequency, :math:`step` and :math:`CC`, the
computation of these graphs proceeds as follows:
Firstly, based on the :math:`CC` and the specified frequency range
(:math:`F_{lo}` and :math:`fs` ) the window size calculated:
.. math::
w_{len} = \\frac{ CC }{ F_{lo} } fs
Then, this window is moving per :math:`step` samples and the average
synchronization is computed (between the channels, in a pairwise manner)
resulting into :math:`\\frac{n}{step}` adjacency matrices of size
:math:`n \\times n`.
|
-----
.. [Cohen2008] <NAME>. (2008). Assessing transient cross-frequency coupling in EEG data. Journal of neuroscience methods, 168(2), 494-499.
.. [Dimitriadis2010] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2010). Tracking brain dynamics via time-dependent network analysis. Journal of neuroscience methods, 193(1), 145-155.
.. [Falani2008] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (2008). Persistent patterns of interconnection in time-varying cortical networks estimated from high-resolution EEG recordings in humans during a simple motor act. Journal of Physics A: Mathematical and Theoretical, 41(22), 224014.
"""
# Author: <NAME> <<EMAIL>>
import numpy as np
from .fc.estimator import Estimator
def tvfcg(data, estimator_instance, fb, fs, cc=2.0, step=5.0, pairs=None):
    """ Time-Varying Functional Connectivity Graphs

    The TVFCGs are computed from the input ``data`` by employing the given
    synchronization estimator (``estimator_instance``).

    Parameters
    ----------
    data : array-like, shape(n_channels, n_samples)
        Multichannel recording data.
    estimator_instance : object
        An object of type :mod:`dyconnmap.fc.Estimator`.
    fb : list of length 2
        The lower and upper frequency.
    fs : float
        Sampling frequency.
    cc : float
        Cycle criterion.
    step : int
        The amount of samples the window will move/slide over the time series.
    pairs : array-like or `None`
        - If an `array-like` is given, notice that each element is a tuple of length two.
        - If `None` is passed, complete connectivity will be assumed.

    Returns
    -------
    fcgs : array-like, shape(n_windows, n_channels, n_channels)
        The computed FCGs.

    Raises
    ------
    Exception
        If the frequency-dependent window is longer than the signal
        (raised by :func:`tvfcg_compute_windows`).
    """
    preprocess, estimator, avg_func = _validate_estimator(estimator_instance)

    # Preprocess the data (estimator function).
    pp_data = preprocess(data)

    n_channels, n_samples = np.shape(data)

    # tvfcg_compute_windows raises when the window exceeds the signal length,
    # so no additional length check is needed here (the original had a
    # duplicate, unreachable check).
    windows, window_length = tvfcg_compute_windows(data, fb, fs, cc, step)

    fcgs = np.zeros(
        (windows, n_channels, n_channels), dtype=estimator_instance.data_type
    )

    if pairs is None:
        # Upper-triangular (c1 < c2) channel pairs for every sliding window.
        pairs = [
            (win_id, int(win_id * step), int(window_length + (win_id * step)), c1, c2)
            for win_id in range(windows)
            for c1 in range(0, n_channels)
            for c2 in range(c1, n_channels)
            if c1 != c2
        ]

    for win_id, start, end, c1, c2 in pairs:
        slice1 = pp_data[c1, ..., start:end]
        slice2 = pp_data[c2, ..., start:end]

        slice_ts, _ = estimator(slice1, slice2)
        # Average synchronization inside the window becomes the edge weight.
        fcgs[win_id, c1, c2] = avg_func(slice_ts)

    return fcgs
def tvfcg_cfc(
    data, estimator_instance, fb_lo, fb_hi, fs=128, cc=2.0, step=5, pairs=None
):
    """ Time-Varying Functional Connectivity Graphs (for Cross frequency Coupling)

    The TVFCGs are computed from the input ``data`` by employing the given
    cross frequency coupling synchronization estimator (``estimator_instance``).

    Parameters
    ----------
    data : array-like, shape(n_channels, n_samples)
        Multichannel recording data.
    estimator_instance : object
        An object of type :mod:`dyconnmap.fc.Estimator`.
    fb_lo : list of length 2
        The low and high frequencies.
    fb_hi : list of length 2
        The low and high frequencies.
    fs : float
        Sampling frequency.
    cc : float
        Cycle criterion.
    step : int
        The amount of samples the window will move/slide over the time series.
    pairs : array-like or `None`
        - If an `array-like` is given, notice that each element is a tuple of length two.
        - If `None` is passed, complete connectivity will be assumed.

    Returns
    -------
    fcgs : array-like, shape(n_windows, n_channels, n_channels)
        The computed Cross-Frequency FCGs.

    Notes
    -----
    Not all available estimators in the :mod:`dyconnmap.fc` are valid for estimating
    cross frequency coupling.
    """
    # NOTE(review): fb_hi is accepted but unused in this body; the estimator
    # instance presumably carries the high band — confirm against callers.
    preprocess, estimator, avg_func = _validate_estimator(estimator_instance)

    # Preprocess the data (estimator function); yields the two band-specific
    # versions of the signal.
    pp_data1, pp_data2 = preprocess(data)

    n_channels, n_samples = np.shape(data)

    # tvfcg_compute_windows raises when the window exceeds the signal length,
    # so no additional length check is needed here (the original had a
    # duplicate, unreachable check).
    windows, window_length = tvfcg_compute_windows(data, fb_lo, fs, cc, step)

    fcgs = np.zeros((windows, n_channels, n_channels))

    if pairs is None:
        # Upper-triangular (c1 < c2) channel pairs for every sliding window.
        pairs = [
            (win_id, (win_id * step), window_length + (win_id * step), c1, c2)
            for win_id in range(windows)
            for c1 in range(0, n_channels)
            for c2 in range(c1, n_channels)
            if c1 != c2
        ]

    for win_id, start, end, c1, c2 in pairs:
        slice1 = pp_data1[c1, ..., start:end]
        slice2 = pp_data2[c2, ..., start:end]

        slice_ts, _ = estimator(slice1, slice2)
        # Average synchronization inside the window becomes the edge weight.
        fcgs[win_id, c1, c2] = avg_func(slice_ts)

    return fcgs
def tvfcg_ts(ts, fb, fs=128, cc=2.0, step=5, pairs=None, avg_func=np.mean):
    """ Time-Varying Function Connectivity Graphs (from time series)

    Operates directly on pre-estimated synchronization time series (``ts``),
    averaging each sliding window with ``avg_func``.

    Parameters
    ----------
    ts : array-like, shape(n_channels, n_channels, n_samples)
        Multichannel synchronization time series.
    fb : list of length 2
        The lower and upper frequency.
    fs : float
        Sampling frequency.
    cc : float
        Cycle criterion.
    step : int
        The amount of samples the window slides over the time series.
    pairs : array-like or `None`
        Explicit (win_id, start, end, c1, c2) tuples, or `None` for complete
        connectivity.
    avg_func : callable
        Aggregation applied to each windowed slice (default: np.mean).

    Returns
    -------
    fcgs : array-like
        The computed FCGs, shape (n_windows, n_channels, n_channels).
    """
    _, n_channels, n_samples = np.shape(ts)

    # Frequency-dependent window: cc cycles of the lowest frequency.
    window_length = np.int32(np.round((cc / fb[0]) * fs))
    windows = np.int32(np.round((n_samples - window_length) / step))

    if window_length >= n_samples:
        raise Exception(
            "The size of window cannot be greater than the number of samples"
        )

    fcgs = np.zeros((windows, n_channels, n_channels))

    if pairs is None:
        # Upper triangle including the diagonal (c2 starts at c1).
        pairs = [
            (win_id, win_id * step, win_id * step + window_length, c1, c2)
            for win_id in range(windows)
            for c1 in range(n_channels)
            for c2 in range(c1, n_channels)
        ]

    for win_id, start, end, c1, c2 in pairs:
        fcgs[win_id, c1, c2] = avg_func(ts[c1, c2, start:end])

    return fcgs
def tvfcg_compute_windows(data, fb_lo, fs, cc, step):
    """ Compute TVFCGs Sliding Windows

    Helper computing the number and length of sliding windows for the given
    parameters.

    Parameters
    ----------
    data : array-like, shape(n_channels, n_samples)
        Multichannel recording data.
    fb_lo : list
        First element is the lower frequency bound used to size the window.
    fs : float
        Sampling frequency.
    cc : float
        Cycle criterion.
    step : int
        Stepping.

    Returns
    -------
    windows : int
        The total number of sliding windows.
    window_length : int
        Number of samples per sliding window.

    Raises
    ------
    Exception
        If the window is at least as long as the signal.
    """
    *_, n_samples = np.shape(data)

    # cc cycles of the lowest frequency, expressed in samples.
    window_length = np.int32(np.round((cc / fb_lo[0]) * fs))
    windows = np.int32(np.round((n_samples - window_length) / step))

    if window_length >= n_samples:
        raise Exception(
            "The size of window cannot be greater than the number of samples"
        )

    return windows, window_length
def _validate_estimator(estimator_instance):
    """
    Perform common validity checks for a given estimator.

    Parameters
    ----------
    estimator_instance : object
        An instance of `dyconnmap.fc.Estimator`.

    Returns
    -------
    preprocess : function
        A callable function for preprocessing the data.
    estimator : function
        A callable function for estimating the synchronization.
    avg : function
        A callable function for computing the average on each slice.

    Raises
    ------
    Exception
        If the object is not an `Estimator`, or if any of the required
        methods (`preprocess`, `estimate_pair`, `mean`) is not callable.

    Notes
    -----
    This function is used mainly internally.
    """
    if not isinstance(estimator_instance, Estimator):
        raise Exception("Given object is not an Estimator.")
    preprocess = getattr(estimator_instance, "preprocess")
    estimator = getattr(estimator_instance, "estimate_pair")
    avg = getattr(estimator_instance, "mean")
    if not callable(preprocess):
        raise Exception("Preprocess method is not callable.")
    if not callable(estimator):
        # BUGFIX: the original message contained the typo "callabled".
        raise Exception("Estimator method is not callable.")
    if not callable(avg):
        raise Exception("Mean method is not callable.")
    return preprocess, estimator, avg
| [
"numpy.round",
"numpy.shape",
"numpy.zeros"
] | [((3883, 3897), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (3891, 3897), True, 'import numpy as np\n'), ((4265, 4344), 'numpy.zeros', 'np.zeros', (['(windows, n_channels, n_channels)'], {'dtype': 'estimator_instance.data_type'}), '((windows, n_channels, n_channels), dtype=estimator_instance.data_type)\n', (4273, 4344), True, 'import numpy as np\n'), ((6566, 6580), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (6574, 6580), True, 'import numpy as np\n'), ((6954, 6997), 'numpy.zeros', 'np.zeros', (['(windows, n_channels, n_channels)'], {}), '((windows, n_channels, n_channels))\n', (6962, 6997), True, 'import numpy as np\n'), ((8577, 8589), 'numpy.shape', 'np.shape', (['ts'], {}), '(ts)\n', (8585, 8589), True, 'import numpy as np\n'), ((8879, 8922), 'numpy.zeros', 'np.zeros', (['(windows, n_channels, n_channels)'], {}), '((windows, n_channels, n_channels))\n', (8887, 8922), True, 'import numpy as np\n'), ((10105, 10119), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (10113, 10119), True, 'import numpy as np\n'), ((8620, 8645), 'numpy.round', 'np.round', (['(cc / fb[0] * fs)'], {}), '(cc / fb[0] * fs)\n', (8628, 8645), True, 'import numpy as np\n'), ((8672, 8716), 'numpy.round', 'np.round', (['((n_samples - window_length) / step)'], {}), '((n_samples - window_length) / step)\n', (8680, 8716), True, 'import numpy as np\n'), ((10149, 10177), 'numpy.round', 'np.round', (['(cc / fb_lo[0] * fs)'], {}), '(cc / fb_lo[0] * fs)\n', (10157, 10177), True, 'import numpy as np\n'), ((10204, 10248), 'numpy.round', 'np.round', (['((n_samples - window_length) / step)'], {}), '((n_samples - window_length) / step)\n', (10212, 10248), True, 'import numpy as np\n')] |
import time
import string
import math
import random
import csv
from functools import reduce
from openpyxl import load_workbook
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import itertools
import selenium
from selenium import webdriver
from selenium.common.exceptions import ElementClickInterceptedException
from webdriver_manager.chrome import ChromeDriverManager
from scipy.optimize import curve_fit
from scipy.stats import norm
from scipy import optimize
from scipy.stats import multivariate_normal
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.graphics.tsaplots import plot_acf
# --- Module-level setup: launch the browser and load the lever maps ---
driver = webdriver.Chrome(ChromeDriverManager().install()) # set browser
driver.get('http://tool.globalcalculator.org/') # open website
id_box = driver.find_element_by_id('lets-start') # bypass "Start" screen
id_box.click()
# Excel maps produced offline: `dfs` maps output names to page rows,
# `dfs_3` maps input (lever) names to their character positions in the URL.
dfs = pd.read_excel("./Output_map.xlsx") # file mapping output lever names to xpaths
dfs_3 = pd.read_excel("./Input_map.xlsx") # file mapping input names to xpaths
for i in range(len(dfs)): # generate html lever addresses and put them in the dataframe
    dfs.iloc[i, 2] = '/html/body/table[1]/tbody/tr/td/table/tbody/tr[2]/td[1]/div[13]/div/table/tbody/tr[' + str(dfs.iloc[i, 1]).strip("%") + ']/td[5]/div/font'
# 30 URL characters used to encode lever positions 1.0-3.9 ('a'..'z', then 'A'..'D').
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D']
def multi_sampler_2D(observations, all_levers_current, all_thresholds, samples=4, mu_init=[3000, 0.5], plot=False, mu_prior_mu=[3100, 1], mu_prior_sd=[[200, 0],[0, 0.3]], imprimir = False):
    """
    Implementation of a variant of Markov Chain Monte-Carlo (MCMC). Given some prior
    information and a set of observations, this function performs MCMC. It calculates the posterior
    distribution of temperature and cost values and the lever values used in doing so.
    **Args**:
        - observations (list of lists (N x 2)): Contains temperature and cost values.
        - all_levers_current (list): Current values of input levers.
        - all_thresholds (list of lists (48 x 2)): Each entry contains an upper and lower bound for each lever.
        - samples (int): Number of MCMC steps.
        - mu_init (list): Initial guess of temperature and cost values.
        - plot (boolean): Flag used for plotting.
        - mu_prior_mu (list): Mean temperature and cost values of prior distribution (assumed Gaussian).
        - mu_prior_sd (list of lists (2 x 2)): Diagonal matrix containing the standard deviation of the 2D prior.
        - imprimir (boolean): Flag used for printing useful information.
    **Returns**:
        - posterior (list of lists (N x 2)): Contains trace of all temperature and cost values.
        - accepted (list): Contains all the lever values corresponding the proposal accepted by MCMC.
        - rate (list): Contains the probability of each temperature and cost pair proposal.
        - accepted_values (list of lists (M x 2)): Contains accepted temperature and cost values.
    """
    # NOTE(review): mutable default arguments (mu_init, mu_prior_mu, mu_prior_sd)
    # are shared across calls; they are only read here, so this is currently safe.
    # NOTE(review): the `plot` parameter is unused in this implementation.
    # Initialisations
    mu_current = mu_init # Set the current temperature and cost value
    posterior = [mu_current] # First value of the trace
    accepted = []; accepted_values = []; rate = []
    address = str(driver.current_url) # Get current URL (website must be TIAM-UCL's 2DS pathway)
    # Perform an MCMC step
    for i in range(samples):
        all_levers_temp = all_levers_current.copy() # Store current lever combination in a temp variable
        # Moves the calculator's levers using the sampler. Reads their corresponding temperature and cost values (proposal).
        all_levers_current, mu_proposal = generate_mu_proposal_2D(all_levers_current, all_thresholds, address = address)
        # Compute likelihood ratio of proposed temperature and cost values
        # (fixed diagonal covariance: variance 1e6 on temperature, 100 on cost).
        likelihood_ratio = np.prod(multivariate_normal(mu_proposal, [[1000000, 0], [0, 100]]).pdf(observations) / multivariate_normal(mu_current, [[1000000, 0], [0, 100]]).pdf(observations))
        # Compute the prior probability ratio of the proposed temperature and cost values
        prior_ratio = multivariate_normal(mu_prior_mu, mu_prior_sd).pdf(mu_proposal) / multivariate_normal(mu_prior_mu, mu_prior_sd).pdf(mu_current)
        # Probability of accepting the proposal
        p_accept = likelihood_ratio*prior_ratio
        rate.append(p_accept)
        # Printing routine
        if imprimir == True:
            print("Iteration: ", i, "Current: ", mu_current, " Proposal: ", mu_proposal)
            print("Likelihood ratio: ", likelihood_ratio, "Prior ratio: ", prior_ratio, "Acceptance probability: ", p_accept, "\n")
        # Decide whether to accept or reject the temperature and cost values proposal
        accept = np.random.rand() < p_accept
        # Temperature and cost values accepted
        if accept:
            address = str(driver.current_url) # Change URL address to current lever values (otherwise it remains the same)
            mu_current = mu_proposal # Update current temperature and cost values
            accepted = accepted[:].copy() + [all_levers_current.copy()] # Save accepted combination of lever values
            accepted_values.append(mu_current.copy()) # Save accepted temperature and cost values
        # Temperature and cost values rejected
        else:
            all_levers_current = all_levers_temp.copy() # Return lever values to last accepted iteration
        # Update trace of temperature and cost values
        posterior.append(mu_current.copy())
    return posterior, accepted, rate, accepted_values
def move_lever(lever, value, costs = False, address = None):
    """
    Sets a lever to a given value. Reads corresponding temperature and, if selected, cost values.
    *Args*:
        - lever (list of strings): Contains the names of the levers to be moved.
        - value (list of floats): Contains the value of the levers to be moved - Automatically matched to lever names.
        - costs (optional, boolean): Flag to decide whether to read cost values or not.
        - address (optional, string): URL address corresponding to given lever combination.
          When omitted, the browser's current URL is used, read at call time.
    *Returns*:
        - If costs is False: int, the CO2 reading.
        - If costs is True: [CO2 (int), GDP change (float)].
    """
    # BUGFIX: the default used to be `address = str(driver.current_url)`, which Python
    # evaluates once at import time; every later call without an explicit address was
    # based on that stale URL, so successive lever moves did not accumulate.
    if address is None:
        address = str(driver.current_url)
    # Update URL address with input lever names and values, one at a time
    for i in range(len(lever)):
        address = new_URL(lever[i], value[i], address = address)
    # Open website corresponding to the input values
    driver.get(address)
    ########################################## IMPORTANT ####################################################
    # All of the lines below are in charge of webscraping the temperature and, if selected, the cost values.
    # The Global Calculator is a hard to webscrape website (sometimes, it results in bugs or uncoherent
    # temperature and cost values). The code below ensures that, no matter what, the values will be read.
    # To do so it performs different actions based on the current state of the website and the output values.
    #########################################################################################################
    time.sleep(0.2)
    id_box = driver.find_element_by_id('lets-start') # Bypass "Start" screen
    id_box.click()
    time.sleep(1)
    # Read temperature values
    try:
        output = int(read_CO2()[:4]) # Read output CO2
    except: # Problem reading output CO2? The code below sorts it
        time.sleep(1)
        open_lever_menus() # Open lever menus
        move_lever([lever[0]],[1.3], costs = False) # Move lever to an arbitrary value
        driver.get(address) # Open website back
        time.sleep(0.2)
        id_box = driver.find_element_by_id('lets-start') # Bypass "Start" screen
        id_box.click()
        output = int(read_CO2()[:4]) # Read output CO2
    # Read cost values
    if costs == True:
        driver.find_element_by_xpath('//*[@id="mn-6"]').click() # Move to compare tab
        time.sleep(0.1)
        userid_element = driver.find_element_by_xpath('//*[@id="container_costs_vs_counterfactual"]/div/div[11]') # Read GDP
        cost_output = userid_element.text
        try:
            cost_output = float(cost_output[:4].rstrip("%")) # Convert GDP from string to float
        except: # Problem converting GDP? The code below sorts it
            cost_output = float(cost_output[:3].rstrip("%"))
            # Reload the page and bypass start
            driver.refresh() # Refresh
            time.sleep(1)
            id_box = driver.find_element_by_id('lets-start') # Bypass "Start" screen
            id_box.click()
        userid_element = driver.find_element_by_xpath('//*[@id="container_costs_vs_counterfactual"]/div/div[12]') # Read text below GDP value
        cost_flag = userid_element.text
        # Find sign of GDP (less expensive => increase; more expensive => decrease)
        if cost_flag == 'less expensive':
            cost_output = -cost_output # Reverse sign
        # Go back to the overview section
        try:
            driver.find_element_by_xpath('//*[@id="mn-1"]').click()
        except: # Problem going back to the overview section? The code below sorts it
            time.sleep(0.2)
            id_box = driver.find_element_by_id('lets-start') # Bypass "Start" screen
            id_box.click()
        # BUGFIX: this assignment used to sit OUTSIDE the `if costs == True` block,
        # so calls with costs=False raised NameError on `cost_output`; callers such
        # as find_lever_sensitivities() also expect a scalar in that case.
        output = [output, cost_output] # Output temperature and cost values
    return output
def open_lever_menus():
    """Expand every lever menu (ids 1-15) on the Global Calculator page."""
    for menu_id in range(1, 16):
        menu_xpath = '//*[@id="ml-open-close-link-' + str(menu_id) + '"]'
        try:
            driver.find_element_by_xpath(menu_xpath).click()
            time.sleep(0.3)  # the server cannot respond to faster clicking
        except ElementClickInterceptedException:
            # Clicked too quickly - wait a moment, then retry once.
            time.sleep(1)
            driver.find_element_by_xpath(menu_xpath).click()
    return
def find_lever_ref(name, box = 1):
    """Return the XPath of a lever's value box, given its name and box index."""
    # Look up the lever's base XPath in the output map.
    base = str(dfs[dfs.iloc[:, 1].str.match(name)].iloc[0, 3])
    # Patch the second-to-last character so the XPath points at the requested box.
    return base[:-2] + str(box + 2) + base[-1:]
def read_lever(name):
    """Return the HTML element ID ('fb-l-<pos>') for the named lever."""
    lever_row = dfs[dfs.iloc[:, 1].str.match(name)]
    return 'fb-l-' + str(lever_row.iloc[0, 2])
def read_CO2():
    """Return the CO2 level (GtCO2) shown for the current lever combination."""
    container = driver.find_element_by_xpath('//*[@id="container_dashboard_co2_budget"]')
    time.sleep(0.05)  # brief pause before reading the container text
    # The CO2 figure sits six lines from the bottom of the container's text.
    return container.text.splitlines()[-6]
def read_outputs(output_map=None):
    """Read all calculator outputs and return them as a list of floats.

    Args:
        output_map (optional, DataFrame): Mapping whose third column holds the
            XPath of each output. Defaults to the module-level ``dfs`` map.
            BUGFIX: the original took no arguments, yet ``training_sample``
            calls ``read_outputs(dfs)``, which raised TypeError; this optional
            parameter keeps both call styles working.

    Returns:
        list of float: One value per mapped output, '%' signs stripped.
    """
    if output_map is None:
        output_map = dfs
    out_vals = []
    for i in range(len(output_map)):
        element = driver.find_element_by_xpath(output_map.iloc[i, 2])
        out_vals.append(float(element.text.rstrip("%")))
    return out_vals
def map_to_letter(value):
    """Map a lever value in [1.0, 4.0] to its URL character.

    Values 2, 3 and 4 pass through as integers; any other value v maps to
    letters[round((v - 1.0) * 10)], values above 4 clamp to the last letter.
    """
    if value != 2 and value != 3 and value != 4: # Special cases
        if value < 4:
            # BUGFIX: the original used int(), which truncates and is hit by binary
            # float error: (2.9 - 1.0) * 10 == 18.999...96, so int() gave index 18
            # ('s') instead of the intended 19 ('t'). round() yields the intended index.
            pos = int(round((value - 1.0) * 10))
            try:
                back = letters[pos]
            except IndexError: # Oops, the value is out of bounds
                print("Not enough letters, fetching position: ", pos, " corresponding to value: ", value)
        else: # Values above 4 clamp to the last letter
            back = letters[-1]
    else:
        back = int(value)
    return back
def random_URL():
    """Build a random pathway URL; return it together with the lever values used."""
    input_levers = []
    chars = []
    # Draw one random value per lever in [1.8, 3.2] and record its URL character.
    for _ in range(49):
        lever_value = random.randint(18, 32)/10
        input_levers.append(lever_value)
        chars.append(map_to_letter(lever_value))
    # CCS levers (positions 43-46) are pinned to 1 for the moment.
    chars[43:47] = [1, 1, 1, 1]
    input_levers[43:47] = [1, 1, 1, 1]
    lever_string = "".join(str(c) for c in chars)
    address = "http://tool.globalcalculator.org/globcalc.html?levers=" + lever_string + "2211111111/technology/en" # Construct URL address
    return address, input_levers
def training_sample():
    """Generate a random training sample.

    Opens a random pathway URL and scrapes the calculator's outputs.

    Returns:
        input_levers (list): The random lever values used.
        random_output (list): The corresponding scraped output values.
    """
    address, input_levers = random_URL() # Generate random URL address
    driver.get(address) # Open that URL address
    time.sleep(1)
    id_box = driver.find_element_by_id('lets-start') # Bypass "Start now" menu
    id_box.click()
    time.sleep(0.2)
    compare_box = driver.find_element_by_xpath('//*[@id="mp-nav-compare"]') # Move to the "Compare" section
    compare_box.click()
    # BUGFIX: read_outputs() is defined with no parameters; the original call
    # read_outputs(dfs) raised TypeError.
    random_output = read_outputs() # Read output
    return input_levers, random_output
def log_training_sample():
    """Draw one random training sample and append it as a row of Training_set.csv."""
    sample_in, sample_out = training_sample()
    # Inputs followed by outputs form a single CSV row.
    with open(r'Training_set.csv', 'a', newline='') as csv_file:
        csv.writer(csv_file).writerow(sample_in + sample_out)
    return
def find_lever_URL_position(name):
    """Return (as a string) the URL character position of the named lever."""
    matching_rows = dfs_3[dfs_3.iloc[:, 0].str.match(name)]
    return str(matching_rows.iloc[0, 1])
def new_URL(name, value, address = "http://tool.globalcalculator.org/globcalc.html?levers=l2wz222CBpp3pC3f2Dw3DC3plzgj1tA13pp2p223ri11111p22211111111/dashboard/en"):
    """
    Generate a new URL address by changing a lever value.
    **Args**:
        - name (string): Target lever name.
        - value (float): Target value for the lever.
        - address (string): URL where the lever will be changed. Set to TIAM-UCL 2DS pathway by default.
    **Returns**:
        URL after changes are applied.
    """
    encoded = str(map_to_letter(value))
    # The lever string starts 53 characters into the URL; splice the encoded
    # character into the lever's slot.
    offset = 53 + int(find_lever_URL_position(name))
    return address[:offset] + encoded + address[offset + 1:]
def find_lever_sensitivities():
    """
    Analysis of climate impact sensitivity to changes in the inputs.
    Takes the default pathway (TIAM UCL 2DS) and changes each lever value at a time (between 1.0 and 4.0),
    reading its corresponding output.

    Returns:
        thresholds (ndarray, shape (48, 2)): Per lever, the positions of the
        max- and min-temperature readings that fall within +/-5% of 3000 GtCO2.
    """
    # NOTE(review): move_lever() is called without costs here, so a scalar CO2
    # reading per step is assumed - verify against move_lever's return shape.
    all_sensitivities = np.zeros((30, len(dfs_3.iloc[:, 0]))) # Store lever sensitivities here
    col = 0 # Counter used for indexing
    for lever in dfs_3.iloc[:, 0]: # Iterate through levers, uncomment for testing: # print("Putting lever: ", lever, " in column: ", col)
        sensitivity = []
        for i in np.linspace(1, 3.9, 30): # Move lever one increment at a time
            sensitivity.append(move_lever([lever], [round(i, 2)])) # Move lever and store CO2 value # print(sensitivity)
        all_sensitivities[:, col] = sensitivity # Append
        col += 1
    set_to_benchmark() # Reset levers to benchmark pathway
    ### Plotting routine ###
    x_lever = np.linspace(1, 3.9, 30) # X axis
    mean = 3000 # Mean threshold
    upper = mean + mean*0.05 # Upper threshold
    lower = mean - mean*0.05 # Lower threshold
    plt.figure(figsize = (20, 10))
    for i in range(48):
        plt.plot(x_lever, all_sensitivities[:, i])
    plt.title("Temperature values and thresholds")
    plt.xlabel("Lever position")
    plt.ylabel("GtCO2 per capita")
    plt.axhline(y=3000, color='b', linestyle='-') # Plot thresholds
    plt.axhline(y=lower, color='g', linestyle='--')
    plt.axhline(y=upper, color='g', linestyle='--')
    plt.ylim([2250, 3750])
    plt.figure(figsize = (20, 10))
    thresholds = np.zeros((48, 2))
    lever_number = 0
    for i in all_sensitivities.T: # Calculate lever values corresponding to thresholds
        temp = []
        pos = []
        count = 0
        for j in i:
            # Keep only readings inside the +/-5% band around the mean threshold.
            if j<upper and j>lower:
                temp.append(j)
                pos.append(round(x_lever[count], 2))
            count += 1
        thresholds[lever_number, :] = [pos[temp.index(max(temp))], pos[temp.index(min(temp))]]
        plt.plot(pos, temp)
        plt.title("Temperature values within thresholds")
        plt.xlabel("Lever position")
        plt.ylabel("GtCO2 per capita")
        lever_number+=1
    plt.figure(figsize = (20, 20))
    count = 0
    for i in thresholds:
        plt.plot(np.linspace(i[0], i[1], 10), np.linspace(count, count, 10))
        count += 1
    plt.yticks(np.arange(48), list(dfs_3.iloc[:, 0].to_numpy()), fontsize = 20)
    plt.title("Lever ranges that meet temperature thresholds")
    plt.xlabel("Value range")
    plt.ylabel("Lever")
    ### End of plotting routine ###
    return thresholds
def lever_step(lever_value, thresholds):
"""Naive modification of the Metropolis Hastings algorithm - moves a lever randomly up or down by 0.1. Return the new lever value"""
move = -0.
prob = random.randint(0, 100)/100 # Generate random number
if prob < 0.5: move = -0.1 # Move lever down
else: move = 0.1 # Move lever up
# If the lever value is out of bounds, reverse direction of step
if (lever_value + move < thresholds[0]) or (lever_value + move > thresholds[1]):
move = -move
return round(lever_value + move, 3)
def cost_sensitivity():
    """
    Analysis of GDP sensitivity to changes in the inputs.
    Sets all levers to 2 and moves each lever to 3 at a time,
    reading its corresponding output.

    Produces a bar chart of the GDP change (relative to a benchmark reading)
    caused by moving each lever from 2 to 3. Returns None.
    """
    for lever in dfs_3.iloc[:, 0]: # Set all levers to 2
        move_lever([lever], [2])
    costs_sensitivity = []
    for lever in dfs_3.iloc[:, 0]: # Move each lever to 3 at a time
        print("Moving lever: ", lever)
        costs_temp = move_lever([lever], [3], costs = True)[1]
        costs_sensitivity.append(costs_temp)
        print("Marginal cost: ", costs_temp)
        print("Returning lever back to normal... \n")
        move_lever([lever], [2], costs = False) # Put the lever back to 2
    reference = move_lever(['Calories consumed'], [2], costs = True)[1] # Read the benchmark cost
    data = {'Lever': list(dfs_3.iloc[:, 0].to_numpy()), # Dictionary containing costs and lever names
            'Marginal cost': costs_sensitivity
            }
    costs_df = pd.DataFrame(data, columns = ['Lever', 'Marginal cost']) # Put cost values into dataframe
    costs_df = costs_df.sort_values(by=['Marginal cost'], ascending = False) # Sort costs
    # NOTE(review): the two extreme readings are overwritten with hard-coded
    # values (-0.08 and 0.46) to work around scraping outliers - confirm these
    # magic numbers are still appropriate.
    costs_df.iloc[0, 1] = -0.08 # Truncate first value (very high, reverses direction of GDP, leading to bug)
    costs_df = costs_df.sort_values(by=['Marginal cost'], ascending = False)
    costs_df.iloc[-1, 1] = 0.46
    costs_df = costs_df.sort_values(by=['Marginal cost'], ascending = True)
    costs_df['Marginal cost'] = costs_df['Marginal cost'] - reference # Calculate cost change wrt benchmark
    ### Plotting routine ###
    plt.figure(figsize = (20, 10))
    plt.xticks(rotation=45, horizontalalignment='right')
    plt.bar(costs_df.iloc[:, 0], costs_df.iloc[:, 1])
    plt.ylabel("$\Delta$GDP decrease")
    plt.title("∆GDP decrease with respect to TIAM-UCL 2DS benchmark pathway – Moving each lever from 2 to 3")
    ### End of plotting routine ###
    return
def set_to_benchmark():
    """Load the TIAM-UCL 2DS benchmark pathway and dismiss the start screen."""
    benchmark_url = ('http://tool.globalcalculator.org/globcalc.html?levers='
                     'l2wz222CBpp3pC3f2Dw3DC3plzgj1tA13pp2p223ri11111p22211111111/dashboard/en')
    driver.get(benchmark_url)
    # Bypass the "Start now" screen.
    driver.find_element_by_id('lets-start').click()
    return
def random_lever_value(lever_name):
    """Set the named lever to a random position in [1.0, 3.9]; return the CO2 and GDP readings."""
    position = round(random.randint(10, 39)/10, 2)
    # Single-lever move; costs=True so GDP is read alongside CO2.
    return move_lever([lever_name], [position], costs = True)
def new_lever_combination():
    """Return a list with one random value in [1.0, 3.9] per lever."""
    # One independent draw for every lever name.
    return [random.randint(10, 39)/10 for _ in range(len(lever_names))]
def generate_mu_proposal_2D(all_levers_current, all_thresholds, address = None):
    """Used in MCMC. Takes arrays containing all current values and thresholds and generates a new mu proposal.

    Args:
        all_levers_current (list): Current value of every lever (updated in place).
        all_thresholds (list of lists): [low, high] bounds per lever.
        address (optional, string): URL to start from; defaults to the browser's
            current URL, read at call time.

    Returns:
        all_levers_current (list): The stepped lever values.
        output (list): [temperature, cost] read from the calculator.
    """
    # BUGFIX: the default used to be `address = str(driver.current_url)`, which is
    # evaluated once at import time, so calls without an explicit address always
    # restarted from that stale URL.
    if address is None:
        address = str(driver.current_url)
    for i in range(len(lever_names)): # Take discrete MH step for each lever
        all_levers_current[i] = lever_step(all_levers_current[i], all_thresholds[i])
    # Pass list with all lever names and current values. Read temperature and costs.
    output = move_lever(lever_names, all_levers_current, costs = True, address = address)
    return all_levers_current, output
def save_all():
    """Save all accepted lever combinations, temperature and cost values, trace and probability values to a .XLSX file"""
    # NOTE(review): relies on module-level globals `posterior`, `rate`,
    # `accepted_inputs` and `accepted_values` populated by a previous MCMC run;
    # they are not defined in this file chunk - confirm before calling.
    # NOTE(review): both MCMC_output_*.xlsx files must already exist, since
    # load_workbook() opens them rather than creating them; rows are appended
    # after the last existing row.
    df1 = pd.DataFrame(np.array(posterior[:-1])); # Dataframe with posterior
    df1['2'] = rate # Append rate to it
    writer = pd.ExcelWriter('MCMC_output_1.xlsx', engine='openpyxl') # Open Excel file
    writer.book = load_workbook('MCMC_output_1.xlsx') # Load current workbook
    writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets) # Load all sheets
    reader = pd.read_excel(r'MCMC_output_1.xlsx') # Read current file
    df1.to_excel(writer,index=False,header=False,startrow=len(reader)+1) # Write out the new sheet
    writer.close() # Close Excel file
    df2 = pd.DataFrame(np.array(accepted_inputs)); # Dataframe with accepted lever combinations
    df2['48'] = np.array(accepted_values)[:, 0]; df2['49'] = np.array(accepted_values)[:, 1]; # Append accepted temperature and cost values
    writer = pd.ExcelWriter('MCMC_output_2.xlsx', engine='openpyxl') # Open Excel file
    writer.book = load_workbook('MCMC_output_2.xlsx') # Load current workbook
    writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets) # Load all sheets
    reader = pd.read_excel(r'MCMC_output_2.xlsx') # Read current file
    df2.to_excel(writer,index=False,header=False,startrow=len(reader)+1) # Write out the new sheet
    writer.close() # Close Excel file
    return
def set_lever(target, lever_name):
    """Set a given lever (lever_name) to a value (target) by clicking on it - Using a minimum number of clicks.

    The lever widget is made of integer "boxes" (1, 2, 3, ...), each subdivided
    into ten 0.1 steps. The code first reads the lever's current value, decides
    which box (if any) must be clicked, and how many extra 0.1-clicks follow;
    it then performs the clicks and prints the resulting CO2 reading.
    """
    n_clicks = 0 # Set to 0 by default (<=> do nothing)
    current = driver.find_element_by_id(read_lever(lever_name)) # Get lever id
    current = float(current.get_attribute('textContent')) # Read current lever value
    # Two possibilities: same box, or different box
    jump = math.trunc(target) - math.trunc(current)
    diff = target - current
    # If the lever is already set
    if target == current:
        # print("Current value = Target value")
        box_number = math.trunc(current)
    # Same box -> 2 possibilities: up or down (down can hit boundary or not)
    elif jump == 0:
        #print("Same box case")
        # Up
        # Non boundary
        box_number = math.trunc(current) + 1 # Current box
        if diff > 0:
            #print("Lever up")
            #print("Non boundary")
            # Clicks wrap around within the box: distance up from `current`
            # plus distance down from the box's upper edge to `target`.
            n_clicks = int(((current - math.trunc(current)) + (math.trunc(target) + 1 - target))*10)
        # Down
        elif diff < 0:
            #print("Lever down")
            # Non boundary
            if target%math.trunc(target) != 0:
                #print("Non boundary")
                n_clicks = int(round(abs(diff*10)))
            # Boundary: click previous level, then current
            else:
                #print("Boundary")
                n_clicks = 0 # Clicking done here (do not click later on)
                # Watch out for boundary case: box number 1
                if math.trunc(current) == 1:
                    #print("Special case = 1")
                    userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = 1))[0]
                    userid_element.click()
                else:
                    userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number - 1))[0]
                    userid_element.click()
    # Different box -> 2 possibilities: up or down (each can be boundary or non boundary)
    elif jump != 0:
        #print ("Different box case")
        box_number = math.trunc(current) + 1 # Current box (default)
        # Up
        if diff > 0:
            #print("Lever up")
            # Boundary
            if target%math.trunc(target) == 0:
                if jump == 1:
                    #print("Special case - Different box, boundary closest box")
                    userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number+1))[0]
                    userid_element.click()
                    box_number = target
                    n_clicks = 1
                else:
                    #print("Boundary")
                    box_number = target
                    n_clicks = 1
            # Non boundary
            else:
                #print("Non boundary")
                box_number = math.trunc(target) + 1
                userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number))[0]
                userid_element.click()
                n_clicks = int(round((math.trunc(target) + 1 - target)*10))
        # Down
        elif diff < 0:
            #print("Lever down")
            # Boundary
            if target%math.trunc(target) == 0:
                #print("Boundary")
                box_number = target
                n_clicks = 1
            # Non boundary
            else:
                #print("Non boundary")
                box_number = math.trunc(target) + 1
                userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number))[0]
                userid_element.click()
                n_clicks = int(round((math.trunc(target) + 1 - target)*10))
    # Perform the remaining fine-grained clicks on the chosen box.
    userid_element = driver.find_elements_by_xpath(find_lever_ref(lever_name, box = box_number))[0]
    #print("Number of clicks: ", n_clicks)
    for i in range(n_clicks):
        userid_element.click()
        time.sleep(0.25)
    print("CO2 emissions: ", read_CO2(), " \t Meets 2C target?", int(read_CO2()[:4]) < 3010)
    driver.find_element_by_xpath('/html/body/table[1]/tbody/tr/td/table/tbody/tr[1]/td/table/tbody/tr[1]/td[1]').click()
    # move mouse away to avoid collisions
return | [
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"scipy.stats.multivariate_normal",
"time.sleep",
"numpy.array",
"math.trunc",
"pandas.read_excel",
"pandas.ExcelWriter",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"numpy.linspace",
"pa... | [((935, 969), 'pandas.read_excel', 'pd.read_excel', (['"""./Output_map.xlsx"""'], {}), "('./Output_map.xlsx')\n", (948, 969), True, 'import pandas as pd\n'), ((1023, 1056), 'pandas.read_excel', 'pd.read_excel', (['"""./Input_map.xlsx"""'], {}), "('./Input_map.xlsx')\n", (1036, 1056), True, 'import pandas as pd\n'), ((7258, 7273), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (7268, 7273), False, 'import time\n'), ((7374, 7387), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7384, 7387), False, 'import time\n'), ((10822, 10838), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (10832, 10838), False, 'import time\n'), ((13032, 13045), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (13042, 13045), False, 'import time\n'), ((13149, 13164), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (13159, 13164), False, 'import time\n'), ((15654, 15677), 'numpy.linspace', 'np.linspace', (['(1)', '(3.9)', '(30)'], {}), '(1, 3.9, 30)\n', (15665, 15677), True, 'import numpy as np\n'), ((15818, 15846), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (15828, 15846), True, 'import matplotlib.pyplot as plt\n'), ((15928, 15974), 'matplotlib.pyplot.title', 'plt.title', (['"""Temperature values and thresholds"""'], {}), "('Temperature values and thresholds')\n", (15937, 15974), True, 'import matplotlib.pyplot as plt\n'), ((15979, 16007), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lever position"""'], {}), "('Lever position')\n", (15989, 16007), True, 'import matplotlib.pyplot as plt\n'), ((16012, 16042), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GtCO2 per capita"""'], {}), "('GtCO2 per capita')\n", (16022, 16042), True, 'import matplotlib.pyplot as plt\n'), ((16047, 16092), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(3000)', 'color': '"""b"""', 'linestyle': '"""-"""'}), "(y=3000, color='b', linestyle='-')\n", (16058, 16092), True, 'import matplotlib.pyplot as plt\n'), 
((16115, 16162), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'lower', 'color': '"""g"""', 'linestyle': '"""--"""'}), "(y=lower, color='g', linestyle='--')\n", (16126, 16162), True, 'import matplotlib.pyplot as plt\n'), ((16167, 16214), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'upper', 'color': '"""g"""', 'linestyle': '"""--"""'}), "(y=upper, color='g', linestyle='--')\n", (16178, 16214), True, 'import matplotlib.pyplot as plt\n'), ((16219, 16241), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[2250, 3750]'], {}), '([2250, 3750])\n', (16227, 16241), True, 'import matplotlib.pyplot as plt\n'), ((16247, 16275), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (16257, 16275), True, 'import matplotlib.pyplot as plt\n'), ((16295, 16312), 'numpy.zeros', 'np.zeros', (['(48, 2)'], {}), '((48, 2))\n', (16303, 16312), True, 'import numpy as np\n'), ((16923, 16951), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (16933, 16951), True, 'import matplotlib.pyplot as plt\n'), ((17178, 17236), 'matplotlib.pyplot.title', 'plt.title', (['"""Lever ranges that meet temperature thresholds"""'], {}), "('Lever ranges that meet temperature thresholds')\n", (17187, 17236), True, 'import matplotlib.pyplot as plt\n'), ((17241, 17266), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value range"""'], {}), "('Value range')\n", (17251, 17266), True, 'import matplotlib.pyplot as plt\n'), ((17271, 17290), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Lever"""'], {}), "('Lever')\n", (17281, 17290), True, 'import matplotlib.pyplot as plt\n'), ((18895, 18949), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['Lever', 'Marginal cost']"}), "(data, columns=['Lever', 'Marginal cost'])\n", (18907, 18949), True, 'import pandas as pd\n'), ((19512, 19540), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (19522, 19540), True, 
'import matplotlib.pyplot as plt\n'), ((19548, 19600), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)', 'horizontalalignment': '"""right"""'}), "(rotation=45, horizontalalignment='right')\n", (19558, 19600), True, 'import matplotlib.pyplot as plt\n'), ((19605, 19654), 'matplotlib.pyplot.bar', 'plt.bar', (['costs_df.iloc[:, 0]', 'costs_df.iloc[:, 1]'], {}), '(costs_df.iloc[:, 0], costs_df.iloc[:, 1])\n', (19612, 19654), True, 'import matplotlib.pyplot as plt\n'), ((19659, 19694), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Delta$GDP decrease"""'], {}), "('$\\\\Delta$GDP decrease')\n", (19669, 19694), True, 'import matplotlib.pyplot as plt\n'), ((19698, 19813), 'matplotlib.pyplot.title', 'plt.title', (['"""∆GDP decrease with respect to TIAM-UCL 2DS benchmark pathway – Moving each lever from 2 to 3"""'], {}), "(\n '∆GDP decrease with respect to TIAM-UCL 2DS benchmark pathway – Moving each lever from 2 to 3'\n )\n", (19707, 19813), True, 'import matplotlib.pyplot as plt\n'), ((21680, 21735), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['"""MCMC_output_1.xlsx"""'], {'engine': '"""openpyxl"""'}), "('MCMC_output_1.xlsx', engine='openpyxl')\n", (21694, 21735), True, 'import pandas as pd\n'), ((21772, 21807), 'openpyxl.load_workbook', 'load_workbook', (['"""MCMC_output_1.xlsx"""'], {}), "('MCMC_output_1.xlsx')\n", (21785, 21807), False, 'from openpyxl import load_workbook\n'), ((21937, 21972), 'pandas.read_excel', 'pd.read_excel', (['"""MCMC_output_1.xlsx"""'], {}), "('MCMC_output_1.xlsx')\n", (21950, 21972), True, 'import pandas as pd\n'), ((22383, 22438), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['"""MCMC_output_2.xlsx"""'], {'engine': '"""openpyxl"""'}), "('MCMC_output_2.xlsx', engine='openpyxl')\n", (22397, 22438), True, 'import pandas as pd\n'), ((22475, 22510), 'openpyxl.load_workbook', 'load_workbook', (['"""MCMC_output_2.xlsx"""'], {}), "('MCMC_output_2.xlsx')\n", (22488, 22510), False, 'from openpyxl import load_workbook\n'), ((22640, 
22675), 'pandas.read_excel', 'pd.read_excel', (['"""MCMC_output_2.xlsx"""'], {}), "('MCMC_output_2.xlsx')\n", (22653, 22675), True, 'import pandas as pd\n'), ((26997, 27013), 'time.sleep', 'time.sleep', (['(0.25)'], {}), '(0.25)\n', (27007, 27013), False, 'import time\n'), ((8095, 8110), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (8105, 8110), False, 'import time\n'), ((8609, 8622), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8619, 8622), False, 'import time\n'), ((13649, 13662), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (13659, 13662), False, 'import csv\n'), ((15291, 15314), 'numpy.linspace', 'np.linspace', (['(1)', '(3.9)', '(30)'], {}), '(1, 3.9, 30)\n', (15302, 15314), True, 'import numpy as np\n'), ((15881, 15923), 'matplotlib.pyplot.plot', 'plt.plot', (['x_lever', 'all_sensitivities[:, i]'], {}), '(x_lever, all_sensitivities[:, i])\n', (15889, 15923), True, 'import matplotlib.pyplot as plt\n'), ((16741, 16760), 'matplotlib.pyplot.plot', 'plt.plot', (['pos', 'temp'], {}), '(pos, temp)\n', (16749, 16760), True, 'import matplotlib.pyplot as plt\n'), ((16769, 16818), 'matplotlib.pyplot.title', 'plt.title', (['"""Temperature values within thresholds"""'], {}), "('Temperature values within thresholds')\n", (16778, 16818), True, 'import matplotlib.pyplot as plt\n'), ((16827, 16855), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Lever position"""'], {}), "('Lever position')\n", (16837, 16855), True, 'import matplotlib.pyplot as plt\n'), ((16864, 16894), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GtCO2 per capita"""'], {}), "('GtCO2 per capita')\n", (16874, 16894), True, 'import matplotlib.pyplot as plt\n'), ((17555, 17577), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (17569, 17577), False, 'import random\n'), ((20334, 20356), 'random.randint', 'random.randint', (['(10)', '(39)'], {}), '(10, 39)\n', (20348, 20356), False, 'import random\n'), ((21573, 21597), 'numpy.array', 'np.array', 
(['posterior[:-1]'], {}), '(posterior[:-1])\n', (21581, 21597), True, 'import numpy as np\n'), ((22155, 22180), 'numpy.array', 'np.array', (['accepted_inputs'], {}), '(accepted_inputs)\n', (22163, 22180), True, 'import numpy as np\n'), ((22245, 22270), 'numpy.array', 'np.array', (['accepted_values'], {}), '(accepted_values)\n', (22253, 22270), True, 'import numpy as np\n'), ((22290, 22315), 'numpy.array', 'np.array', (['accepted_values'], {}), '(accepted_values)\n', (22298, 22315), True, 'import numpy as np\n'), ((23289, 23307), 'math.trunc', 'math.trunc', (['target'], {}), '(target)\n', (23299, 23307), False, 'import math\n'), ((23310, 23329), 'math.trunc', 'math.trunc', (['current'], {}), '(current)\n', (23320, 23329), False, 'import math\n'), ((23492, 23511), 'math.trunc', 'math.trunc', (['current'], {}), '(current)\n', (23502, 23511), False, 'import math\n'), ((730, 751), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (749, 751), False, 'from webdriver_manager.chrome import ChromeDriverManager\n'), ((4870, 4886), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4884, 4886), True, 'import numpy as np\n'), ((7561, 7574), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (7571, 7574), False, 'import time\n'), ((7765, 7780), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (7775, 7780), False, 'import time\n'), ((9858, 9873), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (9868, 9873), False, 'import time\n'), ((12130, 12152), 'random.randint', 'random.randint', (['(18)', '(32)'], {}), '(18, 32)\n', (12144, 12152), False, 'import random\n'), ((17011, 17038), 'numpy.linspace', 'np.linspace', (['i[0]', 'i[1]', '(10)'], {}), '(i[0], i[1], 10)\n', (17022, 17038), True, 'import numpy as np\n'), ((17040, 17069), 'numpy.linspace', 'np.linspace', (['count', 'count', '(10)'], {}), '(count, count, 10)\n', (17051, 17069), True, 'import numpy as np\n'), ((17109, 17122), 'numpy.arange', 'np.arange', (['(48)'], 
{}), '(48)\n', (17118, 17122), True, 'import numpy as np\n'), ((9337, 9352), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (9347, 9352), False, 'import time\n'), ((10021, 10034), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (10031, 10034), False, 'import time\n'), ((20717, 20739), 'random.randint', 'random.randint', (['(10)', '(39)'], {}), '(10, 39)\n', (20731, 20739), False, 'import random\n'), ((23718, 23737), 'math.trunc', 'math.trunc', (['current'], {}), '(current)\n', (23728, 23737), False, 'import math\n'), ((4214, 4259), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['mu_prior_mu', 'mu_prior_sd'], {}), '(mu_prior_mu, mu_prior_sd)\n', (4233, 4259), False, 'from scipy.stats import multivariate_normal\n'), ((4279, 4324), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['mu_prior_mu', 'mu_prior_sd'], {}), '(mu_prior_mu, mu_prior_sd)\n', (4298, 4324), False, 'from scipy.stats import multivariate_normal\n'), ((25106, 25125), 'math.trunc', 'math.trunc', (['current'], {}), '(current)\n', (25116, 25125), False, 'import math\n'), ((3934, 3992), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['mu_proposal', '[[1000000, 0], [0, 100]]'], {}), '(mu_proposal, [[1000000, 0], [0, 100]])\n', (3953, 3992), False, 'from scipy.stats import multivariate_normal\n'), ((4013, 4070), 'scipy.stats.multivariate_normal', 'multivariate_normal', (['mu_current', '[[1000000, 0], [0, 100]]'], {}), '(mu_current, [[1000000, 0], [0, 100]])\n', (4032, 4070), False, 'from scipy.stats import multivariate_normal\n'), ((24080, 24098), 'math.trunc', 'math.trunc', (['target'], {}), '(target)\n', (24090, 24098), False, 'import math\n'), ((24481, 24500), 'math.trunc', 'math.trunc', (['current'], {}), '(current)\n', (24491, 24500), False, 'import math\n'), ((25279, 25297), 'math.trunc', 'math.trunc', (['target'], {}), '(target)\n', (25289, 25297), False, 'import math\n'), ((25913, 25931), 'math.trunc', 'math.trunc', (['target'], {}), '(target)\n', 
(25923, 25931), False, 'import math\n'), ((23882, 23901), 'math.trunc', 'math.trunc', (['current'], {}), '(current)\n', (23892, 23901), False, 'import math\n'), ((26294, 26312), 'math.trunc', 'math.trunc', (['target'], {}), '(target)\n', (26304, 26312), False, 'import math\n'), ((26534, 26552), 'math.trunc', 'math.trunc', (['target'], {}), '(target)\n', (26544, 26552), False, 'import math\n'), ((23906, 23924), 'math.trunc', 'math.trunc', (['target'], {}), '(target)\n', (23916, 23924), False, 'import math\n'), ((26125, 26143), 'math.trunc', 'math.trunc', (['target'], {}), '(target)\n', (26135, 26143), False, 'import math\n'), ((26746, 26764), 'math.trunc', 'math.trunc', (['target'], {}), '(target)\n', (26756, 26764), False, 'import math\n')] |
import numpy as np
import matplotlib.pyplot as plt
import sympy as sp
def func(exp):
    """Compile a user-supplied expression string into a callable f(x).

    Parameters:
        exp: input expression by the user to be lambdified

    Returns:
        A callable evaluating the expression with the ``math`` backend.
    """
    return sp.utilities.lambdify(sp.symbols('x'), exp, "math")
def diffy(exp):
    """Return the symbolic derivative d/dx of the given expression.

    Parameters:
        exp: input expression whose differential is calculated
    """
    return sp.diff(exp, sp.symbols('x'))
def newton_raphson(expr, guess, tol, roots):
    """Find a root of ``expr`` using the Newton-Raphson method.

    Iterates x_{n+1} = x_n - f(x_n)/f'(x_n) until two successive
    estimates differ by less than ``tol``.  Implemented as a loop
    (instead of the original tail recursion) so a slowly converging
    guess or a very tight tolerance cannot exhaust Python's recursion
    limit; the derivative and the compiled function are also built once
    instead of on every step.

    Parameters:
        expr: input expression (in x) for which the root is calculated
        guess: initial guess as input by the user
        tol: maximum permissible error between successive estimates
        roots: list that collects every intermediate estimate; it is
            mutated in place and also returned (original contract).

    Returns:
        The ``roots`` list; its last element is the accepted root.
    """
    derivative = func(diffy(expr))
    function = func(expr)
    current = guess
    while True:
        nxt = current - function(current) / derivative(current)
        roots.append(nxt)
        if abs(nxt - current) < tol:
            return roots
        current = nxt
def plot_func(expr, roots, guess):
    """Plot the function together with the Newton-Raphson iterates.

    Highlights the initial guess in red and the accepted final root in
    green; intermediate estimates are shown as small black dots.

    Parameters:
        expr: the expression to be plotted
        roots: array of roots calculated by Newton-Raphson method
        guess: initial guess input by the user
    """
    f = func(expr)
    # Sample the function over a window centred around the final root.
    xs = np.linspace(np.floor(roots[-1]-5), np.ceil(roots[-1]+5), 50)
    ys = [f(value) for value in xs]
    # Zero baseline used as the y coordinate of every plotted root.
    baseline = np.zeros(len(roots))
    fig = plt.figure(figsize=(8,7))
    ax1 = fig.add_axes([0.05, 0.05, 0.9, 0.9])
    ax1.plot(xs, ys, label="Function: %s"% expr)
    ax1.axhline(0, color='red', ls='--', alpha=0.5)
    ax1.scatter(roots[:-1], baseline[:-1], color='black', s=10, alpha=0.8, edgecolor='black', label="Roots")
    ax1.scatter(guess, 0, color='red', s=15, label="Initial Guess: %s"% str(guess))
    ax1.scatter(roots[-1], 0, color="green", s=15, label="Final Root: %s"% str(round(roots[-1], 3)))
    ax1.set_title("Finding Roots: Newton Raphson Method")
    ax1.legend()
    plt.show()
# Sample input:
# expr = "x - tan(x)"
# init_guess = 4.6
# error = 0.0001
iterates = []
user_expr = input("Enter a continuous function in x: ")
start = float(input("Enter an initial guess for the root: "))
tolerance = float(input("Enter the maximum permissible error of the root: "))
answers = newton_raphson(user_expr, start, tolerance, iterates)
plot_func(user_expr, answers, start)
print(f"The root of {user_expr} by Newton-Raphson method: {round(answers[-1], 3)}")
| [
"numpy.ceil",
"sympy.utilities.lambdify",
"numpy.floor",
"sympy.symbols",
"matplotlib.pyplot.figure",
"sympy.diff",
"matplotlib.pyplot.show"
] | [((266, 281), 'sympy.symbols', 'sp.symbols', (['"""x"""'], {}), "('x')\n", (276, 281), True, 'import sympy as sp\n'), ((290, 327), 'sympy.utilities.lambdify', 'sp.utilities.lambdify', (['x', 'exp', '"""math"""'], {}), "(x, exp, 'math')\n", (311, 327), True, 'import sympy as sp\n'), ((485, 500), 'sympy.symbols', 'sp.symbols', (['"""x"""'], {}), "('x')\n", (495, 500), True, 'import sympy as sp\n'), ((509, 524), 'sympy.diff', 'sp.diff', (['exp', 'x'], {}), '(exp, x)\n', (516, 524), True, 'import sympy as sp\n'), ((1845, 1871), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 7)'}), '(figsize=(8, 7))\n', (1855, 1871), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2376), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2374, 2376), True, 'import matplotlib.pyplot as plt\n'), ((1614, 1637), 'numpy.floor', 'np.floor', (['(roots[-1] - 5)'], {}), '(roots[-1] - 5)\n', (1622, 1637), True, 'import numpy as np\n'), ((1637, 1659), 'numpy.ceil', 'np.ceil', (['(roots[-1] + 5)'], {}), '(roots[-1] + 5)\n', (1644, 1659), True, 'import numpy as np\n')] |
import OpenEXR
import Imath
import numpy as np
import time
import data.util_exr as exr_utils
import os
def _crop(img, pos, size):
ow, oh = img.shape[0], img.shape[1]
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
# return img.crop((x1, y1, x1 + tw, y1 + th)) #CHANGED
return img[x1:(x1 + tw), y1:(y1 + th), :]
return img
def get_distinct_prefix(dir_path):
    """Collect the distinct file-name prefixes found in ``dir_path``.

    A prefix is the part of the name before the first '.', with the
    final ``-suffix`` component (if any) stripped, e.g.
    ``scene-001.exr`` -> ``scene``.  Only regular files are considered.
    """
    prefixes = {
        entry.split(".")[0].rsplit("-", 1)[0]
        for entry in os.listdir(dir_path)
        if os.path.isfile(os.path.join(dir_path, entry))
    }
    return list(prefixes)
# Divide variance by mean^2 to get relative variance
def CalcRelVar(data, var, calcLog, calcLum=True, calcMean=False):
    """Convert variance to relative variance: var / (denom^2 + 1e-5).

    The denominator is the per-pixel luminance of ``data`` (default),
    its channel mean, or ``data`` itself, depending on the flags; a
    small epsilon guards against division by zero.  Optionally applies
    the log transform to the result before returning it.
    """
    if calcLum:
        denom = np.expand_dims(CalcLuminance(data), axis=2)
    elif calcMean:
        denom = np.expand_dims(CalcMean(data), axis=2)
    else:
        denom = data
    rel = var / ((denom * denom) + 1.0e-5)
    return LogTransform(rel) if calcLog else rel
# Calculate log transform (with an offset to map zero to zero)
def LogTransform(data):
    """Apply ``log(1 + data)``, an offset log transform mapping zero to zero.

    Parameters:
        data: array of non-negative values.

    Returns:
        ``np.log(data + 1.0)`` elementwise.

    Raises:
        ValueError: if ``data`` contains any negative entry (the
            transform would be undefined below -1 and the original
            contract rejected all negatives).
    """
    # Explicit check instead of ``assert`` so the validation survives
    # running Python with the -O flag; np.any also short-circuits where
    # the original np.sum scanned the whole array.
    if np.any(data < 0):
        raise ValueError("LogTransform expects non-negative input")
    return np.log(data + 1.0)
# Calculate luminance (3 channels in and 1 channel out)
def CalcLuminance(data):
    """Per-pixel luminance (3 channels in, 1 channel out).

    Uses the ITU-R BT.709 weights 0.2126 / 0.7152 / 0.0722 for the
    R, G and B channels respectively.
    """
    red = data[:, :, 0]
    green = data[:, :, 1]
    blue = data[:, :, 2]
    return 0.2126 * red + 0.7152 * green + 0.0722 * blue
# Calculate mean (3 channels in and 1 channel out)
def CalcMean(data):
    """Per-pixel channel average (3 channels in, 1 channel out).

    Note: the hard-coded weights are 0.3333 each, so the result is an
    approximation of the true mean (the weights sum to 0.9999).
    """
    first = 0.3333 * data[:, :, 0]
    second = 0.3333 * data[:, :, 1]
    third = 0.3333 * data[:, :, 2]
    return first + second + third
# for shading
def loadDisneyEXR_feature_shading(path, FEATURE_LIST):
    """Load the selected per-feature EXR buffers for one shading sample.

    The part of ``path`` before the first '.' selects the sample's
    companion files (``<prefix>_normal.exr``, ``<prefix>_depth.exr``,
    ...).  Every loaded buffer is cropped to a 128x128 window at offset
    (1, 1); a buffer whose file fails to load is replaced by zeros of
    the expected shape.  The albedo (``_texture.exr``) is always loaded
    because it is needed to demodulate the diffuse buffer.

    NOTE(review): ``color`` is only assigned inside the "diffuse" /
    "specular" branches, so FEATURE_LIST is assumed to contain at least
    one of those entries -- otherwise this raises NameError.  When both
    are present, the specular buffer is the one returned as ``color``.

    Parameters:
        path: path to one of the sample's EXR files.
        FEATURE_LIST: collection of feature names; recognised entries
            are "normal", "depth", "albedo", "visibility", "diffuse"
            and "specular".

    Returns:
        (color, features): the log-transformed diffuse or specular
        buffer, and the selected auxiliary buffers concatenated along
        the channel axis (zeros of ``color``'s shape when no auxiliary
        feature was selected).
    """
    # time0 = time.time()
    prefix = path.split(".")[0]
    # color_path = prefix + "_color.exr"
    # Companion files share the sample's prefix and differ by suffix.
    variance_path = prefix + "_variance.exr"
    normal_path = prefix + "_normal.exr"
    depth_path = prefix + "_depth.exr"
    texture_path = prefix + "_texture.exr"
    visibility_path = prefix + "_visibility.exr"
    diffuse_path = prefix + "_diffuse.exr"
    specular_path = prefix + "_specular.exr"
    # inFile = exr_utils.open(variance_path)
    # variance = inFile.get_all()["default"]
    # NOTE(review): variance_path is built but never read below.
    # Each optional buffer: open the file, take its "default" layer,
    # crop to 128x128; fall back to zeros if anything goes wrong.
    if "normal" in FEATURE_LIST:
        try:
            inFile = exr_utils.open(normal_path)
            normal = inFile.get_all()["default"]
            normal = _crop(normal, (1,1), 128)
        except Exception:
            normal = np.zeros((128,128,3))
    if "depth" in FEATURE_LIST:
        try:
            inFile = exr_utils.open(depth_path)
            depth = inFile.get_all()["default"]
            depth = _crop(depth, (1,1), 128)
        except Exception:
            depth = np.zeros((128,128,1))
    # if "albedo" in FEATURE_LIST: //always load in albedo
    try:
        inFile = exr_utils.open(texture_path)
        texture = inFile.get_all()["default"]
        texture = _crop(texture, (1,1), 128)
    except Exception:
        texture = np.zeros((128,128,3))
    if "visibility" in FEATURE_LIST:
        try:
            inFile = exr_utils.open(visibility_path)
            visibility = inFile.get_all()["default"]
            visibility = _crop(visibility, (1,1), 128)
        except Exception:
            visibility = np.zeros((128,128,1))
    if "diffuse" in FEATURE_LIST:
        try:
            inFile = exr_utils.open(diffuse_path)
            diffuse = inFile.get_all()["default"]
            diffuse = _crop(diffuse, (1,1), 128)
        except Exception:
            diffuse = np.zeros((128,128,3))
    if "specular" in FEATURE_LIST:
        try:
            inFile = exr_utils.open(specular_path)
            specular = inFile.get_all()["default"]
            specular = _crop(specular, (1,1), 128)
        except Exception:
            specular = np.zeros((128,128,3))
    # variance = CalcRelVar( (1+ color.copy()) , variance, False, False, True )
    # Clamp negatives, divide diffuse by the albedo plus a small bias,
    # then log-transform; specular is log-transformed directly.
    if "diffuse" in FEATURE_LIST:
        diffuse[diffuse < 0.0] = 0.0
        diffuse = diffuse / (texture + 0.00316)
        diffuse = LogTransform(diffuse)
        color = diffuse
    if "specular" in FEATURE_LIST:
        specular[specular < 0.0] = 0.0
        specular = LogTransform(specular)
        color = specular
    # Assemble the auxiliary feature tensor in a fixed channel order:
    # normal, depth, albedo, visibility (only the requested ones).
    feature_tuple = ()
    if "normal" in FEATURE_LIST:
        normal = np.nan_to_num(normal)
        if "specular" in FEATURE_LIST:
            # Remap normals from [-1,1] to [0,1] and clamp.
            normal = (normal + 1.0)*0.5
            normal = np.maximum(np.minimum(normal,1.0),0.0)
        feature_tuple += (normal,)
    if "depth" in FEATURE_LIST:
        # Normalize current frame depth to [0,1]
        maxDepth = np.max(depth)
        if maxDepth != 0:
            depth /= maxDepth
        feature_tuple += (depth,)
    if "albedo" in FEATURE_LIST:
        # texture = np.clip(texture,0.0,1.0)
        feature_tuple += (texture, )
    if "visibility" in FEATURE_LIST:
        feature_tuple += (visibility, )
    if len(feature_tuple) == 0:
        return color, np.zeros(color.shape)
    feautres = np.concatenate(feature_tuple, axis=2)  # stacked along channels
    return color, feautres
def loadDisneyEXR_multi_ref_shading(path, FEATURE_LIST):
    """Load the reference diffuse/specular shading buffer for one sample.

    Loads the ``_diffuse.exr`` and/or ``_specular.exr`` files named
    after the sample prefix (everything in ``path`` before the first
    '.'), crops each to a 128x128 window at offset (1, 1) (zeros if the
    file fails to load), clamps negatives, demodulates diffuse by the
    albedo plus a small bias, and applies the log transform.

    NOTE(review): ``color`` is only assigned inside the "diffuse" /
    "specular" branches, so FEATURE_LIST must contain at least one of
    them; when both are present the specular buffer is returned.

    Parameters:
        path: path to one of the sample's EXR files.
        FEATURE_LIST: collection of feature names ("diffuse",
            "specular", ...).

    Returns:
        The log-transformed reference buffer.
    """
    # time0 = time.time()
    prefix = path.split(".")[0]
    color_path = prefix + "_color.exr"  # NOTE(review): assigned but never used
    diffuse_path = prefix + "_diffuse.exr"
    specular_path = prefix + "_specular.exr"
    texture_path = prefix + "_texture.exr"
    if "diffuse" in FEATURE_LIST:
        try:
            inFile = exr_utils.open(diffuse_path)
            diffuse = inFile.get_all()["default"]
            diffuse = _crop(diffuse, (1,1), 128)
        except Exception:
            diffuse = np.zeros((128,128,3))
    if "specular" in FEATURE_LIST:
        try:
            inFile = exr_utils.open(specular_path)
            specular = inFile.get_all()["default"]
            specular = _crop(specular, (1,1), 128)
        except Exception:
            specular = np.zeros((128,128,3))
    # The albedo is always loaded: it is needed to demodulate diffuse.
    try:
        inFile = exr_utils.open(texture_path)
        texture = inFile.get_all()["default"]
        texture = _crop(texture, (1,1), 128)
    except Exception:
        texture = np.zeros((128,128,3))
    if "diffuse" in FEATURE_LIST:
        diffuse[diffuse < 0.0] = 0.0
        diffuse = diffuse / (texture + 0.00316)
        diffuse = LogTransform(diffuse)
        color = diffuse
    if "specular" in FEATURE_LIST:
        specular[specular < 0.0] = 0.0
        specular = LogTransform(specular)
        color = specular
    return color
def loadDisneyEXR_ref(path):
    """Load a reference EXR image and return its log-transformed pixels.

    Parameters:
        path: path of the EXR file to read.
    """
    exr_file = exr_utils.open(path)
    pixels = exr_file.get_all()["default"]
    return LogTransform(pixels)
# def loadDisneyEXR_feature_from_whole(path, channel=3):
# image = OpenEXR.InputFile(path)
# dataWindow = image.header()['dataWindow']
# size = (dataWindow.max.x - dataWindow.min.x + 1, dataWindow.max.y - dataWindow.min.y + 1)
# FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)
# channel_to_extract = ["B","G","R",'colorVariance.Z','normal.B',"normal.G","normal.R",'depth.Z','albedo.B',"albedo.G","albedo.R",'visibility.Z']
# time0 = time.time()
# data = np.array([np.fromstring(image.channel(c, FLOAT), dtype=np.float32) for c in channel_to_extract])
# data = np.moveaxis(data, 0, -1)
# data = data.reshape(size[1], size[0], -1)
# time1 = time.time()
# color = data[:,:,:3]
# variance = data[:,:,3:4]
# normal = data[:,:,4:7]
# depth = data[:,:, 7:8]
# texture = data[:,:, 8:11]
# visibility = data[:,:, 11:12]
# time2 = time.time()
# variance = CalcRelVar( (1+ color.copy()) , variance, False, False, True )
# color = LogTransform(color)
# normal = (normal + 1.0)*0.5
# # Normalize current frame depth to [0,1]
# maxDepth = np.max(depth)
# if maxDepth != 0:
# depth /= maxDepth
# features = np.concatenate((variance,normal,depth,texture,visibility), axis=2)
# time3 = time.time()
# print("time 0 =%f, time1 = %f, time2 = %f " %(time1-time0, time2-time1,time3-time2))
# return color, features
# def loadDisneyEXR_feature(path, FEATURE_LIST):
# # time0 = time.time()
# prefix = path.split(".")[0]
# color_path = prefix + "_color.exr"
# variance_path = prefix + "_variance.exr"
# normal_path = prefix + "_normal.exr"
# depth_path = prefix + "_depth.exr"
# texture_path = prefix + "_texture.exr"
# # visibility_path = prefix + "_visibility.exr"
# diffuse_path = prefix + "_diffuse.exr"
# specular_path = prefix + "_specular.exr"
# try:
# inFile = exr_utils.open(color_path)
# color = inFile.get_all()["default"]
# color = _crop(color, (1,1), 128)
# except Exception:
# color = np.zeros((128,128,3))
# # inFile = exr_utils.open(variance_path)
# # variance = inFile.get_all()["default"]
# try:
# inFile = exr_utils.open(normal_path)
# normal = inFile.get_all()["default"]
# normal = _crop(normal, (1,1), 128)
# except Exception:
# normal = np.zeros((128,128,3))
# try:
# inFile = exr_utils.open(depth_path)
# depth = inFile.get_all()["default"]
# depth = _crop(depth, (1,1), 128)
# except Exception:
# depth = np.zeros((128,128,1))
# try:
# inFile = exr_utils.open(texture_path)
# texture = inFile.get_all()["default"]
# texture = _crop(texture, (1,1), 128)
# except Exception:
# texture = np.zeros((128,128,3))
# # try:
# # inFile = exr_utils.open(visibility_path)
# # visibility = inFile.get_all()["default"]
# # visibility = _crop(visibility
# # , (1,1), 128)
# # except Exception:
# # visibility = np.zeros((128,128,1))
# try:
# inFile = exr_utils.open(diffuse_path)
# diffuse = inFile.get_all()["default"]
# diffuse = _crop(diffuse, (1,1), 128)
# except Exception:
# diffuse = np.zeros((128,128,3))
# try:
# inFile = exr_utils.open(specular_path)
# specular = inFile.get_all()["default"]
# specular = _crop(specular, (1,1), 128)
# except Exception:
# specular = np.zeros((128,128,3))
# # variance = CalcRelVar( (1+ color.copy()) , variance, False, False, True )
# color[color < 0.0] = 0.0
# color = LogTransform(color)
# diffuse[diffuse < 0.0] = 0.0
# diffuse = LogTransform(diffuse)
# specular[specular < 0.0] = 0.0
# specular = LogTransform(specular)
# normal = np.nan_to_num(normal)
# normal = (normal + 1.0)*0.5
# normal = np.maximum(np.minimum(normal,1.0),0.0)
# # Normalize current frame depth to [0,1]
# maxDepth = np.max(depth)
# if maxDepth != 0:
# depth /= maxDepth
# # texture = np.clip(texture,0.0,1.0)
# # feautres = np.concatenate((variance, normal, depth, texture, visibility), axis=2)
# feautres = np.concatenate((normal, depth, texture), axis=2) #visibility
# return color, diffuse, specular, feautres
# # return np.concatenate((color, normal, depth, texture), axis=2)
# def loadDisneyEXR_multi_ref(path, FEATURE_LIST):
# # time0 = time.time()
# prefix = path.split(".")[0]
# color_path = prefix + "_color.exr"
# diffuse_path = prefix + "_diffuse.exr"
# specular_path = prefix + "_specular.exr"
# try:
# inFile = exr_utils.open(color_path)
# color = inFile.get_all()["default"]
# color = _crop(color, (1,1), 128)
# except Exception:
# color = np.zeros((128,128,3))
# try:
# inFile = exr_utils.open(diffuse_path)
# diffuse = inFile.get_all()["default"]
# diffuse = _crop(diffuse, (1,1), 128)
# except Exception:
# diffuse = np.zeros((128,128,3))
# try:
# inFile = exr_utils.open(specular_path)
# specular = inFile.get_all()["default"]
# specular = _crop(specular, (1,1), 128)
# except Exception:
# specular = np.zeros((128,128,3))
# color[color<0.0] = 0.0
# color = LogTransform(color)
# diffuse[diffuse < 0.0] = 0.0
# diffuse = LogTransform(diffuse)
# specular[specular < 0.0] = 0.0
# specular = LogTransform(specular)
# return color, diffuse, specular
| [
"os.listdir",
"numpy.minimum",
"numpy.log",
"os.path.join",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"numpy.concatenate",
"data.util_exr.open",
"numpy.nan_to_num"
] | [((442, 462), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (452, 462), False, 'import os\n'), ((1134, 1152), 'numpy.log', 'np.log', (['(data + 1.0)'], {}), '(data + 1.0)\n', (1140, 1152), True, 'import numpy as np\n'), ((4340, 4377), 'numpy.concatenate', 'np.concatenate', (['feature_tuple'], {'axis': '(2)'}), '(feature_tuple, axis=2)\n', (4354, 4377), True, 'import numpy as np\n'), ((5636, 5656), 'data.util_exr.open', 'exr_utils.open', (['path'], {}), '(path)\n', (5650, 5656), True, 'import data.util_exr as exr_utils\n'), ((1100, 1116), 'numpy.sum', 'np.sum', (['(data < 0)'], {}), '(data < 0)\n', (1106, 1116), True, 'import numpy as np\n'), ((2487, 2515), 'data.util_exr.open', 'exr_utils.open', (['texture_path'], {}), '(texture_path)\n', (2501, 2515), True, 'import data.util_exr as exr_utils\n'), ((3753, 3774), 'numpy.nan_to_num', 'np.nan_to_num', (['normal'], {}), '(normal)\n', (3766, 3774), True, 'import numpy as np\n'), ((4005, 4018), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (4011, 4018), True, 'import numpy as np\n'), ((5142, 5170), 'data.util_exr.open', 'exr_utils.open', (['texture_path'], {}), '(texture_path)\n', (5156, 5170), True, 'import data.util_exr as exr_utils\n'), ((490, 515), 'os.path.join', 'os.path.join', (['dir_path', 'f'], {}), '(dir_path, f)\n', (502, 515), False, 'import os\n'), ((2042, 2069), 'data.util_exr.open', 'exr_utils.open', (['normal_path'], {}), '(normal_path)\n', (2056, 2069), True, 'import data.util_exr as exr_utils\n'), ((2255, 2281), 'data.util_exr.open', 'exr_utils.open', (['depth_path'], {}), '(depth_path)\n', (2269, 2281), True, 'import data.util_exr as exr_utils\n'), ((2626, 2649), 'numpy.zeros', 'np.zeros', (['(128, 128, 3)'], {}), '((128, 128, 3))\n', (2634, 2649), True, 'import numpy as np\n'), ((2706, 2737), 'data.util_exr.open', 'exr_utils.open', (['visibility_path'], {}), '(visibility_path)\n', (2720, 2737), True, 'import data.util_exr as exr_utils\n'), ((2942, 2970), 
'data.util_exr.open', 'exr_utils.open', (['diffuse_path'], {}), '(diffuse_path)\n', (2956, 2970), True, 'import data.util_exr as exr_utils\n'), ((3164, 3193), 'data.util_exr.open', 'exr_utils.open', (['specular_path'], {}), '(specular_path)\n', (3178, 3193), True, 'import data.util_exr as exr_utils\n'), ((4306, 4327), 'numpy.zeros', 'np.zeros', (['color.shape'], {}), '(color.shape)\n', (4314, 4327), True, 'import numpy as np\n'), ((4732, 4760), 'data.util_exr.open', 'exr_utils.open', (['diffuse_path'], {}), '(diffuse_path)\n', (4746, 4760), True, 'import data.util_exr as exr_utils\n'), ((4953, 4982), 'data.util_exr.open', 'exr_utils.open', (['specular_path'], {}), '(specular_path)\n', (4967, 4982), True, 'import data.util_exr as exr_utils\n'), ((5281, 5304), 'numpy.zeros', 'np.zeros', (['(128, 128, 3)'], {}), '((128, 128, 3))\n', (5289, 5304), True, 'import numpy as np\n'), ((2180, 2203), 'numpy.zeros', 'np.zeros', (['(128, 128, 3)'], {}), '((128, 128, 3))\n', (2188, 2203), True, 'import numpy as np\n'), ((2388, 2411), 'numpy.zeros', 'np.zeros', (['(128, 128, 1)'], {}), '((128, 128, 1))\n', (2396, 2411), True, 'import numpy as np\n'), ((2864, 2887), 'numpy.zeros', 'np.zeros', (['(128, 128, 1)'], {}), '((128, 128, 1))\n', (2872, 2887), True, 'import numpy as np\n'), ((3085, 3108), 'numpy.zeros', 'np.zeros', (['(128, 128, 3)'], {}), '((128, 128, 3))\n', (3093, 3108), True, 'import numpy as np\n'), ((3312, 3335), 'numpy.zeros', 'np.zeros', (['(128, 128, 3)'], {}), '((128, 128, 3))\n', (3320, 3335), True, 'import numpy as np\n'), ((3862, 3885), 'numpy.minimum', 'np.minimum', (['normal', '(1.0)'], {}), '(normal, 1.0)\n', (3872, 3885), True, 'import numpy as np\n'), ((4875, 4898), 'numpy.zeros', 'np.zeros', (['(128, 128, 3)'], {}), '((128, 128, 3))\n', (4883, 4898), True, 'import numpy as np\n'), ((5101, 5124), 'numpy.zeros', 'np.zeros', (['(128, 128, 3)'], {}), '((128, 128, 3))\n', (5109, 5124), True, 'import numpy as np\n')] |
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
from tensorflow.python.platform import test
# When True, mpi.allreduce averages across ranks instead of summing
# (and the local verification divides by num_ranks to match).
average_allreduce = False
# Stop dumping mismatched entries after this many; <= 0 means no limit.
max_wrong_count = -1
class AllreduceTest(test.TestCase):
    """Integration test for tensorflow.contrib.mpi_collectives allreduce.

    One copy of this test runs per MPI process; the rank and world size
    are read from the PMI_RANK / PMI_SIZE environment variables.  Each
    process builds a chain of dependent allreduces and checks the
    result against a locally computed reduction.
    """
    def dumpFailure(self, my_rank, out_loc_red, my_correct, out_all_red,
                    our_correct):
        """Print every element of the 2-D outputs, tagging mismatches.

        Compares the locally reduced output and the allreduced output
        against their expected values and prints one tab-separated line
        per element, suffixed with "WRONG" where either value differs.
        Stops early once ``max_wrong_count`` mismatches have been
        printed, if that module-level limit is positive.
        """
        # Find reduced/allreduced indices that are wrong and print all the
        # values from output, slices, reduced, allreduced, so we can debug
        # which is incorrect:
        wrong_count = 0
        red_dims = out_loc_red.shape
        assert(len(red_dims) == 2)
        for i in range(red_dims[0]):
            for j in range(red_dims[1]):
                suffix = ""
                if out_loc_red[i][j] != my_correct[i][j] or \
                   out_all_red[i][j] != our_correct[i][j]:
                    suffix = "WRONG"
                    wrong_count += 1
                print("{}\t{}\t{}\t{}\t{}\t{}"
                      .format(my_rank, i, j, out_loc_red[i][j],
                              out_all_red[i][j], suffix), flush=True)
                if max_wrong_count > 0 and wrong_count >= max_wrong_count:
                    return
    def test_mpi_allreduce(self):
        """Build a 13-stage chain of dependent allreduces and verify it.

        Each stage adds a constant-initialized weight matrix (value
        2^(i+1)) and allreduces the result across ranks.  The expected
        per-element values are derived analytically stage by stage and
        the graph outputs are compared with np.allclose over 1000
        session runs.
        """
        # Get MPI rank
        my_rank = int(os.environ['PMI_RANK'])
        num_ranks = int(os.environ['PMI_SIZE'])
        stages = 13
        batch_size = 1331
        hidden_size = batch_size
        out_size = batch_size
        # Input placeholder (batch_size x hidden) - init to 1s
        inputs = tf.placeholder(tf.float32, shape=(batch_size, hidden_size),
                                name="Input")
        # Large matrices (hidden x out_dim) - init random
        weights = []
        for i in range(stages):
            initer = tf.constant_initializer(pow(2.0, i + 1.0))
            weights.append(tf.get_variable("weights_{}".format(i),
                                            shape=(hidden_size, out_size),
                                            dtype=tf.float32,
                                            initializer=initer))
        # Calculate output through dependent allreduces
        stage_input = inputs
        for i in range(stages):
            inter_output = tf.add(stage_input, weights[i],
                                   name="add_red_{}".format(i))
            stage_input = mpi.allreduce(inter_output,
                                        average=average_allreduce)
        all_reduced = stage_input
        # Local reduced output for verification
        local_input = inputs
        for i in range(stages):
            inter_output = tf.add(local_input, weights[i],
                                   name="addin_loc_{}".format(i))
            my_reducer = tf.Variable(initial_value=np.ones((hidden_size, out_size)),
                                     dtype=tf.float32, name="loc_redr_{}".format(i))
            # Emulate the sum over ranks by adding the same tensor
            # num_ranks times (every rank feeds identical values here).
            for r in range(num_ranks):
                my_reducer = tf.add(my_reducer, inter_output,
                                    name="add_loc_{}_{}".format(i, r))
            if average_allreduce:
                local_input = tf.div(my_reducer, num_ranks,
                                     name="div_loc_{}".format(i))
            else:
                local_input = my_reducer
        local_reduced = local_input
        # NOTE: This assumes that device IDs are numbered the same as ranks
        gpu_options = tf.GPUOptions(visible_device_list=str(my_rank))
        config = tf.ConfigProto(gpu_options=gpu_options)
        # MPI Session to test allreduce
        with mpi.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            input_feed = np.ones((batch_size, hidden_size), dtype=np.float32)
            our_output = input_feed[0][0]
            spread_var = 100
            # Offset every rank's input by rank * spread_var so the
            # allreduce actually mixes distinct values.
            input_feed = input_feed + my_rank * spread_var
            my_output = input_feed[0][0]
            # Closed-form expected value after each stage, for this
            # rank ("my") and summed over all ranks ("our").
            for i in range(stages):
                curr_feed = my_output + pow(2.0, i + 1.0)
                my_output = curr_feed * num_ranks + 1
                curr_our_feed = our_output + pow(2.0, i + 1.0)
                if i == 0:
                    sum_ranks = num_ranks * (num_ranks - 1) / 2
                    our_output = curr_our_feed * num_ranks + \
                        spread_var * sum_ranks
                else:
                    our_output = curr_our_feed * num_ranks
            print("rank {}: My output is {}".format(my_rank, my_output))
            my_correct = np.zeros((batch_size, hidden_size), dtype=np.float32)
            my_correct = my_correct + my_output
            print("rank {}: Our output is {}".format(my_rank, our_output))
            our_correct = np.zeros((batch_size, hidden_size), dtype=np.float32)
            our_correct = our_correct + our_output
            for i in range(1000):
                if i % 100 == 0:
                    print("{}: iter {}".format(my_rank, i), flush=True)
                feed_dict = {inputs: input_feed}
                out_all_red, out_loc_red \
                    = sess.run([all_reduced, local_reduced],
                               feed_dict=feed_dict)
                if not np.allclose(out_loc_red, my_correct) or \
                   not np.allclose(out_all_red, our_correct):
                    print("Test incorrect on iter {}".format(i), flush=True)
                    self.dumpFailure(my_rank, out_loc_red, my_correct, out_all_red,
                                     our_correct)
                    assert(np.allclose(out_loc_red, my_correct) and
                           np.allclose(out_all_red, our_correct))
if __name__ == '__main__':
    # Delegate to the TensorFlow test runner, which discovers and
    # executes the test methods of AllreduceTest.
    test.main()
| [
"numpy.allclose",
"numpy.ones",
"tensorflow.placeholder",
"tensorflow.contrib.mpi_collectives.Session",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.mpi_collectives.allreduce",
"numpy.zeros",
"tensorflow.ConfigProto",
"tensorflow.python.platform.test.main"
] | [((5055, 5066), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (5064, 5066), False, 'from tensorflow.python.platform import test\n'), ((1420, 1493), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, hidden_size)', 'name': '"""Input"""'}), "(tf.float32, shape=(batch_size, hidden_size), name='Input')\n", (1434, 1493), True, 'import tensorflow as tf\n'), ((3164, 3203), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (3178, 3203), True, 'import tensorflow as tf\n'), ((2158, 2212), 'tensorflow.contrib.mpi_collectives.allreduce', 'mpi.allreduce', (['inter_output'], {'average': 'average_allreduce'}), '(inter_output, average=average_allreduce)\n', (2171, 2212), True, 'import tensorflow.contrib.mpi_collectives as mpi\n'), ((3250, 3276), 'tensorflow.contrib.mpi_collectives.Session', 'mpi.Session', ([], {'config': 'config'}), '(config=config)\n', (3261, 3276), True, 'import tensorflow.contrib.mpi_collectives as mpi\n'), ((3356, 3408), 'numpy.ones', 'np.ones', (['(batch_size, hidden_size)'], {'dtype': 'np.float32'}), '((batch_size, hidden_size), dtype=np.float32)\n', (3363, 3408), True, 'import numpy as np\n'), ((4048, 4101), 'numpy.zeros', 'np.zeros', (['(batch_size, hidden_size)'], {'dtype': 'np.float32'}), '((batch_size, hidden_size), dtype=np.float32)\n', (4056, 4101), True, 'import numpy as np\n'), ((4233, 4286), 'numpy.zeros', 'np.zeros', (['(batch_size, hidden_size)'], {'dtype': 'np.float32'}), '((batch_size, hidden_size), dtype=np.float32)\n', (4241, 4286), True, 'import numpy as np\n'), ((3301, 3334), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3332, 3334), True, 'import tensorflow as tf\n'), ((2533, 2565), 'numpy.ones', 'np.ones', (['(hidden_size, out_size)'], {}), '((hidden_size, out_size))\n', (2540, 2565), True, 'import numpy as np\n'), ((4633, 4669), 'numpy.allclose', 'np.allclose', 
(['out_loc_red', 'my_correct'], {}), '(out_loc_red, my_correct)\n', (4644, 4669), True, 'import numpy as np\n'), ((4690, 4727), 'numpy.allclose', 'np.allclose', (['out_all_red', 'our_correct'], {}), '(out_all_red, our_correct)\n', (4701, 4727), True, 'import numpy as np\n'), ((4927, 4963), 'numpy.allclose', 'np.allclose', (['out_loc_red', 'my_correct'], {}), '(out_loc_red, my_correct)\n', (4938, 4963), True, 'import numpy as np\n'), ((4985, 5022), 'numpy.allclose', 'np.allclose', (['out_all_red', 'our_correct'], {}), '(out_all_red, our_correct)\n', (4996, 5022), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Validate the number of CCDs in each test region against DR7.
"""
import os, pdb
import numpy as np
from astrometry.util.fits import fits_table
def main():
    """Print tables comparing CCD counts in each test region across releases.

    For each test region, reports the CCD count in the older data release
    (DR7 for DECam, DR6 for 90prime/mosaic) alongside the DR8 counts both
    before and after the ``ccd_cuts`` quality cuts.
    """
    regions = ('dr8-test-overlap', 'dr8-test-s82', 'dr8-test-hsc-sgc', 'dr8-test-hsc-ngc',
               'dr8-test-edr', 'dr8-test-hsc-north', 'dr8-test-deep2-egs')

    def _load_ccds(path):
        """Read a CCDs table; return (table, n passing cuts), or ([], 0) if the file is missing."""
        if os.path.isfile(path):
            ccds = fits_table(path)
            # ccd_cuts == 0 marks CCDs that pass every quality cut
            return ccds, np.sum(ccds.ccd_cuts == 0)
        return [], 0

    print()
    print('Region DR7 DR8-DECam (no cuts) DR8-DECam (after cuts)')
    for region in regions:
        dr8_decam, dr8_decam_cuts = _load_ccds('dr8b/ccds-{}-decam.fits'.format(region))
        dr7, dr7_cuts = _load_ccds('dr7/ccds-{}.fits'.format(region))
        print('{:18} {:6d} {:6d} {:6d}'.format(region, len(dr7), len(dr8_decam), dr8_decam_cuts))
    print()
    print('Region DR6 DR8-90prime/mosaic (no cuts) DR8-90prime/mosaic (after cuts) ')
    for region in regions:
        dr8_mosaic, dr8_mosaic_cuts = _load_ccds('dr8b/ccds-{}-90prime-mosaic.fits'.format(region))
        # BUG FIX: original had "dr6_cuts - 0" (a no-op expression, not an
        # assignment), which left dr6_cuts unbound when the DR6 file was absent.
        dr6, dr6_cuts = _load_ccds('dr6/ccds-{}.fits'.format(region))
        print('{:18} {:6d} {:6d} {:6d}'.format(region, len(dr6), len(dr8_mosaic), dr8_mosaic_cuts))
    print()

if __name__ == '__main__':
    main()
| [
"numpy.sum"
] | [((614, 645), 'numpy.sum', 'np.sum', (['(dr8_decam.ccd_cuts == 0)'], {}), '(dr8_decam.ccd_cuts == 0)\n', (620, 645), True, 'import numpy as np\n'), ((880, 905), 'numpy.sum', 'np.sum', (['(dr7.ccd_cuts == 0)'], {}), '(dr7.ccd_cuts == 0)\n', (886, 905), True, 'import numpy as np\n'), ((1534, 1566), 'numpy.sum', 'np.sum', (['(dr8_mosaic.ccd_cuts == 0)'], {}), '(dr8_mosaic.ccd_cuts == 0)\n', (1540, 1566), True, 'import numpy as np\n'), ((1799, 1824), 'numpy.sum', 'np.sum', (['(dr6.ccd_cuts == 0)'], {}), '(dr6.ccd_cuts == 0)\n', (1805, 1824), True, 'import numpy as np\n')] |
# ---------------------- OFF THE SHELF IMPORTED PACKAGES -----------------------
import numpy as np
import itertools
from sortedcontainers import SortedSet, SortedList
import warnings
import sys
import operator
import jinja2
import os
from jinja2 import Template
from pdflatex import PDFLaTeX
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib
import json
# -------------------------- CUSTOM IMPORTED PACKAGES --------------------------
from steel_beam_analysis import units, vGetLoc, vGetBaseUnits, vGetMag
from steel_beam_analysis.load import AreaLoad, LineLoad, LoadCombo, PointLoad
from steel_beam_analysis.element import Element
from steel_beam_analysis.node import Node
from steel_beam_analysis.span import Span
from steel_beam_analysis.unbracedSpan import UnbracedSpan
import steel_beam_analysis.stringFixer as sf
# config jinja templating environment
# NOTE: Jinja's default delimiters ({{ }}, {% %}, {# #}) collide with LaTeX
# braces, so LaTeX-safe delimiters (\BLOCK{}, \VAR{}, \#{}) are used instead.
# autoescape is off because the output is LaTeX, not HTML.
latex_jinja_env = jinja2.Environment(block_start_string =
'\BLOCK{', block_end_string = '}', variable_start_string = '\VAR{', variable_end_string = '}',
comment_start_string = '\#{', comment_end_string = '}', line_statement_prefix = '%%',
line_comment_prefix = '%#', trim_blocks = True, autoescape = False,
loader = jinja2.FileSystemLoader(os.path.abspath('.')))
# expose Python builtins to templates so they can be called inline
latex_jinja_env.globals['len'] = len
latex_jinja_env.globals['str'] = str
# config matplotlib settings
# pgf backend renders figures with LaTeX so plot text matches the report fonts
matplotlib.use('pgf')
matplotlib.rcParams.update({'pgf.texsystem': 'pdflatex',
'font.family': 'serif', 'text.usetex': True, 'pgf.rcfonts': False})
class Beam:
    """Model a beam composed of nodes, elements, material props and loads."""

    def __init__(self, nodes, **kwargs):
        """Initialize analysis state, containers and user-overridable options.

        Args:
            nodes: iterable of Node objects for the beam model (not consumed
                directly here; callers populate the model via other methods).
            **kwargs: optional overrides for the defaults below (e.g.
                ``considerSelfWeight``, ``deflRatios``, ``outputPDF``, project
                info strings, seismic factors, output units).
        """
        # a
        self.A = None
        self.ASCE7version = kwargs.get('ASCE7version', 16)
        self.avgNodeSpacing = None
        # b
        self.bendingAxis = kwargs.get('bendingAxis', 'strong')
        self.bendingCheck = None
        # c
        self.considerSelfWeight = kwargs.get('considerSelfWeight', True)
        # d
        self.deflChecks = []
        self.deflCombos = set()
        self.deflLimGlass = kwargs.get('deflLimGlass', 0.25 * units.inch)
        self.deflRatios = kwargs.get('deflRatios', {'TL': 240, 'LL': 360})
        self.depthClass = kwargs.get('depthClass', 10)
        # e
        self.elements = SortedSet([])
        self.eleSpacing = kwargs.get('eleSpacing', 1 * units.inch)
        # f
        self.F = {}
        self.F0 = {}
        self.F0Body = {}
        self.F0Node = {}
        self.FF = {}
        self.freeDOFs = []
        # g
        self.glassEverywhere = kwargs.get('glassEverywhere', False)
        # i
        self.I = None
        # k
        self.K = None
        self.KFF = None
        # l
        self.Lcombos = set()
        self.len = None
        self.loadTypes = SortedSet([])
        self.loadTypesSub = None
        # m
        self.M = {}
        self.maxMomentNode = None
        self.maxShearNode = None
        self.maxDeflNodes = {}
        # n
        self._nodes = SortedSet([])
        # o
        self.omega0 = kwargs.get('omega0', 2.5)
        self.outUnit = kwargs.get('outUnit', {'M': 'kft', 'V': 'kip', 'defl': 'inch', 'loc': 'ft'})
        self.outputPDF = kwargs.get('outputPDF', False)
        self.overallCheck = None
        # p
        self.patternLoads = kwargs.get('patternLoads', ['L', 'Lr'])
        self.pointLoads = []
        self.projectInfo_memberName = kwargs.get('name', 'demo')
        self.projectInfo_project = kwargs.get('project', '123 Maple Street, San Francisco CA')
        self.projectInfo_level = kwargs.get('level', 2)
        self.projectInfo_firm = kwargs.get('firm', 'ABC Company')
        self.projectInfo_engineer = kwargs.get('engineer', 'Jesse')
        self.projectInfo_checker = kwargs.get('checker', 'Joey')
        # r
        self._rawDistLoads = kwargs.get('rawDistLoads', [])
        self.realNodes = SortedSet([])
        self.restrainDOFs = []
        self.rho = kwargs.get('rho', 1.3)
        # s
        self.S = None
        self.SDS = kwargs.get('SDS', 1.0)
        # FIX: original read "self.seismicFactors = self.seismicFactors = {...}",
        # a redundant double assignment to the same attribute.
        self.seismicFactors = {'omega0': self.omega0, 'rho': self.rho}
        self.seismicFactorUse = kwargs.get('seismicFactorUse', 'omega0')
        self.seismicLfactor = kwargs.get('seismicLfactor', 0.5)
        self.shape = None
        self.shearCheck = None
        self.spans = SortedList()
        self.strengthCombos = set()
        self.supports = None
        # u
        self.U = {}
        self.UF = {}
        self.unbracedSpanBoundaryPts = SortedSet([])
        self.unbracedSpans = []
        # v
        self.V = {}
        # w
        self.weight = None
        # convert runtime warnings to errors to get traceback and address them
        warnings.simplefilter('error', RuntimeWarning)
@property
def rawDistLoads(self):
return self._rawDistLoads
@rawDistLoads.setter
def rawDistLoads(self, vals):
for distLoad in vals:
if not isinstance(distLoad, LineLoad):
raise TypeError
self._rawDistLoads = vals
    def plotEnvelope(self, attr, units, combos, title, close, maxNodes, maxVals, labelNames, maxCombos):
        """Plot envelope of values at a node.

        Plots one curve per load combination of the nodal quantity ``attr``
        along the beam, annotates the governing node(s), and saves the figure
        as a .pgf file for the LaTeX report.

        Args:
            attr: name of the per-node dict attribute to plot (e.g. 'ultM');
                each node's dict is keyed by str(load combo).
            units: output unit string the plotted values are converted to.
            combos: iterable of load combinations, one plotted line each.
            title: suffix for the saved figure filename.
            close: if True, pin the curve to zero at both beam ends
                (used for shear/moment diagrams).
            maxNodes: nodes to annotate as governing.
            maxVals: governing values (with units) parallel to maxNodes.
            labelNames: LaTeX symbol for each annotation (e.g. 'M_u').
            maxCombos: governing combo name for each annotation.
        """
        if close:
            locs = [0]
        else:
            locs = []
        for node in self.nodes:
            locs.append(node.loc.to('ft').magnitude)
        if close:
            locs.append(self.nodes[-1].loc.to('ft').magnitude)
        fig = plt.figure(figsize = (8, 3))
        ax = fig.add_subplot(1, 1, 1)
        # one envelope line per load combination
        for lc in combos:
            if close:
                vals = [0]
            else:
                vals = []
            for node in self.nodes:
                plotVal = getattr(node, attr)
                vals.append(plotVal[str(lc)].to(units).magnitude)
            if close:
                vals.append(0)
            ax.plot(locs, vals, linewidth = 1.5)
        # annotate the governing node(s), placing labels toward mid-span
        for idx, maxNode in enumerate(maxNodes):
            xCoord = maxNode.loc.to('ft').magnitude
            yCoord = maxVals[idx].to(units).magnitude
            maxVal = round(maxVals[idx].to(units).magnitude, 1)  # NOTE(review): unused local
            textCoord = 100 if xCoord <= self.len.to('ft').magnitude / 2 else -100 # vary label loc
            textAlign = 'right' if xCoord <= self.len.to('ft').magnitude / 2 else 'left' # vary label alignment
            ax.annotate(f'${labelNames[idx]} =$ {round(maxVals[idx].to(units), 1)}', xy=(xCoord, yCoord), xycoords='data', xytext=(textCoord, 0), textcoords='offset points', size=10, bbox=dict(boxstyle="round4,pad=.5", fc="0.8"), arrowprops=dict(arrowstyle='-'), ha=textAlign, va='center')
            plt.text(0, 0.15, f'Max combo: {maxCombos[idx]}', ha='left', va='top', transform = ax.transAxes, size = 10, bbox=dict(boxstyle="round4,pad=.5", fc="0.8"))
        # strip the plot frame for the report style
        for spine in plt.gca().spines.values():
            spine.set_visible(False)
        plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='on')
        plt.grid(b=True, which='major', color='k', linestyle='dotted')
        plt.savefig(f'{self.outputPath}/{self.projectInfo_memberName}_{title}.pgf')
    def plotLoadDiagram(self):
        """Plot a diagram of the applied loads.

        Draws, on a two-panel figure: the distributed loads (stacked with
        vertical offsets where they overlap), the beam elevation with its
        flanges and shape label, support markers, span dimension lines, and
        arrows at applied point-load locations. Saves the result as a .pgf
        file for the LaTeX report. Returns None.
        """
        fig, axs = plt.subplots(2, gridspec_kw={'hspace': -0.09, 'height_ratios': [3, 1]}, figsize = (8, 3), sharex = True)
        prop_cycle = plt.rcParams['axes.prop_cycle']
        cycle_colors = prop_cycle.by_key()['color']
        # create dictionaries from dist loads to store offset magnitudes
        distLoads = []
        for load in sorted(self.rawDistLoads, reverse = True):
            distLoads.append({'load': load, 'offset': 0})
        # assign vertical offsets to overlapping loads so they stack visually
        plotLoads = []
        for load in distLoads:
            for plotLoad in plotLoads[::-1]:
                if plotLoad['load'].iLoc <= load['load'].iLoc <= plotLoad['load'].jLoc:
                    load['offset'] += max(plotLoad['load'].iLineLoad, plotLoad['load'].jLineLoad).to('plf').magnitude + plotLoad['offset']
                    break
            plotLoads.append(load)
        # plot distributed loads as (possibly trapezoidal) polygons
        for idx, load in enumerate(plotLoads):
            iMag = load['load'].iLineLoad.to('plf').magnitude
            jMag = load['load'].jLineLoad.to('plf').magnitude
            iLoc = load['load'].iLoc.to('ft').magnitude
            jLoc = load['load'].jLoc.to('ft').magnitude
            points = [[iLoc, load['offset']], [iLoc, iMag + load['offset']], [jLoc, jMag + load['offset']], [jLoc, load['offset']]]
            polygon = plt.Polygon(points, fill = True, alpha = 0.4, color = cycle_colors[idx])
            axs[0].add_patch(polygon)
            axs[0].text((iLoc + jLoc) / 2, (jMag) / 2 + load['offset'], f'w\\textsubscript{{{idx+1}}}', bbox = dict(boxstyle = "round4,pad=.5", fc = "0.8"), ha = 'center', va = 'center')
        # plot beam flanges
        d = self.d.to('in').magnitude
        tf = d / 12 # flange width set to constant for aesthetic reasons
        locs = [0, self.len.to('ft').magnitude]
        topTopFlange = [0, 0]
        botTopFlange = [0 - tf, 0 - tf]
        topBotFlange = [-d, -d]
        botBotFlange = [-d + tf, -d + tf]
        flanges = [topTopFlange, botTopFlange, topBotFlange, botBotFlange]
        for flange in flanges:
            axs[1].plot(locs, flange, color = 'black', linewidth = 1)
        axs[1].text(self.len.to('ft').magnitude / 2, -self.d.to('in').magnitude / 2, self.shape, bbox=dict(boxstyle="round4,pad=.5", fc="0.8"), ha='center', va='center')
        # plot vertical lines at ends of beam
        leftEndX = [0, 0]
        leftEndY = [-d, 0]
        rightEndX = [self.len.to('ft').magnitude, self.len.to('ft').magnitude]
        rightEndY = leftEndY
        axs[1].plot(leftEndX, leftEndY, color = 'black', linewidth = 1)
        axs[1].plot(rightEndX, rightEndY, color = 'black', linewidth = 1)
        # plot gravity support locations (triangles = pins, squares = fixed)
        pins = [support for support in self.supports if support.condition == 'pin']
        fixes = [support for support in self.supports if support.condition == 'fix']
        pinX = [pin.loc.to('ft').magnitude for pin in pins]
        pinY = [-d - 3 for pin in pins]
        fixX = [fix.loc.to('ft').magnitude for fix in fixes]
        fixY = [-d - 3 for fix in fixes]
        axs[1].scatter(pinX, pinY, marker = '^', s = 200, c = 'red')
        axs[1].scatter(fixX, fixY, marker = 's', s = 200, c = 'blue')
        # plot dimensions between supports
        spanPts = [span.iNode.loc.to('ft').magnitude for span in self.spans]
        spanPts.append(self.len.to('ft').magnitude)
        for idx, support in enumerate(spanPts):
            if idx != len(spanPts) - 1:
                dist = spanPts[idx + 1] - support
                # plot dimension line (no text)
                axs[1].annotate(f'', xy=(support, -d-5), xycoords='data', xytext=(support + dist, -d-5), textcoords='data', arrowprops=dict(arrowstyle='<->, head_width=0.5', color = '#33ADFF'), ha='center')
                # plot text in center of dimension line
                axs[1].text(support + dist/2, -d-5, f'Span {idx} = {dist} ft', bbox=dict(boxstyle="round4, pad=0.5", fc="0.8"), size = 10, ha='center', va='center')
        # plot applied point loads (one arrow per loaded location)
        pointLoadLocs = []
        pointLoadNodes = [node for node in self.nodes if node.pointLoads]
        for node in pointLoadNodes:
            for pointLoad in node.pointLoads:
                pointLoadLocs.append(node.loc.to('ft').magnitude)
        pointLoadLocs = set(pointLoadLocs)
        for loc in pointLoadLocs:
            axs[0].annotate(f'$P_x$', xy=(loc, 0), xycoords='data', xytext=(0, 100), textcoords='offset points', size=12, bbox=dict(boxstyle="round4,pad=.5", fc="0.8"), arrowprops=dict(arrowstyle='->, head_width=0.5'), ha='center')
        # plot settings and save
        fig.patch.set_visible(False)
        axs[0].axis('off')
        axs[1].axis('off')
        axs[1].autoscale()
        axs[0].autoscale()
        plt.savefig(f'{self.outputPath}/{self.projectInfo_memberName}_loadDiagram.pgf', dpi=90, pad_inches=0.5)
    def runAnalysis(self):
        """Run analysis on the beam system.

        Assembles and solves the finite-element system, then post-processes:
        builds element fixed-end force arrays; assembles the global load and
        stiffness arrays; solves for displacements at the free DOFs; recovers
        member forces; applies load-combination factors at every node; locates
        governing demand nodes; builds unbraced spans; runs capacity/DCR,
        bending, shear and deflection checks; computes support reactions; and
        optionally emits the PDF report. Mutates Beam/Node/Span state in
        place and returns None. Statement order is significant throughout.
        """
        # ---------------------- FORM ELEMENT LOAD ARRAYS ----------------------
        for element in self.elements:
            element.formf0e()
        # ------------------------ CALC DEMAND USING FEA -----------------------
        # demands from applied distributed loads on elements
        # (2 DOFs per node: translation and rotation; hence the 2*idx indexing)
        for type in self.loadTypesSub:
            self.F0Body[type] = np.zeros((len(self.nodes) * 2, 1))
            for idx, element in enumerate(self.elements):
                f0e = element.f0e.get(type, np.array(([0],[0],[0],[0])))
                self.F0Body[type][2*idx: 2*idx+4] = np.add(self.F0Body[type][2*idx: 2*idx+4], f0e)
        # form F0Node (forces applied directly at nodes)
        for type in self.loadTypesSub:
            self.F0Node[type] = np.zeros((len(self.nodes) * 2, 1))
            for idx, node in enumerate(self.nodes):
                self.F0Node[type][2*idx, 0] = node.rawVapply.get(type,
                0 * units.lbf).to(units.lbf).magnitude
                self.F0Node[type][2*idx+1, 0] = node.rawMapply.get(type,
                0 * units.lbin).to(units.lbin).magnitude
        # form point loads list used for plotting and output table
        idx = 1 # starts at 1 because used in output
        for node in self.nodes:
            for pointLoad in node.pointLoads:
                loc = sf.fixUnits(node.loc, type = 'text')
                shear = sf.fixUnits(-pointLoad.shear, type = 'text')
                self.pointLoads.append({'id': f'P\\textsubscript{{{idx}}}', 'loc': loc, 'shear': shear, 'type': pointLoad.type, 'desc': pointLoad.desc})
                idx +=1
        # combination of demands from elements and nodes
        for type in self.loadTypesSub:
            self.F0[type] = np.add(self.F0Body[type], self.F0Node[type])
        # global applied forces at free DOFs
        for type in self.loadTypesSub:
            self.FF[type] = self.F0[type][np.ix_(self.freeDOFs)]
        # global stiffness array (element matrices overlap by one node)
        self.K = np.zeros((len(self.nodes) * 2, len(self.nodes) * 2))
        for idx, element in enumerate(self.elements):
            self.K[2*idx: 2*idx+4, 2*idx: 2*idx+4] = np.add(self.K[2*idx: 2*idx+4, 2*idx: 2*idx+4], element.kE)
        # global stiffness at free DOFs
        self.KFF = self.K[np.ix_(self.freeDOFs, self.freeDOFs)]
        # displacements at free DOFs: UF = KFF^-1 * FF
        for type in self.loadTypesSub:
            self.UF[type] = np.matmul(np.linalg.inv(self.KFF), self.FF[type])
        # pass displacements at free DOFs to nodes
        # (even DOF index = translation, odd = rotation)
        for type in self.loadTypesSub:
            for idx, dof in enumerate(self.freeDOFs):
                if dof % 2 == 0:
                    self.nodes[int(dof/2)].rawDefls[type] = self.UF[type][idx, 0]
                else:
                    self.nodes[int((dof-1)/2)].rawRotations[type] = self.UF[type][idx, 0]
        # assemble displacement array (translational + rotational) for global system
        for type in self.loadTypesSub:
            self.U[type] = np.zeros((len(self.nodes) * 2, 1))
            for idx, node in enumerate(self.nodes):
                self.U[type][2*idx, 0] = node.rawDefls.get(type, 0)
                self.U[type][2*idx+1, 0] = node.rawRotations.get(type, 0)
        # set units for raw displacements at nodes
        for node in self.nodes:
            node.setRawDispUnits()
        # forces (moments + shears) in global system: f = kE*u - f0e per element
        for type in self.loadTypesSub:
            F = np.zeros((len(self.nodes) * 2, 1))
            fEvals = []
            for idx, elem in enumerate(self.elements):
                f0e = elem.f0e.get(type, np.array(([0],[0],[0],[0])))
                fEvals.append(np.add(np.matmul(elem.kE,
                self.U[type][2*idx:2*idx+4]), -f0e))
            for idx, elem in enumerate(self.elements):
                F[2*idx:2*idx+2] = fEvals[idx][0:2]
            F[-2:] = -fEvals[len(self.elements) - 1][2:4]
            self.F[type] = F
        # extract bending and shear demands from global array, set nodal values
        for type in self.loadTypesSub:
            self.M[type] = - self.F[type][1::2] # sign flipped for convention
            self.V[type] = self.F[type][0::2]
            for idx, node in enumerate(self.nodes):
                node.rawM[type] = self.M[type][idx, 0] * units.lbin
                node.rawV[type] = self.V[type][idx, 0] * units.lbf
        # calc factored values and set values at each node
        # IDEA: any "shorthand" way to do all these nested for loops?
        # https://stackoverflow.com/questions/5462047/pythonic-shortcut-for-doubly-nested-for-loops
        for node in self.nodes:
            for lc in self.strengthCombos:
                node.ultV[str(lc)] = sum([node.rawV.get(load['type'], 0) * load['factor'] for load in lc.loads])
                node.ultVapply[str(lc)] = sum([node.rawVapply.get(load['type'], 0) * load['factor'] for load in lc.loads])
                node.ultM[str(lc)] = sum([node.rawM.get(load['type'], 0) * load['factor'] for load in lc.loads])
                node.ultMapply[str(lc)] = sum([node.rawMapply.get(load['type'], 0) * load['factor'] for load in lc.loads])
            for lc in self.deflCombos:
                node.ultDefls[str(lc)] = sum([node.rawDefls.get(load['type'], 0 * units.inch) * load['factor'] for load in lc.loads])
            for lc in self.Lcombos:
                node.ultDeflsL[str(lc)] = sum([node.rawDefls.get(load['type'], 0 * units.inch) * load['factor'] for load in lc.loads])
            # record the governing (max absolute) combo and value per quantity
            node.deflMaxAbs['combo'] = max(node.ultDefls, key = lambda y: abs(node.ultDefls[y]))
            node.deflMaxAbs['val'] = node.ultDefls[node.deflMaxAbs['combo']]
            node.MuMaxAbs['combo'] = max(node.ultM, key = lambda y: abs(node.ultM[y]))
            node.MuMaxAbs['val'] = node.ultM[node.MuMaxAbs['combo']]
            node.VuMaxAbs['combo'] = max(node.ultV, key = lambda y: abs(node.ultV[y]))
            node.VuMaxAbs['val'] = node.ultV[node.VuMaxAbs['combo']]
            if 'L' in self.loadTypes:
                node.deflMaxAbsL['combo'] = max(node.ultDeflsL, key = lambda y: abs(node.ultDeflsL[y]))
                node.deflMaxAbsL['val'] = node.ultDeflsL[node.deflMaxAbsL['combo']]
        # get max demand nodes
        self.maxMomentNode = max(self.nodes, key = lambda node: abs(node.MuMaxAbs['val']))
        self.maxShearNode = max(self.nodes, key = lambda node: abs(node.VuMaxAbs['val']))
        self.maxDeflNodes['TL'] = max(self.nodes, key = lambda node: abs(node.deflMaxAbs['val']))
        if 'L' in self.loadTypes:
            self.maxDeflNodes['LL'] = max(self.nodes, key = lambda node: abs(node.deflMaxAbsL['val']))
        # set max total load & live load deflection nodes in each span
        for span in self.spans:
            span.setMaxTLdeflNode()
            if 'L' in self.loadTypes:
                span.setMaxLLdeflNode()
        # -------------------------- SET UNBRACED SPANS ------------------------
        # always include both ends of the beam (even if cantilevered ends)
        self.unbracedSpanBoundaryPts.add(self.nodes[0])
        self.unbracedSpanBoundaryPts.add(self.nodes[-1])
        # add in top/bottom brace points based on sign of moment demand at node
        for node in self.nodes:
            if (node.addUnbracedBoundaryPt()):
                self.unbracedSpanBoundaryPts.add(node)
        for idx, node in enumerate(self.unbracedSpanBoundaryPts):
            if idx != 0:
                self.unbracedSpans.append(UnbracedSpan(self, self.unbracedSpanBoundaryPts[idx-1], node))
        # walk nodes once, assigning each to its containing unbraced span;
        # a boundary node belongs to both the span it closes and the next one
        spanIter = 0
        for node in self.nodes:
            if node.loc == self.unbracedSpans[spanIter].jNode.loc:
                self.unbracedSpans[spanIter].nodes.add(node)
                node.assignUnbracedSpan(self.unbracedSpans[spanIter])
                spanIter += 1
            if spanIter < len(self.unbracedSpans):
                self.unbracedSpans[spanIter].nodes.add(node)
                node.assignUnbracedSpan(self.unbracedSpans[spanIter])
        # set max moment in unbraced spans
        for span in self.unbracedSpans:
            span.setMaxMomentNode()
        # ------------- CALC CAPACITY, DCRs AND CHECK BENDING/SHEAR ------------
        self.calcCapacity()
        self.calcDCRs()
        self.checkBending()
        self.checkShear()
        # --------------------------- CALC REACTIONS ---------------------------
        for idx, node in enumerate(self.nodes):
            if node in self.supports:
                if node.loc == 0 * units.ft:
                    node.calcReaction(type = 'leftEnd')
                elif node.loc == self.len:
                    node.calcReaction(type = 'rightEnd')
                else:
                    node.calcReaction(leftNode = self.nodes[idx-1], rightNode = self.nodes[idx+1])
        # ------------------------- CHECK DEFLECTIONS --------------------------
        for span in self.spans:
            if span.maxDeflNodes['TL'].deflMaxAbs['val'] != 0 * units.inch:
                span.setTLdeflRatios(self.deflRatios['TL'])
                span.checkDeflections('TL')
            if 'L' in self.loadTypes:
                if span.maxDeflNodes['LL'].deflMaxAbsL['val'] != 0 * units.inch:
                    span.setLLdeflRatios(self.deflRatios['LL'])
                    span.checkDeflections('LL')
        for span in self.spans:
            self.deflChecks.append(span.isDeflectionOK())
        # deflection check w/ glass at discrete points
        for node in self.nodes:
            if node.condition == 'glass':
                node.checkGlassDeflection(self.deflLimGlass)
        # set beam deflection check at glass based on nodal deflections
        self.deflChecks.append('NG' if any(n.glassDeflCheck == 'NG' for n in self.nodes) else 'OK')
        # if glass everywhere, check that glass deflection passes everywhere
        if self.glassEverywhere:
            self.deflChecks.append('OK' if all(abs(n.deflMaxAbs['val']) < self.deflLimGlass for n in self.nodes) else 'NG')
        # --------------------------- CHECK OVERALL ----------------------------
        checks = []
        checks.append(self.bendingCheck)
        checks.append(self.shearCheck)
        for check in self.deflChecks:
            checks.append(check)
        self.overallCheck = 'OK' if all(c == 'OK' for c in checks) else 'NG'
        # ----------------------------- OUTPUT PDF ----------------------------
        maxDeflNodes = [self.maxDeflNodes['TL']]
        maxDeflVals = [self.maxDeflNodes['TL'].deflMaxAbs['val']]
        maxDeflLabels = ['\Delta']
        maxDeflAnnos = [self.maxDeflNodes['TL'].deflMaxAbs['combo']]
        if 'L' in self.loadTypes:
            maxDeflNodes.append(self.maxDeflNodes['LL'])
            maxDeflVals.append(self.maxDeflNodes['LL'].deflMaxAbsL['val'])
            maxDeflLabels.append('\Delta')
            maxDeflAnnos.append(self.maxDeflNodes['LL'].deflMaxAbsL['combo'])
        if self.outputPDF:
            self.plotLoadDiagram()
            self.plotEnvelope('ultV', self.outUnit['V'],
                self.strengthCombos, 'shear', True, [self.maxShearNode],
                [self.maxShearNode.VuMaxAbs['val']], ['V_u'],
                [self.maxShearNode.VuMaxAbs['combo']])
            self.plotEnvelope('ultM', self.outUnit['M'],
                self.strengthCombos, 'moment', True, [self.maxMomentNode],
                [self.maxMomentNode.MuMaxAbs['val']], ['M_u'],
                [self.maxMomentNode.MuMaxAbs['combo']])
            self.plotEnvelope('ultDefls', self.outUnit['defl'],
                self.deflCombos, 'defl', False, maxDeflNodes, maxDeflVals,
                maxDeflLabels, maxDeflAnnos)
            self.plotMaterialSpecificFigures()
            self.outputPDFreport()
@property
def nodes(self):
return self._nodes
@nodes.setter
def nodes(self, vals):
for node in vals:
if (not isinstance(node, Node)):
raise TypeError
self._nodes = vals
    def setBeamSystem(self, nodes):
        """Set generic beam system parameters.

        Builds the full analysis model from user-supplied nodes: spans and
        supports, load types (including pattern-load subdivision), mesh nodes
        at the element spacing, elements, self weight, and the mapping of
        applied loads onto elements and nodes. Exits the process via
        ``sys.exit`` on invalid input (duplicate node locations, insufficient
        supports, interior fixed nodes). Returns None.

        Args:
            nodes: iterable of Node objects; locations must be unique.
        """
        if len(set(nodes)) < len(nodes):
            sys.exit('ERROR: Multiple nodes cannot have the same location.')
        for node in nodes:
            self.nodes.add(node)
        self.setShape()
        self.setRefDesignValues()
        self.len = self.nodes[-1].loc - self.nodes[0].loc
        # ----------------- SET SPAN GEOMETRY (i AND j NODES) ------------------
        for idx, node in enumerate(self.nodes):
            if idx == 0:
                self.realNodes.add(node) # 'real' nodes define spans
            elif node.condition == 'pin' or node.condition == 'fix':
                self.realNodes.add(node)
            elif idx == (len(self.nodes) - 1):
                self.realNodes.add(node)
        for idx, iNode in enumerate(self.realNodes[:-1]):
            self.spans.add(Span(iNode, self.realNodes[idx + 1]))
        supportTypes = ['pin', 'fix']
        self.supports = [node for node in self.realNodes if node.condition in supportTypes]
        # --------------------------- SET LOAD TYPES ---------------------------
        # NOTE: if load types are set after distributed loads, then separate
        # lines for including D if self weight is to be considered can be removed
        # include self weight in load types if self weight is considered
        if self.considerSelfWeight == True:
            self.loadTypes.add('D')
        # include load types for point loads
        for node in self.nodes:
            for pointLoad in node.pointLoads:
                if (not isinstance(pointLoad, PointLoad)):
                    raise TypeError
                self.loadTypes.add(pointLoad.type)
        # include load types for distributed loads
        for distLoad in self.rawDistLoads:
            self.loadTypes.add(distLoad.type)
        # remove pattern load types that aren't actually on beam
        self.patternLoads = [load for load in self.patternLoads if load in self.loadTypes]
        # set subdivided load types list (non-pattern load types + pattern load types w/ indicies)
        tempList0 = [load for load in self.loadTypes if load not in self.patternLoads]
        tempList1 = []
        for patLoad in self.patternLoads:
            for idx, span in enumerate(self.spans):
                tempList1.append(f'{patLoad}{idx}')
        self.loadTypesSub = tempList0 + tempList1
        self.setMaterialSpecificSystem()
        # ----------- CREATE NODES THAT ARE PROGRAMATICALLY REQUIRED -----------
        # create nodes where dist loads start and end if no node already exists
        if self.rawDistLoads:
            for distLoad in self.rawDistLoads:
                self.nodes.add(Node(distLoad.iLoc))
                self.nodes.add(Node(distLoad.jLoc))
        # add mesh nodes at element spacing interval
        meshNodes = []
        for idx in range(len(self.nodes)-1):
            # skip the nodes it pertains to
            for location in np.arange(self.nodes[idx].loc + self.eleSpacing, self.nodes[idx + 1].loc - self.eleSpacing, self.eleSpacing, dtype = 'object'):
                meshNodes.append(Node(location))
        for meshNode in meshNodes:
            self.nodes.add(meshNode)
        # ------------------------ ASSIGN NODES TO SPANS -----------------------
        # walk nodes once; a boundary node belongs to the span it closes and the next
        spanIter = 0
        for node in self.nodes:
            if node.loc == self.spans[spanIter].jNode.loc:
                self.spans[spanIter].nodes.add(node)
                spanIter += 1
            if spanIter < len(self.spans):
                self.spans[spanIter].nodes.add(node)
                node.span = self.spans[spanIter]
        # ----------------------- CREATE ELEMENT GEOMETRY ----------------------
        for idx, iNode in enumerate(self.nodes[:-1]):
            self.elements.add(Element(self, iNode, self.nodes[idx+1]))
        # ----------------------- ASSIGN ELEMENTS TO SPANS ---------------------
        spanIter = 0
        for element in self.elements:
            if element.jNode.loc == self.spans[spanIter].jNode.loc:
                self.spans[spanIter].elements.add(element)
                spanIter += 1
            if spanIter < len(self.spans):
                self.spans[spanIter].elements.add(element)
        # ----------- CALC SELF WEIGHT & INCLUDE AS DISTRIBUTED LOAD -----------
        self.weight = round((self.unitWt * self.A).to(units.plf), 1)
        if self.considerSelfWeight:
            self.rawDistLoads.append(LineLoad(iLoc = 0 * units.ft, jLoc = self.len, iLineLoad = self.weight, jLineLoad = self.weight, desc = 'Self weight'))
        # ----------------- ASSIGN SUPERIMPOSED LOADS TO ELEMENTS --------------
        if self.rawDistLoads:
            for dl in self.rawDistLoads:
                rangeElems = [elem for elem in self.elements if elem.iNode.loc >= dl.iLoc and elem.jNode.loc <= dl.jLoc]
                for elem in rangeElems:
                    # interpolate the (possibly sloped) line load at element ends
                    iDist = elem.iNode.loc - dl.iLoc
                    jDist = elem.jNode.loc - dl.iLoc
                    iMag = dl.iLineLoad + dl.slope * (iDist)
                    jMag = dl.iLineLoad + dl.slope * (jDist)
                    elem.iDistLoads[dl.type] = elem.iDistLoads.get(dl.type, 0) + iMag.to(units.pli).magnitude
                    elem.jDistLoads[dl.type] = elem.jDistLoads.get(dl.type, 0) + jMag.to(units.pli).magnitude
        # ------------------- ASSIGN SUPERIMPOSED LOADS TO NODES ---------------
        for node in self.nodes:
            for type in [pointLoad.type for pointLoad in node.pointLoads]:
                node.rawVapply[type] = sum([ptLd.shear for ptLd in node.pointLoads if ptLd.type == type])
                node.rawMapply[type] = sum([ptLd.moment for ptLd in node.pointLoads if ptLd.type == type])
        # ------------------------ BREAK OUT PATTERN LOADS ---------------------
        # rename pattern load keys to span-indexed keys (e.g. 'L' -> 'L0', 'L1')
        # for applied distributed loads
        for idx, span in enumerate(self.spans):
            for elem in span.elements:
                for patLoad in self.patternLoads:
                    elem.iDistLoads[f'{patLoad}{idx}'] = elem.iDistLoads.pop(patLoad, 0)
                    elem.jDistLoads[f'{patLoad}{idx}'] = elem.jDistLoads.pop(patLoad, 0)
        # for applied point loads
        for idx, span in enumerate(self.spans):
            for node in span.nodes:
                for patLoad in self.patternLoads:
                    node.rawVapply[f'{patLoad}{idx}'] = node.rawVapply.pop(patLoad, 0 * units.kip)
                    node.rawMapply[f'{patLoad}{idx}'] = node.rawMapply.pop(patLoad, 0 * units.kft)
        # ------- SET SYSTEM PARAMETERS AND CHECK THAT ANALYSIS CAN RUN --------
        # 2 DOFs per node: 2*idx = translation, 2*idx + 1 = rotation
        for idx,node in enumerate(self.nodes):
            if node.trans:
                self.restrainDOFs.append(2*idx)
            else:
                self.freeDOFs.append(2*idx)
            if node.rotate:
                self.restrainDOFs.append(2*idx + 1)
            else:
                self.freeDOFs.append(2*idx + 1)
        self.avgNodeSpacing = self.len / len(self.nodes)
        if len(self.restrainDOFs) <= 1:
            sys.exit('ERROR: Insufficient supports provided. Beam needs (2) pinned nodes or (1) fixed node to be stable.')
        for idx, node in enumerate(self.nodes):
            if node.condition == 'fix':
                if idx == 0 or idx == len(self.nodes) - 1:
                    continue
                else:
                    sys.exit('ERROR: Fixed nodes can only be at beginning or end.')
    def setLoadCombos(self, collection, targetAttr):
        """Set load combinations for strength and deflection checks given a
        collection of load combinations to pull from and a target attribute to
        set with the list of load combinations.

        Args:
            collection: one of 'LRFD', 'ASD' or 'L'; selects the JSON combo
                database to read. Any other value exits via ``sys.exit``.
            targetAttr: a set-like attribute (e.g. ``self.strengthCombos``)
                that receives the built LoadCombo objects via ``.add``.
        """
        # determine which collection to look for load combos
        if collection == 'LRFD':
            db_path = f'../steel_beam_analysis/db/lrfd_combos.json'
        elif collection == 'ASD':
            db_path = f'../steel_beam_analysis/db/asd_combos.json'
        elif collection == 'L':
            db_path = f'../steel_beam_analysis/db/L_combos.json'
        else:
            sys.exit('bad collection option for load combos!')
        # read load combo data from json db
        with open(db_path) as f:
            raw_combos = json.load(f)
        # filter raw load combo data: keep only factors for load types present
        # on this beam, plus the combo's 'ref' label
        filtered_combos = []
        for combo in raw_combos:
            filtered_combo = {}
            for k in combo.keys():
                if combo[k] != None and k in self.loadTypes:
                    filtered_combo[k] = combo[k]
                if k == 'ref':
                    filtered_combo[k] = combo[k]
            if len(filtered_combo) > 1:
                filtered_combos.append(filtered_combo) # don't append 0 length combos
        # build load combo objects
        for combo in filtered_combos:
            comboNoRef = {k: v for k, v in combo.items() if k != 'ref'}
            patLoads = list(set(list(comboNoRef.keys())) & set(self.patternLoads))
            nonPatLoads = [load for load in comboNoRef if load not in patLoads]
            if patLoads:
                # enumerate every non-empty subset of spans to pattern the load over
                tfPerms = list(itertools.product([True, False], repeat = len(self.spans)))
                spanIdxsCombos = []
                for perm in tfPerms:
                    spanIdxsCombos.append([i for i, v in enumerate(perm) if v])
                spanIdxsCombos.remove([])
                for perm in spanIdxsCombos:
                    lcLoads = []
                    # SECURITY NOTE(review): eval() on factor strings from the
                    # local JSON db — acceptable only if the db is trusted input.
                    for load in nonPatLoads:
                        lcLoads.append({'type': load, 'factor': eval(str(combo[load]))})
                    for spanIdx in perm:
                        for load in patLoads:
                            lcLoads.append({'type': f'{load}{spanIdx}', 'factor': eval(str(combo[load]))})
                    if lcLoads:
                        targetAttr.add(LoadCombo(self, lcLoads, combo['ref']))
            else:
                lcLoads = []
                for load in nonPatLoads:
                    lcLoads.append({'type': load, 'factor': eval(str(combo[load]))})
                if lcLoads:
                    targetAttr.add(LoadCombo(self, lcLoads, combo['ref']))
def __str__ (self):
bendingDCR = round(self.maxMomentNode.bendingDCR, 3)
shearDCR = round(self.maxShearNode.shearDCR, 3)
string = f'Bending DCR... \t{bendingDCR}\n'
string += f'Shear DCR... \t{shearDCR}\n'
string += f'Analysis ran successfully!'
return string
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.Polygon",
"steel_beam_analysis.unbracedSpan.UnbracedSpan",
"numpy.array",
"steel_beam_analysis.load.LineLoad",
"sys.exit",
"sortedcontainers.SortedSet",
"numpy.arange",
"steel_beam_analysis.node.Node",
"numpy.ix_",
"numpy.matmul",
"warnings.simplefi... | [((1364, 1385), 'matplotlib.use', 'matplotlib.use', (['"""pgf"""'], {}), "('pgf')\n", (1378, 1385), False, 'import matplotlib\n'), ((1386, 1514), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'pgf.texsystem': 'pdflatex', 'font.family': 'serif', 'text.usetex': True,\n 'pgf.rcfonts': False}"], {}), "({'pgf.texsystem': 'pdflatex', 'font.family':\n 'serif', 'text.usetex': True, 'pgf.rcfonts': False})\n", (1412, 1514), False, 'import matplotlib\n'), ((2279, 2292), 'sortedcontainers.SortedSet', 'SortedSet', (['[]'], {}), '([])\n', (2288, 2292), False, 'from sortedcontainers import SortedSet, SortedList\n'), ((2773, 2786), 'sortedcontainers.SortedSet', 'SortedSet', (['[]'], {}), '([])\n', (2782, 2786), False, 'from sortedcontainers import SortedSet, SortedList\n'), ((2984, 2997), 'sortedcontainers.SortedSet', 'SortedSet', (['[]'], {}), '([])\n', (2993, 2997), False, 'from sortedcontainers import SortedSet, SortedList\n'), ((3868, 3881), 'sortedcontainers.SortedSet', 'SortedSet', (['[]'], {}), '([])\n', (3877, 3881), False, 'from sortedcontainers import SortedSet, SortedList\n'), ((4339, 4351), 'sortedcontainers.SortedList', 'SortedList', ([], {}), '()\n', (4349, 4351), False, 'from sortedcontainers import SortedSet, SortedList\n'), ((4509, 4522), 'sortedcontainers.SortedSet', 'SortedSet', (['[]'], {}), '([])\n', (4518, 4522), False, 'from sortedcontainers import SortedSet, SortedList\n'), ((4714, 4760), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'RuntimeWarning'], {}), "('error', RuntimeWarning)\n", (4735, 4760), False, 'import warnings\n'), ((5460, 5486), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 3)'}), '(figsize=(8, 3))\n', (5470, 5486), True, 'import matplotlib.pyplot as plt\n'), ((6882, 6987), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'top': '"""off"""', 'bottom': '"""off"""', 'left': '"""off"""', 'right': '"""off"""', 'labelleft': '"""off"""', 'labelbottom': 
'"""on"""'}), "(top='off', bottom='off', left='off', right='off', labelleft\n ='off', labelbottom='on')\n", (6897, 6987), True, 'import matplotlib.pyplot as plt\n'), ((6991, 7053), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'color': '"""k"""', 'linestyle': '"""dotted"""'}), "(b=True, which='major', color='k', linestyle='dotted')\n", (6999, 7053), True, 'import matplotlib.pyplot as plt\n'), ((7062, 7137), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{self.outputPath}/{self.projectInfo_memberName}_{title}.pgf"""'], {}), "(f'{self.outputPath}/{self.projectInfo_memberName}_{title}.pgf')\n", (7073, 7137), True, 'import matplotlib.pyplot as plt\n'), ((7241, 7345), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'gridspec_kw': "{'hspace': -0.09, 'height_ratios': [3, 1]}", 'figsize': '(8, 3)', 'sharex': '(True)'}), "(2, gridspec_kw={'hspace': -0.09, 'height_ratios': [3, 1]},\n figsize=(8, 3), sharex=True)\n", (7253, 7345), True, 'import matplotlib.pyplot as plt\n'), ((12033, 12140), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{self.outputPath}/{self.projectInfo_memberName}_loadDiagram.pgf"""'], {'dpi': '(90)', 'pad_inches': '(0.5)'}), "(f'{self.outputPath}/{self.projectInfo_memberName}_loadDiagram.pgf',\n dpi=90, pad_inches=0.5)\n", (12044, 12140), True, 'import matplotlib.pyplot as plt\n'), ((1237, 1257), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (1252, 1257), False, 'import os\n'), ((8583, 8649), 'matplotlib.pyplot.Polygon', 'plt.Polygon', (['points'], {'fill': '(True)', 'alpha': '(0.4)', 'color': 'cycle_colors[idx]'}), '(points, fill=True, alpha=0.4, color=cycle_colors[idx])\n', (8594, 8649), True, 'import matplotlib.pyplot as plt\n'), ((13915, 13959), 'numpy.add', 'np.add', (['self.F0Body[type]', 'self.F0Node[type]'], {}), '(self.F0Body[type], self.F0Node[type])\n', (13921, 13959), True, 'import numpy as np\n'), ((14321, 14389), 'numpy.add', 'np.add', (['self.K[2 * idx:2 * idx + 4, 
2 * idx:2 * idx + 4]', 'element.kE'], {}), '(self.K[2 * idx:2 * idx + 4, 2 * idx:2 * idx + 4], element.kE)\n', (14327, 14389), True, 'import numpy as np\n'), ((14447, 14483), 'numpy.ix_', 'np.ix_', (['self.freeDOFs', 'self.freeDOFs'], {}), '(self.freeDOFs, self.freeDOFs)\n', (14453, 14483), True, 'import numpy as np\n'), ((24293, 24357), 'sys.exit', 'sys.exit', (['"""ERROR: Multiple nodes cannot have the same location."""'], {}), "('ERROR: Multiple nodes cannot have the same location.')\n", (24301, 24357), False, 'import sys\n'), ((27161, 27289), 'numpy.arange', 'np.arange', (['(self.nodes[idx].loc + self.eleSpacing)', '(self.nodes[idx + 1].loc - self.eleSpacing)', 'self.eleSpacing'], {'dtype': '"""object"""'}), "(self.nodes[idx].loc + self.eleSpacing, self.nodes[idx + 1].loc -\n self.eleSpacing, self.eleSpacing, dtype='object')\n", (27170, 27289), True, 'import numpy as np\n'), ((31274, 31394), 'sys.exit', 'sys.exit', (['"""ERROR: Insufficient supports provided. Beam needs (2) pinned nodes or (1) fixed node to be stable."""'], {}), "(\n 'ERROR: Insufficient supports provided. 
Beam needs (2) pinned nodes or (1) fixed node to be stable.'\n )\n", (31282, 31394), False, 'import sys\n'), ((32469, 32481), 'json.load', 'json.load', (['f'], {}), '(f)\n', (32478, 32481), False, 'import json\n'), ((12794, 12845), 'numpy.add', 'np.add', (['self.F0Body[type][2 * idx:2 * idx + 4]', 'f0e'], {}), '(self.F0Body[type][2 * idx:2 * idx + 4], f0e)\n', (12800, 12845), True, 'import numpy as np\n'), ((13507, 13541), 'steel_beam_analysis.stringFixer.fixUnits', 'sf.fixUnits', (['node.loc'], {'type': '"""text"""'}), "(node.loc, type='text')\n", (13518, 13541), True, 'import steel_beam_analysis.stringFixer as sf\n'), ((13568, 13610), 'steel_beam_analysis.stringFixer.fixUnits', 'sf.fixUnits', (['(-pointLoad.shear)'], {'type': '"""text"""'}), "(-pointLoad.shear, type='text')\n", (13579, 13610), True, 'import steel_beam_analysis.stringFixer as sf\n'), ((14087, 14108), 'numpy.ix_', 'np.ix_', (['self.freeDOFs'], {}), '(self.freeDOFs)\n', (14093, 14108), True, 'import numpy as np\n'), ((14600, 14623), 'numpy.linalg.inv', 'np.linalg.inv', (['self.KFF'], {}), '(self.KFF)\n', (14613, 14623), True, 'import numpy as np\n'), ((25044, 25080), 'steel_beam_analysis.span.Span', 'Span', (['iNode', 'self.realNodes[idx + 1]'], {}), '(iNode, self.realNodes[idx + 1])\n', (25048, 25080), False, 'from steel_beam_analysis.span import Span\n'), ((27998, 28039), 'steel_beam_analysis.element.Element', 'Element', (['self', 'iNode', 'self.nodes[idx + 1]'], {}), '(self, iNode, self.nodes[idx + 1])\n', (28005, 28039), False, 'from steel_beam_analysis.element import Element\n'), ((28663, 28776), 'steel_beam_analysis.load.LineLoad', 'LineLoad', ([], {'iLoc': '(0 * units.ft)', 'jLoc': 'self.len', 'iLineLoad': 'self.weight', 'jLineLoad': 'self.weight', 'desc': '"""Self weight"""'}), "(iLoc=0 * units.ft, jLoc=self.len, iLineLoad=self.weight, jLineLoad\n =self.weight, desc='Self weight')\n", (28671, 28776), False, 'from steel_beam_analysis.load import AreaLoad, LineLoad, LoadCombo, PointLoad\n'), 
((6810, 6819), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6817, 6819), True, 'import matplotlib.pyplot as plt\n'), ((12713, 12743), 'numpy.array', 'np.array', (['([0], [0], [0], [0])'], {}), '(([0], [0], [0], [0]))\n', (12721, 12743), True, 'import numpy as np\n'), ((15776, 15806), 'numpy.array', 'np.array', (['([0], [0], [0], [0])'], {}), '(([0], [0], [0], [0]))\n', (15784, 15806), True, 'import numpy as np\n'), ((19660, 19723), 'steel_beam_analysis.unbracedSpan.UnbracedSpan', 'UnbracedSpan', (['self', 'self.unbracedSpanBoundaryPts[idx - 1]', 'node'], {}), '(self, self.unbracedSpanBoundaryPts[idx - 1], node)\n', (19672, 19723), False, 'from steel_beam_analysis.unbracedSpan import UnbracedSpan\n'), ((26894, 26913), 'steel_beam_analysis.node.Node', 'Node', (['distLoad.iLoc'], {}), '(distLoad.iLoc)\n', (26898, 26913), False, 'from steel_beam_analysis.node import Node\n'), ((26946, 26965), 'steel_beam_analysis.node.Node', 'Node', (['distLoad.jLoc'], {}), '(distLoad.jLoc)\n', (26950, 26965), False, 'from steel_beam_analysis.node import Node\n'), ((27322, 27336), 'steel_beam_analysis.node.Node', 'Node', (['location'], {}), '(location)\n', (27326, 27336), False, 'from steel_beam_analysis.node import Node\n'), ((31604, 31667), 'sys.exit', 'sys.exit', (['"""ERROR: Fixed nodes can only be at beginning or end."""'], {}), "('ERROR: Fixed nodes can only be at beginning or end.')\n", (31612, 31667), False, 'import sys\n'), ((32315, 32365), 'sys.exit', 'sys.exit', (['"""bad collection option for load combos!"""'], {}), "('bad collection option for load combos!')\n", (32323, 32365), False, 'import sys\n'), ((15842, 15895), 'numpy.matmul', 'np.matmul', (['elem.kE', 'self.U[type][2 * idx:2 * idx + 4]'], {}), '(elem.kE, self.U[type][2 * idx:2 * idx + 4])\n', (15851, 15895), True, 'import numpy as np\n'), ((34337, 34375), 'steel_beam_analysis.load.LoadCombo', 'LoadCombo', (['self', 'lcLoads', "combo['ref']"], {}), "(self, lcLoads, combo['ref'])\n", (34346, 34375), False, 
'from steel_beam_analysis.load import AreaLoad, LineLoad, LoadCombo, PointLoad\n'), ((34061, 34099), 'steel_beam_analysis.load.LoadCombo', 'LoadCombo', (['self', 'lcLoads', "combo['ref']"], {}), "(self, lcLoads, combo['ref'])\n", (34070, 34099), False, 'from steel_beam_analysis.load import AreaLoad, LineLoad, LoadCombo, PointLoad\n')] |
# Classes for image and mask manipulation.
import os
import re
import glob
import cv2
import torch
import models
import numpy as np
from parser import Annotation
class Mask:
    """Grayscale mask built from face bounding boxes over an image.

    Boxes are a flat list ``[x1, y1, x2, y2, ...]`` where (x1, y1) is the
    upper-left pixel and (x2, y2) the lower-right (exclusive) pixel of each
    box.  "Label" variants are element-wise normalized to [0, 1] for input
    into loss functions (e.g. nn.BCELoss in PyTorch).
    """

    def __init__(self, height, width, boxes):
        # Image dimensions.
        self.height = height
        self.width = width
        # Bounding box information (flat list, four coordinates per box).
        self.boxes = boxes
        self.number_of_boxes = len(self.boxes) // 4
        self.boxes_area = self.get_area()
        self.average_area = self.get_average_area()
        # Mask arrays in all four background/box value combinations.
        self.array = Mask.make_mask(self)
        self.array_label = Mask.make_mask(self, label=True)
        self.inverted_array = Mask.make_mask(self, inverted=True)
        self.inverted_array_label = Mask.make_mask(self, inverted=True, label=True)

    def get_area(self):
        """Return the total pixel area covered by all bounding boxes."""
        area = 0
        for k in range(self.number_of_boxes):
            x1, y1, x2, y2 = self.boxes[4 * k:4 * k + 4]
            area += (y2 - y1) * (x2 - x1)
        return area

    def get_average_area(self):
        """Return the mean box area, or -1 when there are no boxes."""
        if self.number_of_boxes > 0:
            return self.boxes_area / self.number_of_boxes
        else:
            return -1

    def make_mask(self, scale_factor=255.0, inverted=False, label=False):
        """Build a (height, width) float mask from the bounding boxes.

        inverted=False: background at ``scale_factor``, box interiors at 0.
        inverted=True:  background at 0, box interiors at ``scale_factor``.
        label=True forces ``scale_factor`` to 1 (loss-function labels).
        """
        if label:
            scale_factor = 1
        if inverted:
            mask = np.zeros((self.height, self.width), dtype=float)
            box_value = scale_factor
        else:
            mask = scale_factor * np.ones((self.height, self.width), dtype=float)
            box_value = 0
        # Vectorized slice assignment replaces the original per-pixel
        # triple loop (same values, O(boxes) instead of O(boxes * area)).
        for k in range(self.number_of_boxes):
            x1, y1, x2, y2 = self.boxes[4 * k:4 * k + 4]
            mask[y1:y2, x1:x2] = box_value
        return mask
# Prepares image data for processing by UNet.
class Loader:
    """Prepares WIDER FACE image data (images + box masks) for UNet.

    Class attributes are resolved at import time from the 'WIDER_images'
    tree in the working directory.
    """

    path_string = "WIDER_images/*/images/*"
    image_directories = glob.glob(path_string)
    image_files = glob.glob(path_string + "/*.jpg")
    annotation_train_directory = "WIDER_images/WIDER_annotations/WIDER_train"
    annotation_val_directory = "WIDER_images/WIDER_annotations/WIDER_val"
    # annotation_test_directory = "WIDER_images/WIDER_annotations/WIDER_test"

    def __init__(self, dimension):
        # Target square side length (pixels) used by the resize methods.
        self.dimension = dimension

    @classmethod
    def open_image_monochrome(cls, file):
        """Read an image file as a single-channel (grayscale) array."""
        return cv2.imread(file, cv2.IMREAD_GRAYSCALE)

    @classmethod
    def get_mask_directories(cls):
        """Ensure every image directory has a 'masks' subdirectory; return them."""
        for directory in Loader.image_directories:
            if not os.path.exists(directory + "/masks"):
                os.makedirs(directory + "/masks")
        return glob.glob(Loader.path_string + "/masks")

    @classmethod
    def match_masks_to_images(cls):
        """Delete every '*_mask.jpg' whose source image no longer exists."""
        mask_files = glob.glob(Loader.path_string + "/*_mask.jpg")
        print("Matching masks to images.")
        count = 0
        for mask in mask_files:
            image_file = re.sub(re.compile("_mask.jpg"), ".jpg", mask)
            if not os.path.exists(image_file):
                os.remove(mask)
                count = count + 1
        print(str(count) + " mask file(s) removed.")

    # Given a set of image paths, construct its corresponding set of mask paths
    @classmethod
    def get_mask_paths(cls, image_paths):
        """Map each image path to its expected mask path.

        'WIDER_images/<set>/images/<event>/<name>.jpg' ->
        'WIDER_images/<set>/images/<event>/masks/<name>_mask.jpg'
        Assumes the 4-deep layout of ``path_string``.
        """
        mask_paths = []
        for image_path in image_paths:
            parts = image_path.split("/")
            mask_file_name = re.sub(re.compile(".jpg"), "_mask.jpg", parts[4])
            mask_paths.append("/".join(parts[0:4]) + "/masks/" + mask_file_name)
        return mask_paths

    # Given a set of image paths and a set of mask paths, pair each image with its mask and return the desired batch
    @classmethod
    def get_batch(cls, image_paths, batch_size, batch, seed):
        """Shuffle ``image_paths`` in place (seeded for reproducibility) and
        return the requested batch as an array of [image, mask] pairs."""
        start = batch * batch_size
        end = min((batch + 1) * batch_size, len(image_paths))
        np.random.seed(seed)
        np.random.shuffle(image_paths)
        image_paths_batch = image_paths[start:end]
        mask_paths = Loader.get_mask_paths(image_paths_batch)
        masks_images = []
        for image_path, mask_path in zip(image_paths_batch, mask_paths):
            x = Loader.open_image_monochrome(image_path)
            y = Loader.open_image_monochrome(mask_path)
            masks_images.append([x, y])
        return np.asarray(masks_images)

    # Construct image masks from parsed Pascal VOC annotations, then write as .jpg files
    def make_masks(self):
        """Build one inverted mask per training annotation and write it to
        the corresponding 'masks' directory as a .jpg file."""
        directory = self.annotation_train_directory
        filepaths = glob.glob(directory + "/*.xml")
        print("Making masks.")
        for file in filepaths:  # for each .xml annotation file . . .
            # Get bounding box and path information from the annotation.
            file_annotation = Annotation(file)
            file_name = re.sub(re.compile(".jpg"), "", file_annotation.path)
            file_name = re.sub(re.compile(r"\./"), "WIDER_images/", file_name)
            mask_name = file_name + "_mask.jpg"
            mask_name = mask_name.split("/")
            mask_name = "/".join(mask_name[0:4]) + "/masks/" + mask_name[4]
            # Use Mask to rasterize the bounding boxes, then write the
            # inverted (background 0, boxes 255) variant.
            voc_mask = Mask(file_annotation.height, file_annotation.width, file_annotation.boxes)
            cv2.imwrite(mask_name, voc_mask.inverted_array)

    def _replicate_directory_tree(self, directories, new_directory):
        """Recreate ``directories`` under ``new_directory`` (resize helper)."""
        main_directory = re.compile("WIDER_images/")
        for image_directory in directories:
            new_image_directory = re.sub(main_directory, new_directory, image_directory)
            if not os.path.exists(new_image_directory):
                os.makedirs(new_image_directory, exist_ok=True)

    def _write_resized(self, new_directory):
        """Resize every .jpg under the image AND mask directories and write
        it into the mirrored tree under ``new_directory`` (resize helper).

        Streams one image at a time instead of loading a whole directory
        into memory as the original code did.
        """
        main_directory = re.compile("WIDER_images/")
        dim = (self.dimension, self.dimension)
        for image_directory in Loader.image_directories + glob.glob(Loader.path_string + "/masks"):
            for file in glob.glob(image_directory + "/*.jpg"):
                new_path = re.sub(main_directory, new_directory, file)
                im = cv2.resize(cv2.imread(file), dim, interpolation=cv2.INTER_AREA)
                cv2.imwrite(new_path, im)

    def resize_images(self):
        """Resize all dataset images into 'WIDER_images_<dimension>/',
        mirroring the original directory structure."""
        new_directory = "WIDER_images_" + str(self.dimension) + "/"
        print("Resizing images.")
        self._replicate_directory_tree(Loader.image_directories, new_directory)
        self._write_resized(new_directory)

    def resize_masks(self):
        """Resize all masks into 'WIDER_images_<dimension>/', mirroring the
        mask directory structure.

        NOTE(review): like the original, the write step iterates BOTH the
        image and mask directories, so this repeats resize_images' work.
        """
        new_directory = "WIDER_images_" + str(self.dimension) + "/"
        # Fixed copy-paste bug: the original printed "Resizing images." here.
        print("Resizing masks.")
        self._replicate_directory_tree(glob.glob(Loader.path_string + "/masks"), new_directory)
        self._write_resized(new_directory)

    # In case there are more masks than images, delete the extraneous masks
    def rename_masks(self):
        """Strip the '_mask' suffix from resized mask filenames so they match
        their image filenames."""
        new_directory = "WIDER_images_" + str(self.dimension) + "/"
        new_mask_paths = glob.glob(new_directory + "*/images/*/masks/*.jpg")
        for mask_file in new_mask_paths:
            os.rename(mask_file, re.sub(re.compile("_mask.jpg"), ".jpg", mask_file))

    def match_images_to_masks(self):
        """Delete every resized mask whose corresponding image is missing."""
        new_directory = "WIDER_images_" + str(self.dimension) + "/"
        new_mask_paths = glob.glob(new_directory + "*/images/*/masks/*.jpg")
        print("Matching masks to images.")
        count = 0
        for mask in new_mask_paths:
            # NOTE(review): this drops the event directory (components 3-4)
            # when rebuilding the image path; confirm against the resized
            # tree layout before relying on it.
            image_file = mask.split("/")
            image_file = "/".join(image_file[0:3]) + "/" + image_file[5]
            if not os.path.exists(image_file):
                os.remove(mask)
                count += 1
        print(str(count) + " mask file(s) removed.")

    # Invert all masks
    def invert_masks(self):
        """Invert pixel intensities of every resized mask, in place."""
        mask_path = "WIDER_images_" + str(self.dimension) + "/*/images/*/masks"
        filepaths = glob.glob(mask_path + "/*.jpg")
        for file in filepaths:
            im = cv2.imread(file)
            cv2.imwrite(file, cv2.bitwise_not(im))
# Post-processing of image masks outputs from UNet.
class Editor:
    """Post-processing of image mask outputs from UNet."""

    @classmethod
    def apply_mask(cls, image, mask):
        """Element-wise AND of two integer NumPy arrays."""
        return np.bitwise_and(image, mask)

    @classmethod
    def resize_mask(cls, image, height, width):
        """Resize a mask with bilinear interpolation."""
        image = cv2.resize(image, (height, width), interpolation=cv2.INTER_LINEAR)
        return image

    @classmethod
    def smooth_mask(cls, image, kernel_size=81):
        """Gaussian-blur a mask to soften hard edges."""
        image = cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
        return image

    # Apply a NumPy array mask to a NumPy array image
    @classmethod
    def invert_mask(cls, mask):
        """Return max(mask) - mask; accepts a torch.Tensor or NumPy array."""
        if isinstance(mask, torch.Tensor):
            mask = mask.detach().cpu().numpy()
        inverter_array = np.max(mask) * np.ones(mask.shape)
        mask = inverter_array - mask
        return mask

    # Occlude objects in an image
    @classmethod
    def occlude_image(cls, image, mask):
        """Occlude objects by AND-ing the image with the mask."""
        x = Editor.apply_mask(image, mask)
        return x

    # Apply an anonymization procedure to a NumPy array
    @classmethod
    def inpaint_occluded_image(cls, image, mask):
        """Anonymize: inpaint the masked region (Telea algorithm, radius 10)."""
        im = cv2.inpaint(image, mask, 10, cv2.INPAINT_TELEA)
        return im

    # From the distribution of pixel intensities, select
    # the least pixel intensity in the highest bin, then
    # use that value as a threshold for the image.
    @classmethod
    def make_binary_mask(cls, image, scalar):
        """Threshold ``image`` at the lower edge of one of the top histogram
        bins; pixels above the threshold become ``scalar``, the rest 0."""
        distribution = np.histogram(image)
        threshold = distribution[1][len(distribution[1]) - 5]
        new_mask = np.zeros(image.shape)
        # Vectorized thresholding replaces the original per-pixel loop.
        new_mask[image > threshold] = scalar
        return new_mask

    @classmethod
    def make_binary_mask_from_torch(cls, image, scalar):
        """Binarize a (1, 1, H, W) torch tensor; return a float tensor of the
        same shape."""
        image = image.detach().cpu().numpy()
        image = np.squeeze(image)
        height, width = image.shape
        new_mask = Editor.make_binary_mask(image, scalar)
        new_mask = new_mask.reshape(1, 1, height, width)
        new_mask = torch.from_numpy(new_mask)
        return new_mask.float()

    @classmethod
    def reshape_for_display(cls, i, list_of_images):
        """Return element ``i`` as a 256x256 NumPy array for plotting."""
        x = list_of_images[i]
        if type(x) is torch.Tensor:
            x = x.detach().cpu().numpy()
        x = np.reshape(x, [256, 256])
        return x

    # Return the intersection over union of two NumPy arrays
    @classmethod
    def intersection_over_union(cls, y, z):
        """IoU of two non-negative arrays: sum(min) / sum(max)."""
        iou = (np.sum(np.minimum(y, z))) / (np.sum(np.maximum(y, z)))
        return iou

    def __init__(self, image_paths, seed_index):
        self.image_paths = image_paths
        self.seed_index = seed_index
        # Load every image/mask pair up front (one batch covering the whole set).
        self.samples = Loader.get_batch(self.image_paths, len(self.image_paths), 0, self.seed_index)
        self.samples_images = self.samples[:, 0]
        self.samples_masks = self.samples[:, 1]
        self.model = models.UNet()

    def initiate_model(self, state_dict):
        """Load trained weights and put the model in eval mode (GPU if available)."""
        self.model.load_state_dict(state_dict)
        self.model.eval()
        if torch.cuda.is_available():
            self.model = self.model.cuda()

    def get_raw_masks(self):
        """Run the model on all sample images without tracking gradients."""
        # Bug fix: the original used `with torch.no_grad:` (missing call),
        # which raises at runtime; the context manager must be instantiated.
        with torch.no_grad():
            x = self.model(self.samples_images)
        return x

    def get_input(self, i, samples_images):
        return self.reshape_for_display(i, samples_images)

    def get_output(self, i, samples_images):
        """Run sample ``i`` through the model; return a 256x256 NumPy array."""
        y = samples_images[i]
        y = y.reshape(1, 1, y.shape[0], y.shape[1])
        y = torch.from_numpy(y)
        if torch.cuda.is_available():
            y = y.cuda()
        y = y.float()
        y = self.model(y)
        y = y.detach().cpu().numpy()
        y = np.reshape(y, [256, 256])
        return y
| [
"parser.Annotation",
"re.compile",
"torch.from_numpy",
"torch.cuda.is_available",
"os.remove",
"os.path.exists",
"numpy.histogram",
"numpy.reshape",
"numpy.asarray",
"numpy.max",
"numpy.random.seed",
"numpy.maximum",
"glob.glob",
"numpy.ones",
"numpy.squeeze",
"models.UNet",
"cv2.res... | [((2319, 2341), 'glob.glob', 'glob.glob', (['path_string'], {}), '(path_string)\n', (2328, 2341), False, 'import glob\n'), ((2360, 2393), 'glob.glob', 'glob.glob', (["(path_string + '/*.jpg')"], {}), "(path_string + '/*.jpg')\n", (2369, 2393), False, 'import glob\n'), ((2765, 2803), 'cv2.imread', 'cv2.imread', (['file', 'cv2.IMREAD_GRAYSCALE'], {}), '(file, cv2.IMREAD_GRAYSCALE)\n', (2775, 2803), False, 'import cv2\n'), ((3043, 3083), 'glob.glob', 'glob.glob', (["(Loader.path_string + '/masks')"], {}), "(Loader.path_string + '/masks')\n", (3052, 3083), False, 'import glob\n'), ((3159, 3204), 'glob.glob', 'glob.glob', (["(Loader.path_string + '/*_mask.jpg')"], {}), "(Loader.path_string + '/*_mask.jpg')\n", (3168, 3204), False, 'import glob\n'), ((4317, 4337), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4331, 4337), True, 'import numpy as np\n'), ((4346, 4376), 'numpy.random.shuffle', 'np.random.shuffle', (['image_paths'], {}), '(image_paths)\n', (4363, 4376), True, 'import numpy as np\n'), ((4758, 4782), 'numpy.asarray', 'np.asarray', (['masks_images'], {}), '(masks_images)\n', (4768, 4782), True, 'import numpy as np\n'), ((5000, 5031), 'glob.glob', 'glob.glob', (["(directory + '/*.xml')"], {}), "(directory + '/*.xml')\n", (5009, 5031), False, 'import glob\n'), ((6074, 6101), 're.compile', 're.compile', (['"""WIDER_images/"""'], {}), "('WIDER_images/')\n", (6084, 6101), False, 'import re\n'), ((7466, 7493), 're.compile', 're.compile', (['"""WIDER_images/"""'], {}), "('WIDER_images/')\n", (7476, 7493), False, 'import re\n'), ((7516, 7556), 'glob.glob', 'glob.glob', (["(Loader.path_string + '/masks')"], {}), "(Loader.path_string + '/masks')\n", (7525, 7556), False, 'import glob\n'), ((7697, 7743), 'glob.glob', 'glob.glob', (["(Loader.path_string + '/masks/*.jpg')"], {}), "(Loader.path_string + '/masks/*.jpg')\n", (7706, 7743), False, 'import glob\n'), ((8977, 9028), 'glob.glob', 'glob.glob', (["(new_directory + 
'*/images/*/masks/*.jpg')"], {}), "(new_directory + '*/images/*/masks/*.jpg')\n", (8986, 9028), False, 'import glob\n'), ((9284, 9335), 'glob.glob', 'glob.glob', (["(new_directory + '*/images/*/masks/*.jpg')"], {}), "(new_directory + '*/images/*/masks/*.jpg')\n", (9293, 9335), False, 'import glob\n'), ((9854, 9885), 'glob.glob', 'glob.glob', (["(mask_path + '/*.jpg')"], {}), "(mask_path + '/*.jpg')\n", (9863, 9885), False, 'import glob\n'), ((10142, 10169), 'numpy.bitwise_and', 'np.bitwise_and', (['image', 'mask'], {}), '(image, mask)\n', (10156, 10169), True, 'import numpy as np\n'), ((10252, 10318), 'cv2.resize', 'cv2.resize', (['image', '(height, width)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(image, (height, width), interpolation=cv2.INTER_LINEAR)\n', (10262, 10318), False, 'import cv2\n'), ((10423, 10477), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(kernel_size, kernel_size)', '(0)'], {}), '(image, (kernel_size, kernel_size), 0)\n', (10439, 10477), False, 'import cv2\n'), ((11100, 11147), 'cv2.inpaint', 'cv2.inpaint', (['image', 'mask', '(10)', 'cv2.INPAINT_TELEA'], {}), '(image, mask, 10, cv2.INPAINT_TELEA)\n', (11111, 11147), False, 'import cv2\n'), ((11494, 11513), 'numpy.histogram', 'np.histogram', (['image'], {}), '(image)\n', (11506, 11513), True, 'import numpy as np\n'), ((11594, 11615), 'numpy.zeros', 'np.zeros', (['image.shape'], {}), '(image.shape)\n', (11602, 11615), True, 'import numpy as np\n'), ((11950, 11967), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (11960, 11967), True, 'import numpy as np\n'), ((12139, 12165), 'torch.from_numpy', 'torch.from_numpy', (['new_mask'], {}), '(new_mask)\n', (12155, 12165), False, 'import torch\n'), ((12389, 12414), 'numpy.reshape', 'np.reshape', (['x', '[256, 256]'], {}), '(x, [256, 256])\n', (12399, 12414), True, 'import numpy as np\n'), ((12989, 13002), 'models.UNet', 'models.UNet', ([], {}), '()\n', (13000, 13002), False, 'import models\n'), ((13130, 13155), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13153, 13155), False, 'import torch\n'), ((13567, 13586), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (13583, 13586), False, 'import torch\n'), ((13598, 13623), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13621, 13623), False, 'import torch\n'), ((13747, 13772), 'numpy.reshape', 'np.reshape', (['y', '[256, 256]'], {}), '(y, [256, 256])\n', (13757, 13772), True, 'import numpy as np\n'), ((1535, 1583), 'numpy.zeros', 'np.zeros', (['(self.height, self.width)'], {'dtype': 'float'}), '((self.height, self.width), dtype=float)\n', (1543, 1583), True, 'import numpy as np\n'), ((5259, 5275), 'parser.Annotation', 'Annotation', (['file'], {}), '(file)\n', (5269, 5275), False, 'from parser import Annotation\n'), ((5867, 5914), 'cv2.imwrite', 'cv2.imwrite', (['mask_name', 'voc_mask.inverted_array'], {}), '(mask_name, voc_mask.inverted_array)\n', (5878, 5914), False, 'import cv2\n'), ((6340, 6389), 're.sub', 're.sub', (['main_directory', 'new_directory', 'image_file'], {}), '(main_directory, new_directory, image_file)\n', (6346, 6389), False, 'import re\n'), ((6600, 6654), 're.sub', 're.sub', (['main_directory', 'new_directory', 'image_directory'], {}), '(main_directory, new_directory, image_directory)\n', (6606, 6654), False, 'import re\n'), ((6916, 6956), 'glob.glob', 'glob.glob', (["(Loader.path_string + '/masks')"], {}), "(Loader.path_string + '/masks')\n", (6925, 6956), False, 'import glob\n'), ((6978, 7015), 'glob.glob', 'glob.glob', (["(image_directory + '/*.jpg')"], {}), "(image_directory + '/*.jpg')\n", (6987, 7015), False, 'import glob\n'), ((7773, 7821), 're.sub', 're.sub', (['main_directory', 'new_directory', 'mask_file'], {}), '(main_directory, new_directory, mask_file)\n', (7779, 7821), False, 'import re\n'), ((8030, 8084), 're.sub', 're.sub', (['main_directory', 'new_directory', 'image_directory'], {}), '(main_directory, new_directory, 
image_directory)\n', (8036, 8084), False, 'import re\n'), ((8346, 8386), 'glob.glob', 'glob.glob', (["(Loader.path_string + '/masks')"], {}), "(Loader.path_string + '/masks')\n", (8355, 8386), False, 'import glob\n'), ((8408, 8445), 'glob.glob', 'glob.glob', (["(image_directory + '/*.jpg')"], {}), "(image_directory + '/*.jpg')\n", (8417, 8445), False, 'import glob\n'), ((9935, 9951), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (9945, 9951), False, 'import cv2\n'), ((10718, 10730), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (10724, 10730), True, 'import numpy as np\n'), ((10733, 10752), 'numpy.ones', 'np.ones', (['mask.shape'], {}), '(mask.shape)\n', (10740, 10752), True, 'import numpy as np\n'), ((1695, 1742), 'numpy.ones', 'np.ones', (['(self.height, self.width)'], {'dtype': 'float'}), '((self.height, self.width), dtype=float)\n', (1702, 1742), True, 'import numpy as np\n'), ((2944, 2980), 'os.path.exists', 'os.path.exists', (["(directory + '/masks')"], {}), "(directory + '/masks')\n", (2958, 2980), False, 'import os\n'), ((2996, 3029), 'os.makedirs', 'os.makedirs', (["(directory + '/masks')"], {}), "(directory + '/masks')\n", (3007, 3029), False, 'import os\n'), ((3331, 3354), 're.compile', 're.compile', (['"""_mask.jpg"""'], {}), "('_mask.jpg')\n", (3341, 3354), False, 'import re\n'), ((3389, 3415), 'os.path.exists', 'os.path.exists', (['image_file'], {}), '(image_file)\n', (3403, 3415), False, 'import os\n'), ((3433, 3448), 'os.remove', 'os.remove', (['mask'], {}), '(mask)\n', (3442, 3448), False, 'import os\n'), ((3822, 3840), 're.compile', 're.compile', (['""".jpg"""'], {}), "('.jpg')\n", (3832, 3840), False, 'import re\n'), ((5307, 5325), 're.compile', 're.compile', (['""".jpg"""'], {}), "('.jpg')\n", (5317, 5325), False, 'import re\n'), ((5384, 5402), 're.compile', 're.compile', (['"""\\\\./"""'], {}), "('\\\\./')\n", (5394, 5402), False, 'import re\n'), ((6674, 6709), 'os.path.exists', 'os.path.exists', (['new_image_directory'], {}), 
'(new_image_directory)\n', (6688, 6709), False, 'import os\n'), ((6727, 6774), 'os.makedirs', 'os.makedirs', (['new_image_directory'], {'exist_ok': '(True)'}), '(new_image_directory, exist_ok=True)\n', (6738, 6774), False, 'import os\n'), ((7036, 7052), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (7046, 7052), False, 'import cv2\n'), ((7226, 7282), 'cv2.resize', 'cv2.resize', (['images[i]', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(images[i], dim, interpolation=cv2.INTER_AREA)\n', (7236, 7282), False, 'import cv2\n'), ((7299, 7324), 'cv2.imwrite', 'cv2.imwrite', (['new_path', 'im'], {}), '(new_path, im)\n', (7310, 7324), False, 'import cv2\n'), ((8104, 8139), 'os.path.exists', 'os.path.exists', (['new_image_directory'], {}), '(new_image_directory)\n', (8118, 8139), False, 'import os\n'), ((8157, 8204), 'os.makedirs', 'os.makedirs', (['new_image_directory'], {'exist_ok': '(True)'}), '(new_image_directory, exist_ok=True)\n', (8168, 8204), False, 'import os\n'), ((8466, 8482), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (8476, 8482), False, 'import cv2\n'), ((8656, 8712), 'cv2.resize', 'cv2.resize', (['images[i]', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(images[i], dim, interpolation=cv2.INTER_AREA)\n', (8666, 8712), False, 'import cv2\n'), ((8729, 8754), 'cv2.imwrite', 'cv2.imwrite', (['new_path', 'im'], {}), '(new_path, im)\n', (8740, 8754), False, 'import cv2\n'), ((9561, 9587), 'os.path.exists', 'os.path.exists', (['image_file'], {}), '(image_file)\n', (9575, 9587), False, 'import os\n'), ((9605, 9620), 'os.remove', 'os.remove', (['mask'], {}), '(mask)\n', (9614, 9620), False, 'import os\n'), ((9982, 10001), 'cv2.bitwise_not', 'cv2.bitwise_not', (['im'], {}), '(im)\n', (9997, 10001), False, 'import cv2\n'), ((12577, 12593), 'numpy.minimum', 'np.minimum', (['y', 'z'], {}), '(y, z)\n', (12587, 12593), True, 'import numpy as np\n'), ((12606, 12622), 'numpy.maximum', 'np.maximum', (['y', 'z'], {}), '(y, z)\n', (12616, 12622), True, 
'import numpy as np\n'), ((7151, 7178), 're.compile', 're.compile', (['"""WIDER_images/"""'], {}), "('WIDER_images/')\n", (7161, 7178), False, 'import re\n'), ((8581, 8608), 're.compile', 're.compile', (['"""WIDER_images/"""'], {}), "('WIDER_images/')\n", (8591, 8608), False, 'import re\n'), ((9108, 9131), 're.compile', 're.compile', (['"""_mask.jpg"""'], {}), "('_mask.jpg')\n", (9118, 9131), False, 'import re\n')] |
# coding: utf-8
# ### DEMAPP07
# # Solve Cournot oligopoly model via collocation
# To illustrate implementation of the collocation method for implicit function problems, consider the example of Cournot oligopoly. In the standard microeconomic model of the firm, the firm maximizes profit by equating marginal revenue to marginal cost (MC). An oligopolistic firm, recognizing that its actions affect price, takes the marginal revenue to be $p + q\frac{dp}{dq}$, where $p$ is price, $q$ is quantity produced, and $\frac{dp}{dq}$ is the marginal impact of output on market price. The Cournot assumption is that the firm acts as if any change in its output will be unmatched by its competitors. This implies that
#
# \begin{equation}
# \frac{dp}{dq} = \frac{1}{D'(p)}
# \end{equation}
#
# where $D(p)$ is the market demand curve.
#
# Suppose we wish to derive the effective supply function for the firm, which specifies
# the quantity $q = S(p)$ it will supply at any price. The firm's effective supply function is
# characterized by the functional equation
#
# \begin{equation}
# p + \frac{S(p)}{D'(p)} - MC(S(p)) = 0
# \end{equation}
#
# for all positive prices $p$. In simple cases, this function can be found explicitly. However,
# in more complicated cases, no explicit solution exists.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from compecon import BasisChebyshev, NLP, demo
# ### Model parameters
#
# Here, the demand elasticity and the marginal cost function parameter are
# In[2]:
# Demand elasticity (eta) and marginal-cost parameter (alpha).
alpha, eta = 1.0, 3.5


# In[3]:


# Isoelastic market demand curve D(p) = p**(-eta).
D = lambda p: p**(-eta)


# ### Approximation structure
#
# A degree-25 Chebyshev basis on the interval [0.5, 2.0] is selected; also, the associated collocation nodes `p` are computed.
# (Original comment said [0.5, 3.0], but the code below uses b = 2.0.)

# In[4]:


# n basis functions on the price interval [a, b].
n, a, b = 25, 0.5, 2.0
# S approximates the unknown supply function; initial guess S(p) = 1.
S = BasisChebyshev(n, a, b, labels=['price'], y=np.ones(n))
p = S.nodes  # collocation price nodes


# In[5]:


# Alternative construction of the same basis with a labeled function
# (not used further in this chunk).
S2 = BasisChebyshev(n, a, b, labels=['price'], l=['supply'])
S2.y = np.ones_like(p)
# ### Residual function
#
# Suppose, for example, that
#
# \begin{equation}
# D(p) = p^{-\eta} \quad\text{and}\quad MC(q) = \alpha\sqrt{q} + q^2
# \end{equation}
#
# Then the functional equation to be solved for S(p),
#
# \begin{equation}
# \left[p - \frac{S(p)p^{\eta+1}}{\eta}\right] -\left[\alpha\sqrt{S(p)} + S(p)^2\right] = 0
# \end{equation}
#
# has no known closed-form solution.
# In[6]:
def resid(c):
    """Collocation residual of the Cournot effective-supply equation.

    Installs coefficient vector `c` into the interpolant ``S``, evaluates
    the supplied quantity at the collocation price nodes ``p``, and returns
    the residual p - S(p)*p^(eta+1)/eta - alpha*sqrt(S(p)) - S(p)^2 at each
    node.  Relies on the module-level names ``S``, ``p``, ``eta``, ``alpha``.
    """
    S.c = c
    supplied = S(p)
    # Marginal revenue term minus the two marginal-cost terms, kept in the
    # same evaluation order as the original expression.
    return p - supplied * (p ** (eta+1) / eta) - alpha * np.sqrt(supplied) - supplied ** 2
# Notice that `resid` only takes one argument. The other parameters (`S`, `p`, `eta`, `alpha`) should be declared as such in the main script, where Python's scoping rules will find them.
# ### Solve for effective supply function
#
# Class `NLP` defines nonlinear problems. It can be used to solve `resid` by Broyden's method.
# In[7]:
cournot = NLP(resid)
S.c = cournot.broyden(S.c, tol=1e-12)
# ### Plot demand and effective supply for m=5 firms
# In[8]:
prices = np.linspace(a, b, 501)
fig1 = demo.figure('Cournot Effective Firm Supply Function',
                   'Quantity', 'Price', [0, 4], [0.5, 2])
plt.plot(5 * S(prices), prices, D(prices), prices)
plt.legend(('Supply','Demand'))
# ### Plot residual
#
# Notice that `resid` does not take explicit parameters, so to evaluate it when prices are `prices` we need to assign `p = prices`.
# In order to assess the quality of the approximation, one plots the residual function over the approximation domain. Here, the residual function is plotted by computing the residual at a refined grid of 501 equally spaced points.
# In[9]:
p = prices
fig2 = demo.figure('Residual Function for Cournot Problem',
                   'Quantity', 'Residual')
plt.hlines(0, a, b, 'k', '--', lw=2)
plt.plot(prices, resid(S.c))
# ### Plot demand and effective supply for m=1, 3, 5, 10, 15, 20 firms
# In[10]:
fig3 = demo.figure('Industry Supply and Demand Functions',
                   'Quantity', 'Price', [0, 12], figsize=[9,4])
lcolor = [z['color'] for z in plt.rcParams['axes.prop_cycle']]
for i, m in enumerate([1, 3, 5, 10, 15, 20]):
    plt.plot(m*S(prices), prices) # supply
    demo.annotate(m*S(1.2)-0.25,1.4-i/12,f'm={m:d}',color=lcolor[i],ms=0,fs=12)
plt.plot(D(prices), prices, linewidth=4, color='grey') # demand
demo.annotate(10,0.5,'demand',color='grey', ms=0, fs=12)
# ### Plot equilibrium price as a function of number of firms m
# In[14]:
# Bisection on price for every m at once: halve the step each iteration and
# move against the sign of excess supply m*S(p) - D(p).
pp = (b + a) / 2
dp = (b - a) / 2
m = np.arange(1, 26)
for i in range(50):
    dp /= 2
    pp = pp - np.sign(S(pp) * m - pp ** (-eta)) * dp
fig4 = demo.figure('Cournot Equilibrium Price as Function of Industry Size',
                   'Number of Firms', 'Price')
plt.plot(m, pp)
# In[12]:
demo.savefig([fig1,fig2,fig3,fig4])
| [
"numpy.ones_like",
"compecon.demo.savefig",
"numpy.ones",
"compecon.BasisChebyshev",
"compecon.demo.figure",
"numpy.arange",
"numpy.sqrt",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hlines",
"compecon.demo.annotate",
"numpy.linspace",
"compecon.NLP",
"matplotlib.pyplot.legend"
] | [((1879, 1934), 'compecon.BasisChebyshev', 'BasisChebyshev', (['n', 'a', 'b'], {'labels': "['price']", 'l': "['supply']"}), "(n, a, b, labels=['price'], l=['supply'])\n", (1893, 1934), False, 'from compecon import BasisChebyshev, NLP, demo\n'), ((1942, 1957), 'numpy.ones_like', 'np.ones_like', (['p'], {}), '(p)\n', (1954, 1957), True, 'import numpy as np\n'), ((2909, 2919), 'compecon.NLP', 'NLP', (['resid'], {}), '(resid)\n', (2912, 2919), False, 'from compecon import BasisChebyshev, NLP, demo\n'), ((3034, 3056), 'numpy.linspace', 'np.linspace', (['a', 'b', '(501)'], {}), '(a, b, 501)\n', (3045, 3056), True, 'import numpy as np\n'), ((3064, 3160), 'compecon.demo.figure', 'demo.figure', (['"""Cournot Effective Firm Supply Function"""', '"""Quantity"""', '"""Price"""', '[0, 4]', '[0.5, 2]'], {}), "('Cournot Effective Firm Supply Function', 'Quantity', 'Price',\n [0, 4], [0.5, 2])\n", (3075, 3160), False, 'from compecon import BasisChebyshev, NLP, demo\n'), ((3221, 3253), 'matplotlib.pyplot.legend', 'plt.legend', (["('Supply', 'Demand')"], {}), "(('Supply', 'Demand'))\n", (3231, 3253), True, 'import matplotlib.pyplot as plt\n'), ((3672, 3748), 'compecon.demo.figure', 'demo.figure', (['"""Residual Function for Cournot Problem"""', '"""Quantity"""', '"""Residual"""'], {}), "('Residual Function for Cournot Problem', 'Quantity', 'Residual')\n", (3683, 3748), False, 'from compecon import BasisChebyshev, NLP, demo\n'), ((3761, 3797), 'matplotlib.pyplot.hlines', 'plt.hlines', (['(0)', 'a', 'b', '"""k"""', '"""--"""'], {'lw': '(2)'}), "(0, a, b, 'k', '--', lw=2)\n", (3771, 3797), True, 'import matplotlib.pyplot as plt\n'), ((3920, 4021), 'compecon.demo.figure', 'demo.figure', (['"""Industry Supply and Demand Functions"""', '"""Quantity"""', '"""Price"""', '[0, 12]'], {'figsize': '[9, 4]'}), "('Industry Supply and Demand Functions', 'Quantity', 'Price', [0,\n 12], figsize=[9, 4])\n", (3931, 4021), False, 'from compecon import BasisChebyshev, NLP, demo\n'), ((4334, 4393), 
'compecon.demo.annotate', 'demo.annotate', (['(10)', '(0.5)', '"""demand"""'], {'color': '"""grey"""', 'ms': '(0)', 'fs': '(12)'}), "(10, 0.5, 'demand', color='grey', ms=0, fs=12)\n", (4347, 4393), False, 'from compecon import BasisChebyshev, NLP, demo\n'), ((4509, 4525), 'numpy.arange', 'np.arange', (['(1)', '(26)'], {}), '(1, 26)\n', (4518, 4525), True, 'import numpy as np\n'), ((4619, 4720), 'compecon.demo.figure', 'demo.figure', (['"""Cournot Equilibrium Price as Function of Industry Size"""', '"""Number of Firms"""', '"""Price"""'], {}), "('Cournot Equilibrium Price as Function of Industry Size',\n 'Number of Firms', 'Price')\n", (4630, 4720), False, 'from compecon import BasisChebyshev, NLP, demo\n'), ((4730, 4745), 'matplotlib.pyplot.plot', 'plt.plot', (['m', 'pp'], {}), '(m, pp)\n', (4738, 4745), True, 'import matplotlib.pyplot as plt\n'), ((4760, 4798), 'compecon.demo.savefig', 'demo.savefig', (['[fig1, fig2, fig3, fig4]'], {}), '([fig1, fig2, fig3, fig4])\n', (4772, 4798), False, 'from compecon import BasisChebyshev, NLP, demo\n'), ((1837, 1847), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (1844, 1847), True, 'import numpy as np\n'), ((2538, 2548), 'numpy.sqrt', 'np.sqrt', (['q'], {}), '(q)\n', (2545, 2548), True, 'import numpy as np\n')] |
import torch
import triton
def assert_almost_equal(x, y, decimal=4, err_msg=''):
    """Assert that two array-likes are elementwise equal to `decimal` places.

    Torch tensors are detached and copied to host memory before the
    comparison; any other input is handed to numpy.testing unchanged.
    Raises AssertionError on mismatch, returns None on success.
    """
    import numpy.testing as npt

    def _as_numpy(value):
        # A CUDA/grad tensor cannot be converted directly; detach + cpu first.
        if isinstance(value, torch.Tensor):
            return value.cpu().detach().numpy()
        return value

    npt.assert_array_almost_equal(_as_numpy(x), _as_numpy(y),
                                  err_msg=err_msg, decimal=decimal)
if __name__ == "__main__":
    # Compare tensors dumped from a dense attention implementation against
    # those from a triton block-sparse (bsmm) implementation. All *.pt files
    # are expected in the current working directory.
    BLOCK = 16  # block size used by the block-sparse layout
    key_layer = torch.load("./key_layer.pt")
    value_layer = torch.load("./value_layer.pt")
    query_layer = torch.load("./query_layer.pt")
    key_layer_bsmm = torch.load("./key_layer_bsmm.pt")
    value_layer_bsmm = torch.load("./value_layer_bsmm.pt")
    query_layer_bsmm = torch.load("./query_layer_bsmm.pt")
    # The projected layers should be identical before any sparse op runs.
    assert_almost_equal(key_layer, key_layer_bsmm)
    assert_almost_equal(value_layer, value_layer_bsmm)
    assert_almost_equal(query_layer, query_layer_bsmm)
    # NOTE(review): assumes the layers were saved with sequence as dim 0 and
    # are permuted here into (batch, heads, seq, head_dim) / transposed keys —
    # verify against the producer script.
    query_layer = query_layer.permute(1,2,0,3)
    key_layer = key_layer.permute(1,2,3,0)
    print(key_layer.shape)
    print(value_layer.shape)
    print(query_layer.shape)
    # Lower-triangular (causal) block layout over seq/BLOCK x seq/BLOCK blocks.
    layout = torch.tril(torch.ones((2,query_layer.size(2)//BLOCK, key_layer.size(3)//BLOCK),dtype=torch.long))
    print(layout)
    print(layout.shape)
    attention_scores = torch.load("./attention_scores.pt")
    attention_scores_bsmm = torch.load("./attention_scores_bsmm.pt")
    print(attention_scores.shape)
    # Sparsify the dense scores into the same block layout so shapes match.
    attention_scores = triton.testing.sparsify_tensor(attention_scores, layout, BLOCK)
    print(attention_scores.shape)
    print(attention_scores_bsmm.shape)
    assert_almost_equal(attention_scores, attention_scores_bsmm)
    attention_probs = torch.load("./attention_probs.pt")
    attention_probs = triton.testing.sparsify_tensor(attention_probs, layout, BLOCK)
    attention_probs_bsmm = torch.load("./attention_probs_bsmm.pt")
    assert_almost_equal(attention_probs, attention_probs_bsmm)
    context_layer = torch.load("./context_layer.pt")
    context_layer_bsmm = torch.load("./context_layer_bsmm.pt")
    assert_almost_equal(context_layer, context_layer_bsmm)
assert_almost_ | [
"torch.load",
"numpy.testing.assert_array_almost_equal",
"triton.testing.sparsify_tensor"
] | [((266, 335), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['x', 'y'], {'err_msg': 'err_msg', 'decimal': 'decimal'}), '(x, y, err_msg=err_msg, decimal=decimal)\n', (295, 335), True, 'import numpy.testing as npt\n'), ((395, 423), 'torch.load', 'torch.load', (['"""./key_layer.pt"""'], {}), "('./key_layer.pt')\n", (405, 423), False, 'import torch\n'), ((442, 472), 'torch.load', 'torch.load', (['"""./value_layer.pt"""'], {}), "('./value_layer.pt')\n", (452, 472), False, 'import torch\n'), ((491, 521), 'torch.load', 'torch.load', (['"""./query_layer.pt"""'], {}), "('./query_layer.pt')\n", (501, 521), False, 'import torch\n'), ((543, 576), 'torch.load', 'torch.load', (['"""./key_layer_bsmm.pt"""'], {}), "('./key_layer_bsmm.pt')\n", (553, 576), False, 'import torch\n'), ((600, 635), 'torch.load', 'torch.load', (['"""./value_layer_bsmm.pt"""'], {}), "('./value_layer_bsmm.pt')\n", (610, 635), False, 'import torch\n'), ((659, 694), 'torch.load', 'torch.load', (['"""./query_layer_bsmm.pt"""'], {}), "('./query_layer_bsmm.pt')\n", (669, 694), False, 'import torch\n'), ((1210, 1245), 'torch.load', 'torch.load', (['"""./attention_scores.pt"""'], {}), "('./attention_scores.pt')\n", (1220, 1245), False, 'import torch\n'), ((1274, 1314), 'torch.load', 'torch.load', (['"""./attention_scores_bsmm.pt"""'], {}), "('./attention_scores_bsmm.pt')\n", (1284, 1314), False, 'import torch\n'), ((1372, 1435), 'triton.testing.sparsify_tensor', 'triton.testing.sparsify_tensor', (['attention_scores', 'layout', 'BLOCK'], {}), '(attention_scores, layout, BLOCK)\n', (1402, 1435), False, 'import triton\n'), ((1597, 1631), 'torch.load', 'torch.load', (['"""./attention_probs.pt"""'], {}), "('./attention_probs.pt')\n", (1607, 1631), False, 'import torch\n'), ((1654, 1716), 'triton.testing.sparsify_tensor', 'triton.testing.sparsify_tensor', (['attention_probs', 'layout', 'BLOCK'], {}), '(attention_probs, layout, BLOCK)\n', (1684, 1716), False, 'import triton\n'), 
((1744, 1783), 'torch.load', 'torch.load', (['"""./attention_probs_bsmm.pt"""'], {}), "('./attention_probs_bsmm.pt')\n", (1754, 1783), False, 'import torch\n'), ((1868, 1900), 'torch.load', 'torch.load', (['"""./context_layer.pt"""'], {}), "('./context_layer.pt')\n", (1878, 1900), False, 'import torch\n'), ((1926, 1963), 'torch.load', 'torch.load', (['"""./context_layer_bsmm.pt"""'], {}), "('./context_layer_bsmm.pt')\n", (1936, 1963), False, 'import torch\n')] |
import numpy as np
# Query ids excluded from the FEASIBLE aggregation below.
# NOTE(review): the exclusion reason is not documented here — presumably
# queries that failed or timed out; verify against the experiment notes.
black_list = [106, 114, 117, 120, 12, 130, 134, 136, 138, 142, 153, 158, 170, 174, 20, 26, 2, 33, 40, 41, 42, 43, 44, 54, 58, 5, 63, 65, 6, 74, 86, 89, 94, 98]
# Rewrite ids into the "query_<id>" names used in the result CSVs.
black_list = list(map(lambda x: "query_{}".format(x), black_list))
def compute_overhead(suffix, metric=4):
    """Collect one metric column across the three WatDiv overhead runs.

    Reads results/overhead/watdiv_<suffix>_<run>.csv for runs 1, 2 and 3
    and returns, as a single flat list in run order, the value at column
    `metric` (positional index or field name) of every row that issued
    more than one HTTP call.
    """
    values = []
    for run in (1, 2, 3):
        path = "results/overhead/watdiv_{}_{}.csv".format(suffix, run)
        df = np.genfromtxt(path, delimiter=',', names=True, dtype=None,
                           encoding='utf-8', invalid_raise=False)
        values.extend(row[metric] for row in df if row["httpCalls"] > 1)
    return values
def compute_space(suffix, metric=3):
    """Collect one metric column from the WatDiv space-overhead run.

    Reads results/overhead/watdiv_<suffix>_space_1.csv and returns the
    value at column `metric` of every row with more than one HTTP call.
    """
    path = "results/overhead/watdiv_{}_space_1.csv".format(suffix)
    df = np.genfromtxt(path, delimiter=',', names=True, dtype=None,
                       encoding='utf-8', invalid_raise=False)
    return [row[metric] for row in df if row["httpCalls"] > 1]
def compute_http_watdiv(path, suffix):
    """Average total HTTP request count over the three WatDiv runs.

    Reads results/<path>/run{1,2,3}/1clients/mix_watdiv_queries_0/
    execution_times_<suffix>.csv, sums each run's `httpCalls` column and
    returns the mean of the three totals.

    Bug fix: the original loaded run2's file twice (copy-paste error), so
    run3 was never counted; each run is now read from its own directory.
    """
    totals = []
    for run in (1, 2, 3):
        fname = ("results/{}/run{}/1clients/mix_watdiv_queries_0/"
                 "execution_times_{}.csv".format(path, run, suffix))
        df = np.genfromtxt(fname, delimiter=',', names=True, dtype=None,
                           encoding='utf-8', invalid_raise=False)
        totals.append(np.sum(df["httpCalls"]))
    return np.mean(totals)
def feasible(path):
    """Sum column 2 of every non-blacklisted row in a FEASIBLE result file.

    Reads results/feasible/<path> and sums the value at positional column 2
    of each row whose `query` name is absent from the module-level
    `black_list`.
    """
    df = np.genfromtxt("results/feasible/{}".format(path), delimiter=',',
                       names=True, dtype=None, encoding='utf-8',
                       invalid_raise=False)
    return np.sum([row[2] for row in df if row["query"] not in black_list])
def optional(join_times, triples_times, cardinalities):
    """Compare HTTP request counts of two OPTIONAL evaluation strategies.

    Parameters
    ----------
    join_times : str
        CSV of full-query executions with `query` and `httpCalls` columns.
    triples_times : str
        CSV of single-triple executions for the same queries.
    cardinalities : str
        CSV mapping each `query` to its result `cardinality`.

    Returns a pair (bind_join_total, optimized_total) of summed request
    estimates, skipping any query absent from `triples_times`:
    - bind join: the triple-pass requests plus cardinality / 15
      (presumably one extra request per page of 15 bindings — confirm).
    - optimized: measured requests plus the triple-pass requests minus one.
    """
    read_opts = dict(delimiter=',', names=True, dtype=None,
                     encoding='utf-8', invalid_raise=False)
    df_joins = np.genfromtxt(join_times, **read_opts)
    df_triples = np.genfromtxt(triples_times, **read_opts)
    df_card = np.genfromtxt(cardinalities, **read_opts)
    # Index the per-triple request counts and cardinalities by query name.
    triples = {row['query']: row['httpCalls'] for row in df_triples}
    cards = {row['query']: row['cardinality'] for row in df_card}
    bind_join = []
    optimized = []
    for row in df_joins:
        query = row['query']
        if query in triples:
            bind_join.append(triples[query] + (cards[query] / 15))
            optimized.append(row['httpCalls'] + triples[query] - 1)
    return (np.sum(bind_join), np.sum(optimized))
# --- Overhead of suspending/resuming queries, per dataset size ---
# 1M
sage_1M_import = compute_overhead('1M')
sage_1M_export = compute_overhead('1M', metric=5)
# 10M
sage_10M_import = compute_overhead('10M')
sage_10M_export = compute_overhead('10M', metric=5)
sage_10M_space = compute_space('10M')
# 100M
# NOTE(review): the 100M file addresses its export column by name while the
# others use positional index 5 — presumably the same column; verify.
sage_100M_import = compute_overhead('100M')
sage_100M_export = compute_overhead('100M', metric="exportTime")
print('Overhead')
print('WatDiv 1M')
print('import', np.mean(sage_1M_import))
print('export', np.mean(sage_1M_export))
print('WatDiv 10M')
print('import', np.mean(sage_10M_import))
print('export', np.mean(sage_10M_export))
print('space min', np.min(sage_10M_space) / 1000, 'kb')
print('space max', np.max(sage_10M_space) / 1000, 'kb')
print('space avg', np.mean(sage_10M_space) / 1000, 'kb')
print('space std', np.std(sage_10M_space) / 1000, 'kb')
print('WatDiv 100M')
print('import', np.mean(sage_100M_import))
print('export', np.mean(sage_100M_export))
print('----------------------')
# --- OPTIONAL evaluation strategies (bind join vs. optimized) ---
bj_75, opt_75 = optional('results/watdiv-sage-75ms/run1/1clients/mix_watdiv_queries_0/execution_times_sage.csv', 'results/optionals/1triple_75ms.csv', 'results/optionals/cardinalities.csv')
bj_1s, opt_1s = optional('results/watdiv-sage-1s/run1/1clients/mix_watdiv_queries_0/execution_times_sage.csv', 'results/optionals/1triple_1s.csv', 'results/optionals/cardinalities.csv')
print('Sage-75ms')
print('Sage bind join')
print(bj_75)
print('Sage optimized')
print(opt_75)
print('Sage-1s')
print('Sage bind join')
print(bj_1s)
print('Sage optimized')
print(opt_1s)
print('----------------------')
# http requests
print("HTTP requests WatDiv")
sage_1s = compute_http_watdiv("watdiv-sage-1s", "sage")
sage_75ms = compute_http_watdiv("watdiv-sage-75ms", "sage")
# BrTPF has a single run, so it is read directly instead of averaged.
brtpf = np.genfromtxt("results/watdiv-brtpf/run1/1clients/mix_watdiv_queries_0/execution_times_brtpf.csv", delimiter=',', names=True, dtype=None, encoding='utf-8', invalid_raise=False)
brtpf = np.sum(brtpf['httpCalls'])
tpf = compute_http_watdiv("watdiv-tpf", "tpf")
print("sage 1s")
print(sage_1s)
print("sage 75ms")
print(sage_75ms)
print('BrTPF')
print(brtpf)
print("TPF")
print(tpf)
print('----------------------')
print("HTTP requests FEASIBLE")
sage_1s = feasible("sage_1s.csv")
sage_75ms = feasible("sage_75.csv")
brtpf = feasible('brtpf.csv')
tpf = feasible("tpf.csv")
print("sage 1s")
print(sage_1s)
print("sage 75ms")
print(sage_75ms)
print('BrTPF')
print(brtpf)
print("TPF")
print(tpf)
# plt.rc('text', usetex=True)
# ax = plt.axes(yscale='linear')
# ax.yaxis.grid(color='lightgrey')
# plt.xlabel('WatDiv dataset size (nb triples)', fontsize=17)
# plt.ylabel('Avg. time overhead (ms)', fontsize=17)
# plt.tick_params(axis='both', which='major', labelsize=15)
# plt.tight_layout()
# plt.boxplot([sage_107_import, sage_107_export], positions=[1, 2], vert=True, labels=['\\texttt{Resume}', '\\texttt{Suspend}'])
# plt.boxplot([sage_108_import, sage_108_export], positions=[3, 4], vert=True, labels=['\\texttt{Resume}', '\\texttt{Suspend}'])
# plt.show()
| [
"numpy.mean",
"numpy.std",
"numpy.max",
"numpy.sum",
"numpy.min",
"numpy.genfromtxt"
] | [((5278, 5468), 'numpy.genfromtxt', 'np.genfromtxt', (['"""results/watdiv-brtpf/run1/1clients/mix_watdiv_queries_0/execution_times_brtpf.csv"""'], {'delimiter': '""","""', 'names': '(True)', 'dtype': 'None', 'encoding': '"""utf-8"""', 'invalid_raise': '(False)'}), "(\n 'results/watdiv-brtpf/run1/1clients/mix_watdiv_queries_0/execution_times_brtpf.csv'\n , delimiter=',', names=True, dtype=None, encoding='utf-8',\n invalid_raise=False)\n", (5291, 5468), True, 'import numpy as np\n'), ((5463, 5489), 'numpy.sum', 'np.sum', (["brtpf['httpCalls']"], {}), "(brtpf['httpCalls'])\n", (5469, 5489), True, 'import numpy as np\n'), ((2512, 2620), 'numpy.genfromtxt', 'np.genfromtxt', (['join_times'], {'delimiter': '""","""', 'names': '(True)', 'dtype': 'None', 'encoding': '"""utf-8"""', 'invalid_raise': '(False)'}), "(join_times, delimiter=',', names=True, dtype=None, encoding=\n 'utf-8', invalid_raise=False)\n", (2525, 2620), True, 'import numpy as np\n'), ((2633, 2743), 'numpy.genfromtxt', 'np.genfromtxt', (['triples_times'], {'delimiter': '""","""', 'names': '(True)', 'dtype': 'None', 'encoding': '"""utf-8"""', 'invalid_raise': '(False)'}), "(triples_times, delimiter=',', names=True, dtype=None,\n encoding='utf-8', invalid_raise=False)\n", (2646, 2743), True, 'import numpy as np\n'), ((2754, 2864), 'numpy.genfromtxt', 'np.genfromtxt', (['cardinalities'], {'delimiter': '""","""', 'names': '(True)', 'dtype': 'None', 'encoding': '"""utf-8"""', 'invalid_raise': '(False)'}), "(cardinalities, delimiter=',', names=True, dtype=None,\n encoding='utf-8', invalid_raise=False)\n", (2767, 2864), True, 'import numpy as np\n'), ((3973, 3996), 'numpy.mean', 'np.mean', (['sage_1M_import'], {}), '(sage_1M_import)\n', (3980, 3996), True, 'import numpy as np\n'), ((4014, 4037), 'numpy.mean', 'np.mean', (['sage_1M_export'], {}), '(sage_1M_export)\n', (4021, 4037), True, 'import numpy as np\n'), ((4075, 4099), 'numpy.mean', 'np.mean', (['sage_10M_import'], {}), '(sage_10M_import)\n', (4082, 
4099), True, 'import numpy as np\n'), ((4117, 4141), 'numpy.mean', 'np.mean', (['sage_10M_export'], {}), '(sage_10M_export)\n', (4124, 4141), True, 'import numpy as np\n'), ((4405, 4430), 'numpy.mean', 'np.mean', (['sage_100M_import'], {}), '(sage_100M_import)\n', (4412, 4430), True, 'import numpy as np\n'), ((4448, 4473), 'numpy.mean', 'np.mean', (['sage_100M_export'], {}), '(sage_100M_export)\n', (4455, 4473), True, 'import numpy as np\n'), ((3528, 3545), 'numpy.sum', 'np.sum', (['bind_join'], {}), '(bind_join)\n', (3534, 3545), True, 'import numpy as np\n'), ((3547, 3564), 'numpy.sum', 'np.sum', (['optimized'], {}), '(optimized)\n', (3553, 3564), True, 'import numpy as np\n'), ((4162, 4184), 'numpy.min', 'np.min', (['sage_10M_space'], {}), '(sage_10M_space)\n', (4168, 4184), True, 'import numpy as np\n'), ((4218, 4240), 'numpy.max', 'np.max', (['sage_10M_space'], {}), '(sage_10M_space)\n', (4224, 4240), True, 'import numpy as np\n'), ((4274, 4297), 'numpy.mean', 'np.mean', (['sage_10M_space'], {}), '(sage_10M_space)\n', (4281, 4297), True, 'import numpy as np\n'), ((4331, 4353), 'numpy.std', 'np.std', (['sage_10M_space'], {}), '(sage_10M_space)\n', (4337, 4353), True, 'import numpy as np\n'), ((2026, 2051), 'numpy.sum', 'np.sum', (["df_1['httpCalls']"], {}), "(df_1['httpCalls'])\n", (2032, 2051), True, 'import numpy as np\n'), ((2053, 2078), 'numpy.sum', 'np.sum', (["df_2['httpCalls']"], {}), "(df_2['httpCalls'])\n", (2059, 2078), True, 'import numpy as np\n'), ((2080, 2105), 'numpy.sum', 'np.sum', (["df_3['httpCalls']"], {}), "(df_3['httpCalls'])\n", (2086, 2105), True, 'import numpy as np\n')] |
"""
clone
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy
from mcedit2.command import SimpleRevisionCommand
from mcedit2.editortools import EditorTool
from mcedit2.imports import PendingImportNode, PendingImport
from mcedit2.rendering.scenegraph import scenenode
from PySide import QtGui
from mcedit2.util.showprogress import showProgress
from mcedit2.widgets.coord_widget import CoordinateWidget
from mcedit2.widgets.layout import Column, Row
from mcedit2.widgets.rotation_widget import RotationWidget
from mcedit2.widgets.scale_widget import ScaleWidget
from mceditlib import transform
log = logging.getLogger(__name__)
class CloneSelectionCommand(SimpleRevisionCommand):
    """Undoable command that starts cloning the current selection.

    Redo installs `pendingImport` as the clone tool's active clone and
    switches the editor to the Clone tool; undo clears the pending clone
    and returns to the Select tool.
    """
    def __init__(self, cloneTool, pendingImport, text=None, *args, **kwargs):
        if text is None:
            text = cloneTool.tr("Clone Selected Object")
        super(CloneSelectionCommand, self).__init__(cloneTool.editorSession, text, *args, **kwargs)
        self.pendingImport = pendingImport
        self.cloneTool = cloneTool
    def undo(self):
        # Revert the world revision first, then tear down the tool state.
        super(CloneSelectionCommand, self).undo()
        self.cloneTool.mainPendingClone = None
        self.cloneTool.editorSession.chooseTool("Select")
    def redo(self):
        # Install tool state before replaying the revision.
        self.cloneTool.mainPendingClone = self.pendingImport
        self.cloneTool.editorSession.chooseTool("Clone")
        super(CloneSelectionCommand, self).redo()
class CloneOffsetCommand(QtGui.QUndoCommand):
    """Undoable move of the cloned object between two base positions."""
    def __init__(self, cloneTool, oldPoint, newPoint):
        super(CloneOffsetCommand, self).__init__()
        self.setText(cloneTool.tr("Move Cloned Object"))
        self.newPoint = newPoint
        self.oldPoint = oldPoint
        self.cloneTool = cloneTool
    def undo(self):
        # Assigning clonePosition goes through the tool's property setter,
        # which also refreshes the tiling preview.
        self.cloneTool.clonePosition = self.oldPoint
    def redo(self):
        self.cloneTool.clonePosition = self.newPoint
class CloneRotateCommand(QtGui.QUndoCommand):
    """Undoable rotation change for the cloned objects."""
    def __init__(self, oldRotation, newRotation, cloneTool):
        super(CloneRotateCommand, self).__init__()
        self.cloneTool = cloneTool
        self.setText(QtGui.qApp.tr("Rotate Cloned Objects"))
        self.newRotation = newRotation
        self.oldRotation = oldRotation
    def undo(self):
        self.cloneTool.setRotation(self.oldRotation)
    def redo(self):
        self.cloneTool.setRotation(self.newRotation)
class CloneScaleCommand(QtGui.QUndoCommand):
    """Undoable scale change for the cloned objects."""
    def __init__(self, oldScale, newScale, cloneTool):
        super(CloneScaleCommand, self).__init__()
        self.cloneTool = cloneTool
        self.setText(QtGui.qApp.tr("Scale Cloned Objects"))
        self.newScale = newScale
        self.oldScale = oldScale
    def undo(self):
        self.cloneTool.setScale(self.oldScale)
    def redo(self):
        self.cloneTool.setScale(self.newScale)
class CloneFinishCommand(SimpleRevisionCommand):
    """Undoable command that commits the clone into the world.

    Redo clears the tool's pending state, selects the cloned bounds and
    returns to the Select tool; undo restores the pending clone, origin
    point and the previous selection, and reactivates the Clone tool.
    """
    def __init__(self, cloneTool, pendingImport, originPoint, *args, **kwargs):
        super(CloneFinishCommand, self).__init__(cloneTool.editorSession, cloneTool.tr("Finish Clone"), *args, **kwargs)
        self.pendingImport = pendingImport
        self.cloneTool = cloneTool
        self.originPoint = originPoint
        # Captured on first redo so undo can restore what was selected before.
        self.previousSelection = None
    def undo(self):
        super(CloneFinishCommand, self).undo()
        self.cloneTool.mainPendingClone = self.pendingImport
        self.cloneTool.originPoint = self.originPoint
        self.editorSession.currentSelection = self.previousSelection
        self.editorSession.chooseTool("Clone")
    def redo(self):
        super(CloneFinishCommand, self).redo()
        self.previousSelection = self.editorSession.currentSelection
        self.editorSession.currentSelection = self.pendingImport.bounds
        self.cloneTool.mainPendingClone = None
        self.cloneTool.originPoint = None
        self.editorSession.chooseTool("Select")
class CloneTool(EditorTool):
"""
Make multiple copies of the selected area. When selected, displays a preview of the
copies and allows the position, repeat count, and transforms to be changed.
Attributes
----------
mainPendingClone : PendingImport
The object currently being cloned.
pendingClones : list of PendingImport
Repeated imports of the object being cloned
"""
iconName = "clone"
name = "Clone"
modifiesWorld = True
def __init__(self, editorSession, *args, **kwargs):
super(CloneTool, self).__init__(editorSession, *args, **kwargs)
self.originPoint = None
self.pendingClones = []
self.pendingCloneNodes = []
self.mainCloneNode = None
self.overlayNode = scenenode.Node("cloneOverlay")
self.toolWidget = QtGui.QWidget()
self.pointInput = CoordinateWidget()
self.pointInput.pointChanged.connect(self.pointInputChanged)
self.rotationInput = RotationWidget()
self.rotationInput.rotationChanged.connect(self.rotationChanged)
self.scaleInput = ScaleWidget()
self.scaleInput.scaleChanged.connect(self.scaleChanged)
confirmButton = QtGui.QPushButton(self.tr("Confirm")) # xxxx should be in worldview
confirmButton.clicked.connect(self.confirmClone)
self.repeatCount = 1
self.repeatCountInput = QtGui.QSpinBox(minimum=1, maximum=10000, value=1)
self.repeatCountInput.valueChanged.connect(self.setRepeatCount)
self.rotateRepeatsCheckbox = QtGui.QCheckBox(self.tr("Rotate Repeats"))
self.rotateRepeatsCheckbox.toggled.connect(self.updateTiling)
self.rotateOffsetCheckbox = QtGui.QCheckBox(self.tr("Rotate Offset"))
self.rotateOffsetCheckbox.toggled.connect(self.updateTiling)
self.toolWidget.setLayout(Column(self.pointInput,
self.rotationInput,
Row(self.rotateRepeatsCheckbox,
self.rotateOffsetCheckbox),
self.scaleInput,
Row(QtGui.QLabel(self.tr("Repeat count: ")), self.repeatCountInput),
confirmButton,
None))
self.mainPendingClone = None # Do this after creating pointInput to disable inputs
def pointInputChanged(self, value):
if self.mainPendingClone.basePosition != value:
self.mainPendingClone.basePosition = value
self.updateTiling()
def rotationChanged(self, rots, live):
scale = self.scaleInput.scale
if live:
for node, (nodePos, nodeRots, nodeScale) in zip(self.pendingCloneNodes, self.getTilingPositions(None, rots, scale)):
node.setPreviewRotation(nodeRots)
node.setPreviewScale(nodeScale)
node.setPreviewBasePosition(nodePos + node.pendingImport.transformOffset)
self.editorSession.updateView()
else:
if self.mainPendingClone and self.mainPendingClone.rotation != rots:
command = CloneRotateCommand(self.mainPendingClone.rotation, rots, self)
self.editorSession.pushCommand(command)
self.updateTiling()
def scaleChanged(self, scale, live):
rots = self.rotationInput.rotation
if live:
for node, (nodePos, nodeRots, nodeScale) in zip(self.pendingCloneNodes, self.getTilingPositions(None, rots, scale)):
node.setPreviewRotation(nodeRots)
node.setPreviewScale(nodeScale)
node.setPreviewBasePosition(nodePos + node.pendingImport.transformOffset)
self.editorSession.updateView()
else:
if self.mainPendingClone and self.mainPendingClone.scale != scale:
command = CloneScaleCommand(self.mainPendingClone.scale, scale, self)
self.editorSession.pushCommand(command)
self.updateTiling()
def setRepeatCount(self, value):
self.repeatCount = value
self.updateTiling()
def setRotation(self, rots):
if self.mainPendingClone is None:
return
else:
self.mainPendingClone.rotation = rots
self.updateTiling()
def setScale(self, scale):
if self.mainPendingClone is None:
return
else:
self.mainPendingClone.scale = scale
self.updateTiling()
def updateTiling(self):
if self.mainPendingClone is None:
repeatCount = 0
else:
repeatCount = self.repeatCount
while len(self.pendingClones) > repeatCount:
node = self.pendingCloneNodes.pop()
self.overlayNode.removeChild(node)
self.pendingClones.pop()
while len(self.pendingClones) < repeatCount:
clone = PendingImport(self.mainPendingClone.sourceDim,
self.mainPendingClone.basePosition,
self.mainPendingClone.selection,
self.mainPendingClone.text + " %d" % len(self.pendingClones))
node = PendingImportNode(clone,
self.editorSession.textureAtlas,
hasHandle=len(self.pendingClones) == 0)
self.pendingClones.append(clone)
self.pendingCloneNodes.append(node)
self.overlayNode.addChild(node)
# This is stupid.
if self.mainCloneNode:
self.mainCloneNode.importMoved.disconnect(self.cloneDidMove)
self.mainCloneNode.importIsMoving.disconnect(self.cloneIsMoving)
if repeatCount > 0:
self.mainCloneNode = self.pendingCloneNodes[0]
self.mainCloneNode.importMoved.connect(self.cloneDidMove)
self.mainCloneNode.importIsMoving.connect(self.cloneIsMoving)
else:
self.mainCloneNode = None
self.updateTilingPositions()
def updateTilingPositions(self, offsetPoint=None):
if self.originPoint is not None:
for clone, (pos, rots, scale) in zip(self.pendingClones, self.getTilingPositions(offsetPoint)):
clone.basePosition = pos
clone.rotation = rots
clone.scale = scale
self.editorSession.updateView()
def getTilingPositions(self, offsetPoint=None, rotations=None, scale=None):
rotateRepeats = self.rotateRepeatsCheckbox.isChecked()
rotateOffsets = self.rotateOffsetCheckbox.isChecked()
baseRotations = rotations or self.mainPendingClone.rotation
rotations = baseRotations
scale = scale or self.mainPendingClone.scale
matrix = transform.transformationMatrix((0, 0, 0), rotations, scale)
matrix = numpy.linalg.inv(matrix)[:3, :3]
# TODO: Use scales here
if offsetPoint is None:
offsetPoint = self.mainPendingClone.basePosition
if None not in (offsetPoint, self.originPoint):
pos = self.originPoint
offset = offsetPoint - self.originPoint
for i in range(self.repeatCount):
pos = pos + offset
yield pos.intfloor(), rotations, scale
if rotateRepeats:
rotations = [a+b for a,b in zip(rotations, baseRotations)]
if rotateOffsets:
# Convert to 4-element column and back
offset = (offset * matrix).T
offset = tuple(float(x) for x in offset)
@property
def mainPendingClone(self):
return self._pendingClone
@mainPendingClone.setter
def mainPendingClone(self, pendingImport):
log.info("Begin clone: %s", pendingImport)
self._pendingClone = pendingImport
self.pointInput.setEnabled(pendingImport is not None)
if pendingImport:
self.pointInput.point = pendingImport.basePosition
self.updateTiling()
def toolActive(self):
self.editorSession.selectionTool.hideSelectionWalls = True
if self.mainPendingClone is None:
if self.editorSession.currentSelection is None:
return
# This makes a reference to the latest revision in the editor.
# If the cloned area is changed between "Clone" and "Confirm", the changed
# blocks will be cloned.
pos = self.editorSession.currentSelection.origin
self.pointInput.origin = self.originPoint = pos
pendingImport = PendingImport(self.editorSession.currentDimension, pos,
self.editorSession.currentSelection,
self.tr("<Cloned Object>"))
moveCommand = CloneSelectionCommand(self, pendingImport)
self.editorSession.pushCommand(moveCommand)
self.updateTiling()
def toolInactive(self):
self.editorSession.selectionTool.hideSelectionWalls = False
# if self.mainCloneNode:
# self.mainCloneNode.hoverFace(None)
self.confirmClone()
def confirmClone(self):
if self.mainPendingClone is None:
return
command = CloneFinishCommand(self, self.mainPendingClone, self.originPoint)
with command.begin():
tasks = []
for clone in self.pendingClones:
# TODO don't use intermediate schematic...
destDim = self.editorSession.currentDimension
dim, selection = clone.getSourceForDim(destDim)
task = destDim.copyBlocksIter(dim, selection, clone.importPos,
biomes=True, create=True, copyAir=False)
tasks.append(task)
showProgress(self.tr("Pasting..."), *tasks)
self.editorSession.pushCommand(command)
@property
def clonePosition(self):
return None if self.mainPendingClone is None else self.mainPendingClone.basePosition
@clonePosition.setter
def clonePosition(self, value):
"""
:type value: Vector
"""
self.pointInput.point = value
self.pointInputChanged(value)
# --- Mouse events ---
def mouseMove(self, event):
if self.mainCloneNode is not None:
self.mainCloneNode.mouseMove(event)
def mouseDrag(self, event):
if self.mainCloneNode is not None:
self.mainCloneNode.mouseMove(event)
def mousePress(self, event):
if self.mainCloneNode is not None:
self.mainCloneNode.mousePress(event)
def mouseRelease(self, event):
if self.mainCloneNode is not None:
self.mainCloneNode.mouseRelease(event)
# --- Box handle events ---
def cloneDidMove(self, newPoint, oldPoint):
log.info("clone offset command: %s %s", oldPoint, newPoint)
if newPoint != oldPoint:
command = CloneOffsetCommand(self, oldPoint, newPoint)
self.editorSession.pushCommand(command)
    def cloneIsMoving(self, newPoint):
        """Called continuously while the clone is being dragged; keeps the
        tiling positions in sync with the cursor."""
        self.updateTilingPositions(newPoint)
| [
"logging.getLogger",
"PySide.QtGui.QSpinBox",
"mcedit2.widgets.rotation_widget.RotationWidget",
"mcedit2.rendering.scenegraph.scenenode.Node",
"PySide.QtGui.qApp.tr",
"mcedit2.widgets.scale_widget.ScaleWidget",
"mceditlib.transform.transformationMatrix",
"mcedit2.widgets.coord_widget.CoordinateWidget"... | [((668, 695), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (685, 695), False, 'import logging\n'), ((4680, 4710), 'mcedit2.rendering.scenegraph.scenenode.Node', 'scenenode.Node', (['"""cloneOverlay"""'], {}), "('cloneOverlay')\n", (4694, 4710), False, 'from mcedit2.rendering.scenegraph import scenenode\n'), ((4738, 4753), 'PySide.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (4751, 4753), False, 'from PySide import QtGui\n'), ((4780, 4798), 'mcedit2.widgets.coord_widget.CoordinateWidget', 'CoordinateWidget', ([], {}), '()\n', (4796, 4798), False, 'from mcedit2.widgets.coord_widget import CoordinateWidget\n'), ((4898, 4914), 'mcedit2.widgets.rotation_widget.RotationWidget', 'RotationWidget', ([], {}), '()\n', (4912, 4914), False, 'from mcedit2.widgets.rotation_widget import RotationWidget\n'), ((5015, 5028), 'mcedit2.widgets.scale_widget.ScaleWidget', 'ScaleWidget', ([], {}), '()\n', (5026, 5028), False, 'from mcedit2.widgets.scale_widget import ScaleWidget\n'), ((5314, 5363), 'PySide.QtGui.QSpinBox', 'QtGui.QSpinBox', ([], {'minimum': '(1)', 'maximum': '(10000)', 'value': '(1)'}), '(minimum=1, maximum=10000, value=1)\n', (5328, 5363), False, 'from PySide import QtGui\n'), ((10792, 10851), 'mceditlib.transform.transformationMatrix', 'transform.transformationMatrix', (['(0, 0, 0)', 'rotations', 'scale'], {}), '((0, 0, 0), rotations, scale)\n', (10822, 10851), False, 'from mceditlib import transform\n'), ((2129, 2167), 'PySide.QtGui.qApp.tr', 'QtGui.qApp.tr', (['"""Rotate Cloned Objects"""'], {}), "('Rotate Cloned Objects')\n", (2142, 2167), False, 'from PySide import QtGui\n'), ((2603, 2640), 'PySide.QtGui.qApp.tr', 'QtGui.qApp.tr', (['"""Scale Cloned Objects"""'], {}), "('Scale Cloned Objects')\n", (2616, 2640), False, 'from PySide import QtGui\n'), ((10869, 10893), 'numpy.linalg.inv', 'numpy.linalg.inv', (['matrix'], {}), '(matrix)\n', (10885, 10893), False, 'import 
numpy\n'), ((5896, 5954), 'mcedit2.widgets.layout.Row', 'Row', (['self.rotateRepeatsCheckbox', 'self.rotateOffsetCheckbox'], {}), '(self.rotateRepeatsCheckbox, self.rotateOffsetCheckbox)\n', (5899, 5954), False, 'from mcedit2.widgets.layout import Column, Row\n')] |
from numpy import sqrt
def E2V(E):
    """Convert a neutron energy in meV to a velocity in m/s.

    Inverse of V2E: v = sqrt(E / 5.227e-6).
    """
    coeff = 5.227e-6  # meV per (m/s)^2 for a neutron
    return sqrt(E / coeff)
def V2E(V):
    """Convert a neutron velocity in m/s to an energy in meV.

    Inverse of E2V: E = 5.227e-6 * v^2.
    """
    coeff = 5.227e-6  # meV per (m/s)^2 for a neutron
    return coeff * V * V
def E2K(E):
    """Convert a neutron energy in meV to a wavevector k in inverse angstroms.

    k = sqrt(E / 2.0723).
    """
    ratio = E / 2.0723  # 2.0723 meV corresponds to k = 1 A^-1
    return sqrt(ratio)
def V2lambda(V):
    """Convert a neutron velocity in m/s to a wavelength in angstroms.

    lambda = 3956 / v (h / m_n in A*m/s units, approximately 3956).
    """
    return 3956 / V
| [
"numpy.sqrt"
] | [((178, 197), 'numpy.sqrt', 'sqrt', (['(E / 5.227e-06)'], {}), '(E / 5.227e-06)\n', (182, 197), False, 'from numpy import sqrt\n'), ((463, 479), 'numpy.sqrt', 'sqrt', (['(E / 2.0723)'], {}), '(E / 2.0723)\n', (467, 479), False, 'from numpy import sqrt\n')] |
from readability_score.calculators.fleschkincaid import FleschKincaid
# Download from https://github.com/wimmuskee/readability-score
import pandas as pd
import numpy as np
from loadRandom import loadRandom, loadRandom2
from models import getLM, getNBM
import matplotlib.pyplot as plt
import numpy_indexed as npi
if __name__ == '__main__':
    # Fixed seed so the random sample of reviews is reproducible.
    seed = 2
    # data = loadRandom(
    #     '/Users/caitchison/Documents/Yelp/yelp_dataset/review.json', 1e5, seed).loc[
    #     :, ('stars', 'text', 'useful', 'cool', 'funny')]
    # Sample 1e5 restaurant reviews (out of 3778803 rows) and keep only the
    # columns used below.
    data = loadRandom2(
        '/Users/caitchison/Documents/Yelp/yelp_dataset/restaurants_only.csv', 1e5, seed, 3778803).loc[
        :, ('stars', 'text', 'useful', 'cool', 'funny')]
    # Get metric and mask: total interaction count per review; keep only the
    # reviews at or above the median to drop low-engagement noise.
    interactions = np.array(data.useful + data.cool + data.funny)
    mask = interactions >= np.median(interactions)
    interactions = interactions[mask]
    # Get readability score using FleischKincaid; non-positive minimum ages
    # are invalid and filtered out together with their interaction counts.
    reviews = np.array(data.text)[mask]
    minAge = np.array([FleschKincaid(text).min_age for text in reviews])
    mask = minAge > 0
    minAge = minAge[mask]
    interactions = interactions[mask]
    x = np.log10(minAge)
    # Median interaction count for each distinct readability value.
    x_unique, y_mean = npi.group_by(x).median(interactions)
    # Fit a linear model restricted to log10(age) < 1.3, skipping the first
    # grouped point.
    results = getLM(x_unique[x_unique < 1.3][1:],
                   y_mean[x_unique < 1.3][1:])
    """
    plt.hist(rsq, density=False, bins=15, rwidth=0.95)
    plt.title('Correlations of Readability to Interactions (N=1e5, n=1e2)')
    plt.xlabel('Pearson Correlation Coefficient')
    plt.ylabel('Count')
    plt.show()
    """
    print("READABILITY (Means)\n", results.summary())
    # Top panel: raw scatter of readability vs log interactions.
    plt.subplot(2, 1, 1)
    plt.scatter(x, np.log10(interactions), alpha=0.5)
    plt.title('Readability Score vs Interactions')
    plt.ylabel('Interactions Count (log10)')
    # Bottom panel: median interactions per readability value.
    plt.subplot(2, 1, 2)
    plt.scatter(x_unique, y_mean, alpha=0.5)
    plt.xlabel('Log(10) Minimum Reading Age (FK)')
    plt.ylabel('Interactions Count (means)')
    plt.show()
| [
"numpy.log10",
"numpy.median",
"matplotlib.pyplot.ylabel",
"loadRandom.loadRandom2",
"matplotlib.pyplot.xlabel",
"readability_score.calculators.fleschkincaid.FleschKincaid",
"models.getLM",
"numpy.array",
"numpy_indexed.group_by",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplo... | [((762, 808), 'numpy.array', 'np.array', (['(data.useful + data.cool + data.funny)'], {}), '(data.useful + data.cool + data.funny)\n', (770, 808), True, 'import numpy as np\n'), ((1156, 1172), 'numpy.log10', 'np.log10', (['minAge'], {}), '(minAge)\n', (1164, 1172), True, 'import numpy as np\n'), ((1266, 1329), 'models.getLM', 'getLM', (['x_unique[x_unique < 1.3][1:]', 'y_mean[x_unique < 1.3][1:]'], {}), '(x_unique[x_unique < 1.3][1:], y_mean[x_unique < 1.3][1:])\n', (1271, 1329), False, 'from models import getLM, getNBM\n'), ((1663, 1683), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1674, 1683), True, 'import matplotlib.pyplot as plt\n'), ((1742, 1788), 'matplotlib.pyplot.title', 'plt.title', (['"""Readability Score vs Interactions"""'], {}), "('Readability Score vs Interactions')\n", (1751, 1788), True, 'import matplotlib.pyplot as plt\n'), ((1793, 1833), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Interactions Count (log10)"""'], {}), "('Interactions Count (log10)')\n", (1803, 1833), True, 'import matplotlib.pyplot as plt\n'), ((1877, 1897), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1888, 1897), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1942), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_unique', 'y_mean'], {'alpha': '(0.5)'}), '(x_unique, y_mean, alpha=0.5)\n', (1913, 1942), True, 'import matplotlib.pyplot as plt\n'), ((1947, 1993), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Log(10) Minimum Reading Age (FK)"""'], {}), "('Log(10) Minimum Reading Age (FK)')\n", (1957, 1993), True, 'import matplotlib.pyplot as plt\n'), ((1998, 2038), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Interactions Count (means)"""'], {}), "('Interactions Count (means)')\n", (2008, 2038), True, 'import matplotlib.pyplot as plt\n'), ((2044, 2054), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2052, 2054), True, 'import matplotlib.pyplot as plt\n'), 
((836, 859), 'numpy.median', 'np.median', (['interactions'], {}), '(interactions)\n', (845, 859), True, 'import numpy as np\n'), ((962, 981), 'numpy.array', 'np.array', (['data.text'], {}), '(data.text)\n', (970, 981), True, 'import numpy as np\n'), ((1703, 1725), 'numpy.log10', 'np.log10', (['interactions'], {}), '(interactions)\n', (1711, 1725), True, 'import numpy as np\n'), ((539, 655), 'loadRandom.loadRandom2', 'loadRandom2', (['"""/Users/caitchison/Documents/Yelp/yelp_dataset/restaurants_only.csv"""', '(100000.0)', 'seed', '(3778803)'], {}), "(\n '/Users/caitchison/Documents/Yelp/yelp_dataset/restaurants_only.csv', \n 100000.0, seed, 3778803)\n", (550, 655), False, 'from loadRandom import loadRandom, loadRandom2\n'), ((1196, 1211), 'numpy_indexed.group_by', 'npi.group_by', (['x'], {}), '(x)\n', (1208, 1211), True, 'import numpy_indexed as npi\n'), ((1011, 1030), 'readability_score.calculators.fleschkincaid.FleschKincaid', 'FleschKincaid', (['text'], {}), '(text)\n', (1024, 1030), False, 'from readability_score.calculators.fleschkincaid import FleschKincaid\n')] |
#!/usr/bin/env python
from __future__ import print_function
import sys
import math
import numpy as np
#ROS Imports
import rospy
# from tf2_ros import transform_broadcaster
from tf.transformations import euler_from_quaternion
from sensor_msgs.msg import Image, LaserScan
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from nav_msgs.msg import Odometry
from message_filters import ApproximateTimeSynchronizer, Subscriber
# from ackermann_msgs.msg import AckermannDriveStamped, AckermannDrive
class local_obs:
    """Detect nearby obstacles in a synchronized LiDAR/odometry stream,
    build axis-aligned bounding boxes around them (in the car frame and the
    fixed map frame), and publish RViz line markers for visualization."""

    def __init__(self):
        # Topics & Subscriptions, Publishers
        lidarscan_topic = 'scan'
        odom_topic = 'odom'
        obstacle_topic = 'obs_loc'
        # message_filters subscribers so scan and odometry can be time-paired.
        self.lidar_sub = Subscriber(lidarscan_topic, LaserScan, queue_size=1)
        self.odom_sub = Subscriber(odom_topic, Odometry, queue_size=1)
        self.pub = rospy.Publisher(obstacle_topic, MarkerArray, queue_size="1")
        # Pair scan/odom messages whose stamps differ by at most 20 ms.
        self.sub = ApproximateTimeSynchronizer([self.lidar_sub, self.odom_sub],
                                               queue_size=10, slop=0.02)
        self.sub.registerCallback(self.master_callback)

    def master_callback(self, lidarD, odomD):
        """Synchronized scan+odometry callback: detect obstacles, build boxes
        in car-local and map coordinates, and publish RViz markers."""
        # 1) LiDAR data: smooth, then bail out early if nothing is close.
        ranges = lidarD.ranges
        proc_ranges, danger_idxs = self.preprocess_lidar(ranges)
        index = np.argmin(proc_ranges)
        min_distance = ranges[index]
        # TODO: scale the 2 m threshold with current speed / lookahead time.
        if min_distance > 2:
            print('We are safe MA FRIENDS')
            return
        # Group the scan into obstacle index spans.
        obstacles = self.find_obs(proc_ranges, danger_idxs)
        if not obstacles:
            return
        les = self.obsCoordinates_mark(obstacles, ranges)       # for markers
        les_car = self.obsCoordinates_car(obstacles, ranges)    # for car (rtreach)
        # 2) Car pose from odometry.
        car = odomD.pose.pose
        (roll, pitch, yaw) = euler_from_quaternion([car.orientation.x, car.orientation.y,
                                                  car.orientation.z, car.orientation.w])
        car_pos = [car.position.x, car.position.y, roll, pitch, yaw]
        print('Car driving at angle ' + str(yaw))
        # 3) Obstacles as boxes in both frames.
        obs_intervals = self.obsZono_local(les_car)
        map_obs_intervals = self.obsZono_gen(les, car_pos)
        # 4) Publish the line markers.
        obs_MarkerArray = self.createMakers(les, car_pos)
        self.pub.publish(obs_MarkerArray)
        return

    def preprocess_lidar(self, ranges):
        """Smooth the raw LiDAR ranges and flag far-away ("safe") beams.

        Each beam becomes the mean of itself and its two predecessors
        (wrapping at the start via negative indexing); beams farther than
        2 m are clamped to 10 and their indices recorded.

        Returns:
            (proc_ranges, danger_idxs)
        """
        n = len(ranges)
        proc_ranges = [0] * n
        danger_idxs = []
        for i in range(n):
            proc_ranges[i] = (ranges[i] + ranges[i - 1] + ranges[i - 2]) / 3
            if ranges[i] > 2:
                proc_ranges[i] = 10
                danger_idxs.append(i)
            # Fixed: the original compared the float reading against the
            # string "nan", which can never match. Invalid readings now fall
            # back to the previous processed value.
            if math.isnan(ranges[i]):
                proc_ranges[i] = max(proc_ranges[i - 1], 0)
        return proc_ranges, danger_idxs

    def find_obs(self, proc_ranges, danger_idxs):
        """Split the "safe" beam indices into obstacle gaps.

        A jump of more than 10 between consecutive safe indices marks an
        obstacle; each obstacle is returned as a [start, end] index pair.
        Returns False when the safe indices are contiguous (no obstacle).
        Assumes danger_idxs is non-empty (guaranteed by the caller's
        min-distance check).
        """
        n = len(danger_idxs)
        obstacles = []
        # Work on copies so the caller's list is not mutated.
        prev = list(danger_idxs)
        prev.insert(0, prev[0])
        idxs = list(danger_idxs)
        idxs.append(idxs[n - 1])
        jumps = np.asarray(idxs) - np.asarray(prev)
        jIdxs = np.where(jumps > 10)
        if len(jIdxs[0]) == 0:
            print('No danger... Keep the Autopilot going... zzZ')
            return False
        jIdxs = list(jIdxs[0])
        for k in range(len(jIdxs)):
            if k == 0:
                x1 = 0
            else:
                x1 = int(idxs[jIdxs[k - 1] - 1] + 1)
            x2 = int(idxs[jIdxs[k] - 1])
            obstacles.append([x1, x2])
        return obstacles

    def obsCoordinates_mark(self, obstacles, ranges):
        """Sample a (range, angle) pair from every 4th beam inside each
        obstacle span; used for marker drawing."""
        les = []
        for start, end in obstacles:
            pObs = []
            for i in np.arange(start, end, 4):
                # NOTE(review): -2.356 looks like radians (-135 deg) while
                # the 0.25 step looks like degrees, yet the sum is converted
                # as degrees -- confirm the intended angle units.
                angle = -2.356 + i * 0.25  # degrees
                angRad = angle * math.pi / 180
                pObs.append([ranges[i], angRad])
            les.append(pObs)
        return les

    def obsCoordinates_car(self, obstacles, ranges):
        """Like obsCoordinates_mark, but returns Cartesian [x, y, angleRad]
        points in the car frame (consumed by the reachability tool)."""
        les = []
        for start, end in obstacles:
            pObs = []
            for i in np.arange(start, end, 4):
                angle = -2.356 + i * 0.25  # degrees (see unit note above)
                angRad = angle * math.pi / 180
                dist = ranges[i]
                pObs.append([dist * math.cos(angRad), dist * math.sin(angRad), angRad])
            les.append(pObs)
        return les

    def obsZono_local(self, les):
        """Build [xmin, xmax, ymin, ymax] boxes spanning consecutive sampled
        points of each obstacle, in car-local coordinates.

        Fixed: the original computed the max bounds with min(), making every
        box degenerate (zero area).
        """
        boxes = []
        for obs in les:
            # NOTE(review): range(len(obs) - 2) skips the final point pair;
            # kept as-is -- confirm whether len(obs) - 1 was intended.
            for i in range(len(obs) - 2):
                x0, y0 = obs[i][0], obs[i][1]
                x1, y1 = obs[i + 1][0], obs[i + 1][1]
                boxes.append([min(x0, x1), max(x0, x1), min(y0, y1), max(y0, y1)])
        return boxes

    def obsZono_gen(self, les, car):
        """Like obsZono_local, but each (range, angle) sample is first rotated
        by the car's yaw into the fixed map frame.

        Fixed: the original mutated the points of `les` in place (corrupting
        the data later reused by createMakers) and used min() for the max
        bounds. This version leaves `les` untouched.
        """
        boxes = []
        yaw = car[4]
        for obs in les:
            # NOTE(review): range(len(obs) - 2) skips the final point pair;
            # kept as-is -- confirm whether len(obs) - 1 was intended.
            for i in range(len(obs) - 2):
                r1, a1 = obs[i][0], obs[i][1]
                r2, a2 = obs[i + 1][0], obs[i + 1][1]
                x1, y1 = r1 * math.cos(yaw - a1), r1 * math.sin(yaw - a1)
                x2, y2 = r2 * math.cos(yaw - a2), r2 * math.sin(yaw - a2)
                boxes.append([min(x1, x2), max(x1, x2), min(y1, y2), max(y1, y2)])
        return boxes

    def createMakers(self, les, car):
        """Build a MarkerArray with one LINE_STRIP per obstacle. Each sampled
        (range, angle) point is rotated by the car's yaw and translated to
        the car position so the markers land in the /map frame."""
        rgb_color = [0, 1, 0]
        markerArray = MarkerArray()
        for ob in les:
            line = Marker()  # linked line for one obstacle
            line.header.frame_id = "/map"
            line.type = Marker.LINE_STRIP
            line.action = Marker.ADD
            line.scale.x = 0.1
            line.scale.y = 0.1
            line.scale.z = 0.1
            line.color.a = 1.0
            line.color.r = rgb_color[0]
            line.color.g = rgb_color[1]
            line.color.b = rgb_color[2]
            for k in ob:
                point1 = Marker()
                point1.header.frame_id = "/map"
                point1.type = Marker.POINTS
                point1.action = Marker.ADD
                point1.scale.x = 0.1
                point1.scale.y = 0.1
                point1.scale.z = 0.1
                point1.color.a = 1.0
                point1.color.r = rgb_color[0]
                point1.color.g = rgb_color[1]
                point1.color.b = rgb_color[2]
                point1.pose.orientation.w = car[4]
                # Rotate the (range, angle) sample into the map frame and
                # translate by the car position.
                newAng = car[4] - k[1]
                point1.pose.position.x = car[0] + k[0] * math.cos(newAng)
                point1.pose.position.y = car[1] + k[0] * math.sin(newAng)
                point1.pose.position.z = 0
                line.points.append(point1)
            markerArray.markers.append(line)
            # Shift the color per obstacle so they are distinguishable.
            rgb_color[0] += 0.05
            rgb_color[1] -= 0.05
            rgb_color[2] += 0.05
        return markerArray
if __name__ == '__main__':
    # Start the node and spin forever; all work happens in the callbacks.
    rospy.init_node("localObs_node", anonymous=True)
    rfgs = local_obs()
    # rospy.sleep(0.1)
    rospy.spin()
| [
"tf.transformations.euler_from_quaternion",
"visualization_msgs.msg.Marker",
"visualization_msgs.msg.MarkerArray",
"numpy.where",
"rospy.init_node",
"numpy.asarray",
"math.sin",
"math.cos",
"rospy.spin",
"numpy.argmin",
"message_filters.Subscriber",
"message_filters.ApproximateTimeSynchronizer... | [((11407, 11455), 'rospy.init_node', 'rospy.init_node', (['"""localObs_node"""'], {'anonymous': '(True)'}), "('localObs_node', anonymous=True)\n", (11422, 11455), False, 'import rospy\n'), ((11506, 11518), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (11516, 11518), False, 'import rospy\n'), ((788, 840), 'message_filters.Subscriber', 'Subscriber', (['lidarscan_topic', 'LaserScan'], {'queue_size': '(1)'}), '(lidarscan_topic, LaserScan, queue_size=1)\n', (798, 840), False, 'from message_filters import ApproximateTimeSynchronizer, Subscriber\n'), ((865, 911), 'message_filters.Subscriber', 'Subscriber', (['odom_topic', 'Odometry'], {'queue_size': '(1)'}), '(odom_topic, Odometry, queue_size=1)\n', (875, 911), False, 'from message_filters import ApproximateTimeSynchronizer, Subscriber\n'), ((1138, 1198), 'rospy.Publisher', 'rospy.Publisher', (['obstacle_topic', 'MarkerArray'], {'queue_size': '"""1"""'}), "(obstacle_topic, MarkerArray, queue_size='1')\n", (1153, 1198), False, 'import rospy\n'), ((1258, 1348), 'message_filters.ApproximateTimeSynchronizer', 'ApproximateTimeSynchronizer', (['[self.lidar_sub, self.odom_sub]'], {'queue_size': '(10)', 'slop': '(0.02)'}), '([self.lidar_sub, self.odom_sub], queue_size=10,\n slop=0.02)\n', (1285, 1348), False, 'from message_filters import ApproximateTimeSynchronizer, Subscriber\n'), ((1734, 1756), 'numpy.argmin', 'np.argmin', (['proc_ranges'], {}), '(proc_ranges)\n', (1743, 1756), True, 'import numpy as np\n'), ((2553, 2657), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['[car.orientation.x, car.orientation.y, car.orientation.z, car.orientation.w]'], {}), '([car.orientation.x, car.orientation.y, car.\n orientation.z, car.orientation.w])\n', (2574, 2657), False, 'from tf.transformations import euler_from_quaternion\n'), ((4496, 4516), 'numpy.where', 'np.where', (['(jumps > 10)'], {}), '(jumps > 10)\n', (4504, 4516), True, 'import numpy as np\n'), ((8586, 
8599), 'visualization_msgs.msg.MarkerArray', 'MarkerArray', ([], {}), '()\n', (8597, 8599), False, 'from visualization_msgs.msg import MarkerArray\n'), ((4439, 4462), 'numpy.asarray', 'np.asarray', (['danger_idxs'], {}), '(danger_idxs)\n', (4449, 4462), True, 'import numpy as np\n'), ((4465, 4479), 'numpy.asarray', 'np.asarray', (['t1'], {}), '(t1)\n', (4475, 4479), True, 'import numpy as np\n'), ((5694, 5724), 'numpy.arange', 'np.arange', (['temp[0]', 'temp[1]', '(4)'], {}), '(temp[0], temp[1], 4)\n', (5703, 5724), True, 'import numpy as np\n'), ((6397, 6427), 'numpy.arange', 'np.arange', (['temp[0]', 'temp[1]', '(4)'], {}), '(temp[0], temp[1], 4)\n', (6406, 6427), True, 'import numpy as np\n'), ((8700, 8708), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (8706, 8708), False, 'from visualization_msgs.msg import Marker\n'), ((9145, 9153), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (9151, 9153), False, 'from visualization_msgs.msg import Marker\n'), ((6607, 6623), 'math.cos', 'math.cos', (['angRad'], {}), '(angRad)\n', (6615, 6623), False, 'import math\n'), ((6653, 6669), 'math.sin', 'math.sin', (['angRad'], {}), '(angRad)\n', (6661, 6669), False, 'import math\n'), ((7763, 7777), 'math.sin', 'math.sin', (['Ang1'], {}), '(Ang1)\n', (7771, 7777), False, 'import math\n'), ((7845, 7859), 'math.cos', 'math.cos', (['Ang1'], {}), '(Ang1)\n', (7853, 7859), False, 'import math\n'), ((7929, 7943), 'math.sin', 'math.sin', (['Ang2'], {}), '(Ang2)\n', (7937, 7943), False, 'import math\n'), ((8012, 8026), 'math.cos', 'math.cos', (['Ang2'], {}), '(Ang2)\n', (8020, 8026), False, 'import math\n'), ((9694, 9710), 'math.cos', 'math.cos', (['newAng'], {}), '(newAng)\n', (9702, 9710), False, 'import math\n'), ((9740, 9756), 'math.sin', 'math.sin', (['newAng'], {}), '(newAng)\n', (9748, 9756), False, 'import math\n')] |
from keras.models import Sequential
from keras import layers
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras import layers
from sklearn.feature_extraction.text import CountVectorizer
# Sentence/label files for the three review sources (tab-separated).
filepath_dict = {'yelp': 'sentiment_analysis/yelp_labelled.txt',
                 'amazon': 'sentiment_analysis/amazon_cells_labelled.txt',
                 'imdb': 'sentiment_analysis/imdb_labelled.txt'}
df_list = []
for source, filepath in filepath_dict.items():
    df = pd.read_csv(filepath, names=['sentence', 'label'], sep='\t')
    df['source'] = source  # Add another column filled with the source name
    df_list.append(df)
df = pd.concat(df_list)
print(df.iloc[0])
# Only the Yelp subset is used to train the model below.
df_yelp = df[df['source'] == 'yelp']
sentences = df_yelp['sentence'].values
y = df_yelp['label'].values
sentences_train, sentences_test, y_train, y_test = train_test_split(
    sentences, y, test_size=0.25, random_state=1000)
# Small label-encoding / one-hot demo (unrelated to the model below).
cities = ['London', 'Berlin', 'Berlin', 'New York', 'London']
encoder = LabelEncoder()
city_labels = encoder.fit_transform(cities)
city_labels
# OneHotEncoder
encoder = OneHotEncoder(sparse=False)
city_labels = city_labels.reshape((5, 1))
encoder.fit_transform(city_labels)
# Word embedding: map words to integer ids over the 5000 most frequent
# words; index 0 is reserved for padding.
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(sentences_train)
X_train = tokenizer.texts_to_sequences(sentences_train)
X_test = tokenizer.texts_to_sequences(sentences_test)
vocab_size = len(tokenizer.word_index) + 1  # Adding 1 because of reserved 0 index
print(sentences_train[2])
print(X_train[2])
maxlen = 100
# Pad/truncate every sequence to a fixed length of 100 tokens.
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
print(X_train[0, :])
embedding_dim = 50
def create_embedding_matrix(filepath, word_index, embedding_dim):
    """Load pre-trained word vectors for the words in `word_index`.

    Reads a GloVe-style text file (one "word v1 v2 ..." entry per line) and
    fills a (len(word_index) + 1, embedding_dim) matrix. Row 0 (the reserved
    padding index) and rows for out-of-vocabulary words stay zero; vectors
    longer than embedding_dim are truncated.
    """
    n_rows = len(word_index) + 1  # +1 for the reserved 0 (padding) index
    embedding_matrix = np.zeros((n_rows, embedding_dim))
    with open(filepath, 'r', encoding='UTF8') as handle:
        for line in handle:
            token, *coords = line.split()
            if token in word_index:
                row = word_index[token]
                embedding_matrix[row] = np.array(coords, dtype=np.float32)[:embedding_dim]
    return embedding_matrix
embedding_dim = 50
# Look up pre-trained GloVe vectors for the tokenizer vocabulary.
embedding_matrix = create_embedding_matrix(
    'glove.6B.50d.txt',
    tokenizer.word_index, embedding_dim)
# Architecture: GloVe-initialized (fine-tuned) embedding -> global max
# pooling -> small MLP head with a sigmoid output for binary sentiment.
model = Sequential()
model.add(layers.Embedding(vocab_size, embedding_dim,
                           weights=[embedding_matrix],
                           input_length=maxlen,
                           trainable=True))
model.add(layers.GlobalMaxPool1D())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train,
                    epochs=20,
                    verbose=False,
                    validation_data=(X_test, y_test),
                    batch_size=10)
# Report accuracy on both splits.
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))
"sklearn.preprocessing.LabelEncoder",
"keras.preprocessing.text.Tokenizer",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"keras.models.Sequential",
"keras.layers.Dense",
"numpy.zeros",
"numpy.array",
"keras.layers.GlobalMaxPool1D",
"keras.... | [((917, 935), 'pandas.concat', 'pd.concat', (['df_list'], {}), '(df_list)\n', (926, 935), True, 'import pandas as pd\n'), ((1113, 1178), 'sklearn.model_selection.train_test_split', 'train_test_split', (['sentences', 'y'], {'test_size': '(0.25)', 'random_state': '(1000)'}), '(sentences, y, test_size=0.25, random_state=1000)\n', (1129, 1178), False, 'from sklearn.model_selection import train_test_split\n'), ((1257, 1271), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1269, 1271), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1353, 1380), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (1366, 1380), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1486, 1511), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': '(5000)'}), '(num_words=5000)\n', (1495, 1511), False, 'from keras.preprocessing.text import Tokenizer\n'), ((1817, 1870), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X_train'], {'padding': '"""post"""', 'maxlen': 'maxlen'}), "(X_train, padding='post', maxlen=maxlen)\n", (1830, 1870), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((1880, 1932), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X_test'], {'padding': '"""post"""', 'maxlen': 'maxlen'}), "(X_test, padding='post', maxlen=maxlen)\n", (1893, 1932), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2656, 2668), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2666, 2668), False, 'from keras.models import Sequential\n'), ((752, 812), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'names': "['sentence', 'label']", 'sep': '"""\t"""'}), "(filepath, names=['sentence', 'label'], sep='\\t')\n", (763, 812), True, 'import pandas as pd\n'), ((2149, 2186), 'numpy.zeros', 'np.zeros', (['(vocab_size, embedding_dim)'], {}), '((vocab_size, embedding_dim))\n', (2157, 2186), True, 
'import numpy as np\n'), ((2679, 2791), 'keras.layers.Embedding', 'layers.Embedding', (['vocab_size', 'embedding_dim'], {'weights': '[embedding_matrix]', 'input_length': 'maxlen', 'trainable': '(True)'}), '(vocab_size, embedding_dim, weights=[embedding_matrix],\n input_length=maxlen, trainable=True)\n', (2695, 2791), False, 'from keras import layers\n'), ((2880, 2904), 'keras.layers.GlobalMaxPool1D', 'layers.GlobalMaxPool1D', ([], {}), '()\n', (2902, 2904), False, 'from keras import layers\n'), ((2916, 2951), 'keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (2928, 2951), False, 'from keras import layers\n'), ((2963, 3000), 'keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (2975, 3000), False, 'from keras import layers\n'), ((2417, 2451), 'numpy.array', 'np.array', (['vector'], {'dtype': 'np.float32'}), '(vector, dtype=np.float32)\n', (2425, 2451), True, 'import numpy as np\n')] |
import time
import torch.nn.parallel
import torch.optim
from ops.models import TSN
from ops.transforms import *
from tools.metric import ConfusionMatrix
from opts import parser
args = parser.parse_args()
# Pick the dataset implementation matching --dataset; imports are deferred so
# only the selected dataset module's dependencies are loaded.
if args.dataset == 'drive':
    from ops.drive_dataset_with_keypoint import Drive as dataset
elif args.dataset == 'pcl':  # for pcldriver
    from ops.pcldriver_dataset_with_keypoint import BusDeriverDataset3D as dataset
    from ops.pcldriver_dataset_with_keypoint import is_high_quality
    filters = [
        # only train with quality==0, the frames with other quality will disturb the training
        is_high_quality,
    ]
else:
    raise Exception('dataset not support')
print('Dataset: {}'.format(args.dataset))
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 0-dim tensors, each the top-k accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): slicing the transposed tensor
        # yields a non-contiguous tensor, and .view() raises on those in
        # modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import logging
# Pin GPU ordering/visibility before any CUDA context is created.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
SEED = 777
# Seed every RNG (Python, torch CPU/GPU, numpy) and force deterministic
# cuDNN kernels so evaluation is reproducible.
random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
np.random.seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def load_model(checkpoint_path):
    """Build the TSN/TSM network from the runtime `args`, wrap it in
    DataParallel and load weights from `checkpoint_path`.

    Returns:
        (model, epoch): the loaded model and the checkpoint's epoch number.
    """
    model = TSN(args.num_class, args.num_segments, args.modality,
                base_model=args.arch,
                consensus_type=args.crop_fusion_type,
                img_feature_dim=args.img_feature_dim,
                pretrain=args.pretrain,
                is_shift=True, shift_div=8, shift_place='blockres',
                first = args.first,
                second = args.second,
                gcn_stride=args.gcn_stride,
                concat_layer=args.concat_layer,
                xyc = args.xyc,
                bn = args.bn,arch_cnn=args.arch_cnn,patch_size=args.patch_size
                )
    pretrained_dict = torch.load(checkpoint_path)
    state_dict = pretrained_dict['state_dict']
    epoch = pretrained_dict['epoch']
    model.cuda()
    model = nn.DataParallel(model)
    # strict=False tolerates key mismatches between checkpoint and model.
    model.load_state_dict(state_dict,strict=False)
    return model, epoch
input_size = 224
# NOTE(review): `cropping` is only defined when test_crops == 1; any other
# value makes the Compose below fail with a NameError -- confirm that
# single-crop evaluation is the only supported mode.
if args.test_crops == 1:
    cropping = torchvision.transforms.Compose([
        GroupScale(256),
        GroupCenterCrop(input_size),
    ])
this_arch = 'resnet'
# ImageNet normalization statistics.
input_mean = [0.485, 0.456, 0.406]
input_std = [0.229, 0.224, 0.225]
test_transforms = torchvision.transforms.Compose([
    cropping,
    Stack(roll=(this_arch in ['BNInception', 'InceptionV3'])),
    ToTorchFormatTensor(div=(this_arch not in ['BNInception', 'InceptionV3'])),
    GroupNormalize(input_mean, input_std),
])
def get_logger(args, mode='test'):
    """Create a logger that writes to stderr and to <root_log>/val_test.log.

    Args:
        args: parsed options; only `args.root_log` is used.
        mode: unused, kept for interface compatibility.

    Returns:
        A configured `logging.Logger`.
    """
    logger = logging.getLogger(__name__)
    logger.propagate = False  # don't duplicate records via the root logger
    logger.setLevel(logging.INFO)

    # One formatter shared by both handlers (the original built it twice and
    # also computed an unused `date` string).
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

    # Console output.
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    handler.setLevel(0)
    logger.addHandler(handler)

    # File output; the log directory is created on demand.
    if not os.path.isdir(args.root_log):
        os.makedirs(args.root_log)
    logfile = os.path.join(args.root_log, 'val_test.log')
    file_handler = logging.FileHandler(logfile, mode='w')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
def main():
    """Build val/test datasets and dataloaders, restore the model, then
    run validation and test evaluation.

    Uses the module-level `args` namespace; installs the module-level
    `logger` used by val()/test().
    """
    global logger
    logger = get_logger(args, 'test')
    # with gcn_stride == 2 the skeleton branch is fed a fixed 64 segments
    if args.gcn_stride == 2:
        gcn_segments = 64
    else: gcn_segments = args.num_segments
    logger.info('runtime args\n{}\n\n'.format(args))
    logger.info('train set: {},val set: {}'.format(args.view, args.view))
    # dataset selection: 'drive' uses split files, 'pcl' an annotation file
    if args.dataset == 'drive':
        val_dataset = dataset(args.root, args.val_split, view=args.view, mode='eval', patch_size=args.patch_size,
                              num_segments=args.num_segments, gcn_segments=gcn_segments, transforms=test_transforms)
        test_dataset = dataset(args.root, args.test_split, view=args.view, mode='test', patch_size=args.patch_size,
                               num_segments=args.num_segments, gcn_segments=gcn_segments, transforms=test_transforms)
    elif args.dataset == 'pcl':
        val_dataset = dataset(
            root=args.root,
            anno_path=args.pcl_anno,
            train=False,
            filters=filters,
            transforms=test_transforms,
            n_frames=args.num_segments, gcn_segments=gcn_segments,
            interval=0
        )
        # NOTE(review): test_dataset is built with the same arguments as
        # val_dataset for 'pcl' — confirm that val and test really share data.
        test_dataset = dataset(
            root=args.root,
            anno_path=args.pcl_anno,
            train=False,
            filters=filters,
            transforms=test_transforms,
            n_frames=args.num_segments, gcn_segments=gcn_segments,
            interval=0
        )
    else:
        raise Exception
    val_dataloader = torch.utils.data.DataLoader(
        val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True
    )
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True
    )
    model, epoch = load_model(args.model_path)
    val(model, epoch, val_dataloader)
    test(model, epoch, test_dataloader)
@torch.no_grad()
def val(model, epoch,val_dataloader):
    """Evaluate `model` on the validation loader and log top-1/top-5
    accuracy plus class-wise precision/recall.

    The model may return either a single logits tensor or an
    (rgb_logits, skeleton_logits) tuple; in the tuple case both branches
    contribute a cross-entropy loss and accuracy is taken from the RGB head.
    """
    CM = ConfusionMatrix(args.num_class)
    top1 = AverageMeter()
    top5 = AverageMeter()
    rgb_losses = AverageMeter()
    ske_losses = AverageMeter()
    tot_loss = AverageMeter()
    model.eval()
    logger.info("The best model epoch is :{}".format(epoch))
    # NOTE(review): loop variable `input` shadows the builtin of the same name
    for i, (input,target,ske_joint,index) in enumerate(val_dataloader):
        batch_size = input.size(0)
        input = input.cuda()
        target = target.cuda()
        index = index.cuda()
        per_frame_logits = model(input, ske_joint,index)
        if type(per_frame_logits) is tuple:
            # two-stream output: separate losses for RGB and skeleton heads
            rgb_loss = F.cross_entropy(per_frame_logits[0], target)
            ske_loss = F.cross_entropy(per_frame_logits[1], target)
            rgb_losses.update(rgb_loss.item(), input.size(0))
            ske_losses.update(ske_loss.item(), input.size(0))
            loss = rgb_loss + ske_loss
            # measure accuracy and record loss
            prec1, prec5 = accuracy(per_frame_logits[0].data, target, topk=(1, 5))
            per_frame_logits = per_frame_logits[0]
        else:
            prec1, prec5 = accuracy(per_frame_logits.data, target, topk=(1, 5))
            loss = F.cross_entropy(per_frame_logits, target)
        CM.update(target, per_frame_logits)
        tot_loss.update(loss.item(), per_frame_logits.size(0))
        top1.update(prec1.item(), batch_size)
        top5.update(prec5.item(), batch_size)
        if i % args.print_freq == 0 or i == len(val_dataloader)-1:
            output = ('Val: [{0}/{1}]\t'
                      'Ske_Loss {ske_loss.val:.4f} ({ske_loss.avg:.4f})\t'
                      'RGB_Loss {rgb_loss.val:.4f} ({rgb_loss.avg:.4f})\t'
                      'Loss {tot_loss.val:.4f} ({tot_loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                i, len(val_dataloader)-1, tot_loss=tot_loss,ske_loss=ske_losses, rgb_loss=rgb_losses,
                top1=top1, top5=top5))
            logger.info(output)
    # macro-average precision and recall over classes
    prec = 0.0
    for p in CM.precision():
        prec += p
    prec_avg = prec / args.num_class
    recall = 0.0
    for p in CM.recall():
        recall += p
    recall_avg = recall / args.num_class
    logger.info("Val Stage: class-wise precision: {}\nclass-wise recall: {}".format(CM.precision(), CM.recall()))
    logger.info(
        "Val View: {}, top1 acc: {:.2f}, top5 acc: {:.2f}, precision: {:.2f}, recall: {:.2f}".format(args.view.split('/')[-1], top1.avg,
                                                                                        top5.avg,prec_avg*100, recall_avg*100))
@torch.no_grad()
def test(model, epoch, test_dataloader):
    """Evaluate `model` on the test loader and log top-1/top-5 accuracy
    plus class-wise precision/recall.

    Mirrors val(): the model may return a single logits tensor or an
    (rgb_logits, skeleton_logits) tuple; accuracy comes from the RGB head
    in the tuple case.
    """
    top1 = AverageMeter()
    top5 = AverageMeter()
    rgb_losses = AverageMeter()
    ske_losses = AverageMeter()
    tot_loss = AverageMeter()
    model.eval()
    logger.info("The best model epoch is :{}".format(epoch))
    CM = ConfusionMatrix(args.num_class)
    # NOTE(review): loop variable `input` shadows the builtin of the same name
    for i, (input, target,ske_joint,index) in enumerate(test_dataloader):
        batch_size = input.size(0)
        input = input.cuda()
        target = target.cuda()
        index = index.cuda()
        per_frame_logits = model(input, ske_joint,index)
        if type(per_frame_logits) is tuple:
            # two-stream output: separate losses for RGB and skeleton heads
            rgb_loss = F.cross_entropy(per_frame_logits[0], target)
            ske_loss = F.cross_entropy(per_frame_logits[1], target)
            rgb_losses.update(rgb_loss.item(), input.size(0))
            ske_losses.update(ske_loss.item(), input.size(0))
            loss = rgb_loss + ske_loss
            # measure accuracy and record loss
            prec1, prec5 = accuracy(per_frame_logits[0].data, target, topk=(1, 5))
            per_frame_logits = per_frame_logits[0]
        else:
            prec1, prec5 = accuracy(per_frame_logits.data, target, topk=(1, 5))
            loss = F.cross_entropy(per_frame_logits, target)
        CM.update(target, per_frame_logits)
        tot_loss.update(loss.item(), per_frame_logits.size(0))
        top1.update(prec1.item(), batch_size)
        top5.update(prec5.item(), batch_size)
        if i % args.print_freq == 0 or i == len(test_dataloader)-1:
            output = ('Test: [{0}/{1}]\t'
                      'Ske_Loss {ske_loss.val:.4f} ({ske_loss.avg:.4f})\t'
                      'RGB_Loss {rgb_loss.val:.4f} ({rgb_loss.avg:.4f})\t'
                      'Loss {tot_loss.val:.4f} ({tot_loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                i, len(test_dataloader)-1, tot_loss=tot_loss,ske_loss=ske_losses, rgb_loss=rgb_losses,
                top1=top1, top5=top5))
            logger.info(output)
    # macro-average precision and recall over classes
    prec = 0.0
    for p in CM.precision():
        prec += p
    prec_avg = prec / args.num_class
    recall = 0.0
    for p in CM.recall():
        recall += p
    recall_avg = recall / args.num_class
    logger.info("Test Stage: class-wise precision: {}\nclass-wise recall: {}".format(CM.precision(), CM.recall()))
    logger.info(
        "Test View: {}, top1 acc: {:.2f}, top5 acc: {:.2f}, precision(34): {:.2f}, recall(34): {:.2f}".format(args.view.split('/')[-1], top1.avg,top5.avg,
                                                                                        prec_avg*100, recall_avg*100))
# Script entry point
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"ops.pcldriver_dataset_with_keypoint.BusDeriverDataset3D",
"logging.StreamHandler",
"ops.models.TSN",
"os.path.isdir",
"logging.FileHandler",
"numpy.random.seed",
"tools.metric.ConfusionMatrix",
"time.time",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"os.makedirs",... | [((186, 205), 'opts.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (203, 205), False, 'from opts import parser\n'), ((1798, 1815), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (1809, 1815), False, 'import random\n'), ((1816, 1839), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (1833, 1839), False, 'import torch\n'), ((1840, 1868), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (1862, 1868), False, 'import torch\n'), ((1869, 1901), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['SEED'], {}), '(SEED)\n', (1895, 1901), False, 'import torch\n'), ((1902, 1922), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1916, 1922), True, 'import numpy as np\n'), ((6134, 6149), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6147, 6149), False, 'import torch\n'), ((8822, 8837), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8835, 8837), False, 'import torch\n'), ((2051, 2476), 'ops.models.TSN', 'TSN', (['args.num_class', 'args.num_segments', 'args.modality'], {'base_model': 'args.arch', 'consensus_type': 'args.crop_fusion_type', 'img_feature_dim': 'args.img_feature_dim', 'pretrain': 'args.pretrain', 'is_shift': '(True)', 'shift_div': '(8)', 'shift_place': '"""blockres"""', 'first': 'args.first', 'second': 'args.second', 'gcn_stride': 'args.gcn_stride', 'concat_layer': 'args.concat_layer', 'xyc': 'args.xyc', 'bn': 'args.bn', 'arch_cnn': 'args.arch_cnn', 'patch_size': 'args.patch_size'}), "(args.num_class, args.num_segments, args.modality, base_model=args.arch,\n consensus_type=args.crop_fusion_type, img_feature_dim=args.\n img_feature_dim, pretrain=args.pretrain, is_shift=True, shift_div=8,\n shift_place='blockres', first=args.first, second=args.second,\n gcn_stride=args.gcn_stride, concat_layer=args.concat_layer, xyc=args.\n xyc, bn=args.bn, arch_cnn=args.arch_cnn, patch_size=args.patch_size)\n", (2054, 2476), False, 'from ops.models 
import TSN\n'), ((2676, 2703), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2686, 2703), False, 'import torch\n'), ((2817, 2839), 'torch.nn.DataParallel', 'nn.DataParallel', (['model'], {}), '(model)\n', (2832, 2839), True, 'import torch.nn as nn\n'), ((3470, 3497), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3487, 3497), False, 'import logging\n'), ((3575, 3598), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (3596, 3598), False, 'import logging\n'), ((3615, 3673), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(levelname)s %(message)s')\n", (3632, 3673), False, 'import logging\n'), ((3924, 3967), 'os.path.join', 'os.path.join', (['args.root_log', '"""val_test.log"""'], {}), "(args.root_log, 'val_test.log')\n", (3936, 3967), False, 'import os\n'), ((3987, 4025), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {'mode': '"""w"""'}), "(logfile, mode='w')\n", (4006, 4025), False, 'import logging\n'), ((4082, 4140), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(levelname)s %(message)s')\n", (4099, 4140), False, 'import logging\n'), ((5701, 5831), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(val_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers, pin_memory=True)\n', (5728, 5831), False, 'import torch\n'), ((5864, 5995), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(test_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers, pin_memory=True)\n', (5891, 5995), False, 'import torch\n'), ((6197, 
6228), 'tools.metric.ConfusionMatrix', 'ConfusionMatrix', (['args.num_class'], {}), '(args.num_class)\n', (6212, 6228), False, 'from tools.metric import ConfusionMatrix\n'), ((9112, 9143), 'tools.metric.ConfusionMatrix', 'ConfusionMatrix', (['args.num_class'], {}), '(args.num_class)\n', (9127, 9143), False, 'from tools.metric import ConfusionMatrix\n'), ((3845, 3873), 'os.path.isdir', 'os.path.isdir', (['args.root_log'], {}), '(args.root_log)\n', (3858, 3873), False, 'import os\n'), ((3883, 3909), 'os.makedirs', 'os.makedirs', (['args.root_log'], {}), '(args.root_log)\n', (3894, 3909), False, 'import os\n'), ((4589, 4777), 'ops.pcldriver_dataset_with_keypoint.BusDeriverDataset3D', 'dataset', (['args.root', 'args.val_split'], {'view': 'args.view', 'mode': '"""eval"""', 'patch_size': 'args.patch_size', 'num_segments': 'args.num_segments', 'gcn_segments': 'gcn_segments', 'transforms': 'test_transforms'}), "(args.root, args.val_split, view=args.view, mode='eval', patch_size=\n args.patch_size, num_segments=args.num_segments, gcn_segments=\n gcn_segments, transforms=test_transforms)\n", (4596, 4777), True, 'from ops.pcldriver_dataset_with_keypoint import BusDeriverDataset3D as dataset\n'), ((4821, 5010), 'ops.pcldriver_dataset_with_keypoint.BusDeriverDataset3D', 'dataset', (['args.root', 'args.test_split'], {'view': 'args.view', 'mode': '"""test"""', 'patch_size': 'args.patch_size', 'num_segments': 'args.num_segments', 'gcn_segments': 'gcn_segments', 'transforms': 'test_transforms'}), "(args.root, args.test_split, view=args.view, mode='test', patch_size\n =args.patch_size, num_segments=args.num_segments, gcn_segments=\n gcn_segments, transforms=test_transforms)\n", (4828, 5010), True, 'from ops.pcldriver_dataset_with_keypoint import BusDeriverDataset3D as dataset\n'), ((3820, 3831), 'time.time', 'time.time', ([], {}), '()\n', (3829, 3831), False, 'import time\n'), ((5086, 5268), 'ops.pcldriver_dataset_with_keypoint.BusDeriverDataset3D', 'dataset', ([], {'root': 
'args.root', 'anno_path': 'args.pcl_anno', 'train': '(False)', 'filters': 'filters', 'transforms': 'test_transforms', 'n_frames': 'args.num_segments', 'gcn_segments': 'gcn_segments', 'interval': '(0)'}), '(root=args.root, anno_path=args.pcl_anno, train=False, filters=\n filters, transforms=test_transforms, n_frames=args.num_segments,\n gcn_segments=gcn_segments, interval=0)\n', (5093, 5268), True, 'from ops.pcldriver_dataset_with_keypoint import BusDeriverDataset3D as dataset\n'), ((5377, 5559), 'ops.pcldriver_dataset_with_keypoint.BusDeriverDataset3D', 'dataset', ([], {'root': 'args.root', 'anno_path': 'args.pcl_anno', 'train': '(False)', 'filters': 'filters', 'transforms': 'test_transforms', 'n_frames': 'args.num_segments', 'gcn_segments': 'gcn_segments', 'interval': '(0)'}), '(root=args.root, anno_path=args.pcl_anno, train=False, filters=\n filters, transforms=test_transforms, n_frames=args.num_segments,\n gcn_segments=gcn_segments, interval=0)\n', (5384, 5559), True, 'from ops.pcldriver_dataset_with_keypoint import BusDeriverDataset3D as dataset\n'), ((6774, 6818), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['per_frame_logits[0]', 'target'], {}), '(per_frame_logits[0], target)\n', (6789, 6818), True, 'import torch.nn.functional as F\n'), ((6842, 6886), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['per_frame_logits[1]', 'target'], {}), '(per_frame_logits[1], target)\n', (6857, 6886), True, 'import torch.nn.functional as F\n'), ((7344, 7385), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['per_frame_logits', 'target'], {}), '(per_frame_logits, target)\n', (7359, 7385), True, 'import torch.nn.functional as F\n'), ((9466, 9510), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['per_frame_logits[0]', 'target'], {}), '(per_frame_logits[0], target)\n', (9481, 9510), True, 'import torch.nn.functional as F\n'), ((9534, 9578), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['per_frame_logits[1]', 'target'], {}), 
'(per_frame_logits[1], target)\n', (9549, 9578), True, 'import torch.nn.functional as F\n'), ((10036, 10077), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['per_frame_logits', 'target'], {}), '(per_frame_logits, target)\n', (10051, 10077), True, 'import torch.nn.functional as F\n')] |
import tensorflow as tf
import gym
import numpy as np
import shutil
import os

# reproducible results
np.random.seed(1)
tf.set_random_seed(1)

# Load Environment
ENV_NAME = 'BipedalWalker-v2'
env = gym.make(ENV_NAME)
# Reproducible environment parameters
env.seed(1)
STATE_DIMENSION = env.observation_space.shape[0]
ACTION_DIMENSION = env.action_space.shape[0]
ACTION_BOUND = env.action_space.high

######################################## Hyperparameters ########################################
TRAIN_EPI_NUM = 500      # number of episodes to be trained
ACTOR_LR = 0.05          # learning rate for the actor
CRITIC_LR = 0.05         # learning rate for the critic
R_DISCOUNT = 0.9         # reward discount
MEMORY_CAPACITY = 1000000
ACTOR_REP_ITE = 1700     # after such many iterations, update ACTOR target net
CRITIC_REP_ITE = 1500
BATCH = 40               # size of batch used to learn
# Path used to store training result (parameters)
TRAIN_DATA_PATH = './train'

GLOBAL_STEP = tf.Variable(0, trainable=False)  # record how many steps we have gone through
INCREASE_GLOBAL_STEP = GLOBAL_STEP.assign(tf.add(GLOBAL_STEP, 1))
# Set automatically decaying learning rates to help convergence.
# BUG FIX: the original referenced undefined names LR_A / LR_C here,
# which raised NameError; the intended initial rates are ACTOR_LR / CRITIC_LR.
ACTOR_LR = tf.train.exponential_decay(ACTOR_LR, GLOBAL_STEP, 10000, .95, staircase=True)
CRITIC_LR = tf.train.exponential_decay(CRITIC_LR, GLOBAL_STEP, 10000, .90, staircase=True)
END_POINT = (200 - 10) * (14/30)  # The end point of the game
##################################################
LOAD_MODEL = True  # Whether to load trained model#
##################################################
# DDPG training / replay loop.
# BUG FIXES vs. the original, which used many undefined names (NameError at
# runtime): REPLACE_ITER_A/REPLACE_ITER_C -> ACTOR_REP_ITE/CRITIC_REP_ITE,
# MAX_EPISODES -> TRAIN_EPI_NUM, s/a/r/s_ -> state/action/reward/next_state,
# 'trainsition' typo -> transition, tree_idx -> tree_index,
# running_r/ep_r -> running_reward/episode_reward, nextState -> next_state.
with tf.Session() as sess:
    # Create actor and critic.
    actor = Actor(sess, ACTION_DIMENSION, ACTION_BOUND, ACTOR_LR, ACTOR_REP_ITE)
    critic = Critic(sess, STATE_DIMENSION, ACTION_DIMENSION, CRITIC_LR, R_DISCOUNT, CRITIC_REP_ITE, actor.a, actor.a_)
    actor.add_grad_to_graph(critic.a_grads)
    # Memory class implementation from: https://github.com/jaara/AI-blog/blob/master/Seaquest-DDQN-PER.py
    memory = Memory(MEMORY_CAPACITY)
    # saver is used to store or restore trained parameters
    saver = tf.train.Saver(max_to_keep=100)  # maximum number of recent checkpoints to keep
    # Determine whether this is a fresh training run or a continuation.
    if LOAD_MODEL:  # returns CheckpointState proto from the "checkpoint" file
        checkpoints = tf.train.get_checkpoint_state(TRAIN_DATA_PATH, 'checkpoint').all_model_checkpoint_paths
        saver.restore(sess, checkpoints[-1])  # reload trained parameters into the tf session
    else:
        if os.path.isdir(TRAIN_DATA_PATH):
            shutil.rmtree(TRAIN_DATA_PATH)  # recursively remove all files under directory
        os.mkdir(TRAIN_DATA_PATH)
        sess.run(tf.global_variables_initializer())
    explore_degree = 0.1
    explore_degree_minimum = 0.0001
    explore_decay_factor = 0.99
    ################################# Main loop for training #################################
    for i_episode in range(TRAIN_EPI_NUM):
        state = env.reset()
        episode_reward = 0  # cumulative reward of this episode
        while True:
            action = actor.act(state)
            # explore by adding Gaussian noise, clipped to the valid action range
            action = np.clip(np.random.normal(action, explore_degree), -ACTION_BOUND, ACTION_BOUND)
            next_state, reward, done, _ = env.step(action)
            transition = np.hstack((state, action, [reward], next_state))
            probability = np.max(memory.tree.tree[-memory.tree.capacity:])
            memory.store(probability, transition)  # stored for later learning
            # when reward == -100, the BipedalWalker has fallen to the ground
            episode_reward += reward
            # once enough experience is collected, learn and decay exploration
            if GLOBAL_STEP.eval(sess) > MEMORY_CAPACITY/20:
                explore_degree = max([explore_decay_factor*explore_degree, explore_degree_minimum])  # decay the action randomness
                tree_index, b_memory, weights = memory.prio_sample(BATCH)  # for critic update
                b_state = b_memory[:, :STATE_DIMENSION]
                b_action = b_memory[:, STATE_DIMENSION: STATE_DIMENSION + ACTION_DIMENSION]
                b_reward = b_memory[:, -STATE_DIMENSION - 1: -STATE_DIMENSION]
                b_next_state = b_memory[:, -STATE_DIMENSION:]
                td = critic.learn(b_state, b_action, b_reward, b_next_state, weights)
                actor.learn(b_state)
                for i in range(len(tree_index)):  # update priorities of sampled leaves
                    memory.update(tree_index[i], td[i])
            if done:
                # exponentially smoothed running reward across episodes
                if "running_reward" not in globals():
                    running_reward = episode_reward
                else:
                    running_reward = 0.95*running_reward + 0.05*episode_reward
                print('running reward: ',running_reward,', episode reward: ',episode_reward)
                break  # start new episode
            state = next_state
            sess.run(INCREASE_GLOBAL_STEP)
| [
"numpy.random.normal",
"tensorflow.Variable",
"numpy.hstack",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.add",
"shutil.rmtree",
"tensorflow.train.get_checkpoint_state",
"tensorflow.global_variables_initializer",
"numpy.max",
"os.path.isdir",
"numpy.random.seed",
"os.mkdir",
... | [((102, 119), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (116, 119), True, 'import numpy as np\n'), ((120, 141), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (138, 141), True, 'import tensorflow as tf\n'), ((198, 216), 'gym.make', 'gym.make', (['ENV_NAME'], {}), '(ENV_NAME)\n', (206, 216), False, 'import gym\n'), ((898, 929), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (909, 929), True, 'import tensorflow as tf\n'), ((1119, 1193), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['LR_A', 'GLOBAL_STEP', '(10000)', '(0.95)'], {'staircase': '(True)'}), '(LR_A, GLOBAL_STEP, 10000, 0.95, staircase=True)\n', (1145, 1193), True, 'import tensorflow as tf\n'), ((1205, 1278), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['LR_C', 'GLOBAL_STEP', '(10000)', '(0.9)'], {'staircase': '(True)'}), '(LR_C, GLOBAL_STEP, 10000, 0.9, staircase=True)\n', (1231, 1278), True, 'import tensorflow as tf\n'), ((1017, 1039), 'tensorflow.add', 'tf.add', (['GLOBAL_STEP', '(1)'], {}), '(GLOBAL_STEP, 1)\n', (1023, 1039), True, 'import tensorflow as tf\n'), ((1507, 1519), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1517, 1519), True, 'import tensorflow as tf\n'), ((2023, 2054), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(100)'}), '(max_to_keep=100)\n', (2037, 2054), True, 'import tensorflow as tf\n'), ((2538, 2568), 'os.path.isdir', 'os.path.isdir', (['TRAIN_DATA_PATH'], {}), '(TRAIN_DATA_PATH)\n', (2551, 2568), False, 'import os\n'), ((2667, 2692), 'os.mkdir', 'os.mkdir', (['TRAIN_DATA_PATH'], {}), '(TRAIN_DATA_PATH)\n', (2675, 2692), False, 'import os\n'), ((2336, 2396), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['TRAIN_DATA_PATH', '"""checkpoint"""'], {}), "(TRAIN_DATA_PATH, 'checkpoint')\n", (2365, 2396), True, 'import tensorflow as tf\n'), ((2581, 2611), 'shutil.rmtree', 
'shutil.rmtree', (['TRAIN_DATA_PATH'], {}), '(TRAIN_DATA_PATH)\n', (2594, 2611), False, 'import shutil\n'), ((2711, 2744), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2742, 2744), True, 'import tensorflow as tf\n'), ((3334, 3360), 'numpy.hstack', 'np.hstack', (['(s, a, [r], s_)'], {}), '((s, a, [r], s_))\n', (3343, 3360), True, 'import numpy as np\n'), ((3387, 3435), 'numpy.max', 'np.max', (['memory.tree.tree[-memory.tree.capacity:]'], {}), '(memory.tree.tree[-memory.tree.capacity:])\n', (3393, 3435), True, 'import numpy as np\n'), ((3152, 3192), 'numpy.random.normal', 'np.random.normal', (['action', 'explore_degree'], {}), '(action, explore_degree)\n', (3168, 3192), True, 'import numpy as np\n')] |
#! /usr/bin/python3.7
# -- coding: utf-8 -- **
### Here are a set of functions used in elec_pipe
### and a set of qthread class for elec_main_gui
import sys
import os
import re
import math
import numpy as np
from numpy import ndarray
import nibabel as nib
from scipy import ndimage
from sklearn.mixture import GaussianMixture as GMM
from sklearn.linear_model import LinearRegression, Lasso
from PyQt5.QtCore import QThread, pyqtSignal
# import matplotlib
# matplotlib.use("Qt5Agg")
# from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
# from matplotlib.figure import Figure
# from matplotlib import pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D, art3d
# import electrode
CMD_Hough3D = './hough-3d-lines/hough3dlines'
def run(cmd):
    """
    Print the command, then execute it on the shell (bash).

    Parameters
    ----------
    cmd : str
        Command to be sent to the shell.
    """
    print(f"Running shell command: {cmd}")
    os.system(cmd)
    print("Done!\n")  # was a pointless f-string with no placeholders (F541)
def align(inp, ref, xfm=None, out=None, dof=12, searchrad=True, bins=256, interp=None, cost="mutualinfo", sch=None, wmseg=None, init=None, finesearch=None,):
    """Aligns two images using FSL's flirt and stores the transform between them

    BUG FIXES vs. original: `finesearch` was documented but never added to the
    command; `searchrad=False` still added the search flags because the test
    was `is not None` instead of truthiness.

    Parameters
    ----------
    inp : str
        path to input image being altered to align with the reference image as a nifti image file
    ref : str
        path to reference image being aligned to as a nifti image file
    xfm : str, optional
        where to save the 4x4 affine matrix containing the transform between two images, by default None
    out : str, optional
        determines whether the image will be automatically aligned and where the resulting image will be saved, by default None
    dof : int, optional
        the number of degrees of freedom of the alignment, by default 12
    searchrad : bool, optional
        whether to use the predefined searchradius parameter (180 degree sweep in x, y, and z), by default True
    bins : int, optional
        number of histogram bins, by default 256
    interp : str, optional
        interpolation method to be used (trilinear,nearestneighbour,sinc,spline), by default None
    cost : str, optional
        cost function to be used in alignment (mutualinfo, corratio, normcorr, normmi, leastsq, labeldiff, or bbr), by default "mutualinfo"
    sch : str, optional
        the optional FLIRT schedule, by default None
    wmseg : str, optional
        an optional white-matter segmentation for bbr, by default None
    init : str, optional
        an initial guess of an alignment in the form of the path to a matrix file, by default None
    finesearch : int, optional
        fine search angle in degrees, by default None
    """
    cmd = f"flirt -in {inp} -ref {ref}"
    if xfm is not None:
        cmd += f" -omat {xfm}"
    if out is not None:
        cmd += f" -out {out}"
    if dof is not None:
        cmd += f" -dof {dof}"
    if bins is not None:
        cmd += f" -bins {bins}"
    if interp is not None:
        cmd += f" -interp {interp}"
    if cost is not None:
        cmd += f" -cost {cost}"
    if searchrad:  # only add search sweeps when actually requested
        cmd += " -searchrx -180 180 -searchry -180 180 " + "-searchrz -180 180"
    if finesearch is not None:  # previously ignored
        cmd += f" -finesearch {finesearch}"
    if sch is not None:
        cmd += f" -schedule {sch}"
    if wmseg is not None:
        cmd += f" -wmseg {wmseg}"
    if init is not None:
        cmd += f" -init {init}"
    run(cmd)
def align_nonlinear(inp, ref, xfm, out, warp, ref_mask=None, in_mask=None, config=None):
    """Aligns two images with FSL's fnirt and stores the nonlinear warp.

    Parameters
    ----------
    inp : str
        path to the input image
    ref : str
        path to the reference image that the input will be aligned to
    xfm : str
        path to the affine transform matrix created by align()
    out : str
        path for the desired output image
    warp : str
        where to store the nonlinear warp coefficients/fields
    ref_mask : str, optional
        path to the reference image brain mask
    in_mask : str, optional
        path to a mask in input image space
    config : str, optional
        path to a config file with extra command line arguments
    """
    pieces = [f"fnirt --in={inp} --ref={ref} --aff={xfm} --iout={out} --cout={warp} --warpres=8,8,8"]
    if ref_mask is not None:
        pieces.append(f"--refmask={ref_mask} --applyrefmask=1")
    if in_mask is not None:
        pieces.append(f"--inmask={in_mask} --applyinmask=1")
    if config is not None:
        pieces.append(f"--config={config}")
    run(" ".join(pieces))
def dataExtraction(intraFile, thre=0.2):
    """Return the (xs, ys, zs) voxel indices whose intensity survives a
    relative threshold.

    The cutoff is `thre` times the volume's maximum intensity; voxels
    below it are zeroed and only non-zero voxels are reported.
    """
    volume = nib.load(intraFile).get_fdata()
    cutoff = np.amax(volume) * thre
    masked = np.copy(volume)
    masked[masked < cutoff] = 0
    xs, ys, zs = np.nonzero(masked)
    return xs, ys, zs
def trackRecognition(patient, cmd_hough3d, CTresult_dir, intraFile, thre=0.2):
    """Dump thresholded CT voxels as a point cloud and run the 3D Hough
    line detector on it.

    Writes `<patient>_3dPointClouds.dat` and `<patient>.txt` (detected
    lines) into `CTresult_dir`, then returns the voxel coordinates.
    """
    xs, ys, zs = dataExtraction(intraFile, thre)
    cloud = np.column_stack((xs, ys, zs))
    fname = f"{CTresult_dir}/{patient}_3dPointClouds.dat"
    np.savetxt(fname, cloud, fmt='%.4f', delimiter=',', newline='\n', header='point clouds', footer='', comments='# ', encoding=None)
    run(cmd=f"{cmd_hough3d} -o {CTresult_dir}/{patient}.txt -minvotes 5 {fname}")
    return xs, ys, zs
def locateLine(row, info):
    """Sample 50 points along detected line `row`.

    info[row] holds (id, ax, ay, az, bx, by, bz): anchor point (ax, ay, az)
    and direction vector (bx, by, bz). Returns three arrays covering
    anchor + t * direction for t in [0, 50].
    """
    anchor_x, anchor_y, anchor_z, dir_x, dir_y, dir_z = info[row][1:7]
    xs = np.linspace(anchor_x, anchor_x + dir_x * 50, 50)
    ys = np.linspace(anchor_y, anchor_y + dir_y * 50, 50)
    zs = np.linspace(anchor_z, anchor_z + dir_z * 50, 50)
    return xs, ys, zs
class Preprocess_thread(QThread):
    """Worker thread: erode the FreeSurfer brain mask, strip the skull from
    the registered CT and save intracranial volumes.

    Expects these attributes to be assigned before start():
    directory_surf, directory_ct, patient, ero_itr, thre (percent, 0-100), K.
    Emits `finished` when both output volumes are written.
    """
    finished = pyqtSignal()
    def __init__(self):
        super(Preprocess_thread, self).__init__()
    def run(self): # erode, skull, intra_save
        # binary brain mask from the FreeSurfer surface pipeline
        mask_file = os.path.join(f"{self.directory_surf}/mri", f"mask.mgz")
        img_mask = nib.load(mask_file)
        data_mask = img_mask.get_fdata()
        # erode the mask to keep CT voxels safely inside the skull
        data_mask_ero = ndimage.morphology.binary_erosion(data_mask, iterations=self.ero_itr)
        CTreg_file = os.path.join(self.directory_ct, f"{self.patient}CT_Reg.nii.gz")
        img_ct = nib.load(CTreg_file)
        data_ct = img_ct.get_fdata()
        maxVal = np.amax(data_ct)
        # NOTE(review): converts percent to fraction in place; the rescaled
        # value is also embedded in the output filename below.
        self.thre = self.thre / 100
        thre = maxVal * self.thre
        # zero everything outside the eroded brain mask
        data_ct[data_mask_ero == 0] = 0
        img1 = nib.Nifti1Image(data_ct, img_ct.affine)
        intra_file1 = os.path.join(self.directory_ct, f"{self.patient}CT_intra.nii.gz")
        nib.save(img1, intra_file1)
        # additionally threshold by relative intensity and save a second volume
        data_ct[data_ct < thre] = 0
        img0 = nib.Nifti1Image(data_ct, img_ct.affine)
        intra_file = os.path.join(self.directory_ct, f"{self.patient}CT_intracranial_{self.thre}_{self.K}_{self.ero_itr}.nii.gz")
        nib.save(img0, intra_file)
        self.finished.emit()
class PreprocessResult_thread(QThread):
    """Worker thread: extract suprathreshold voxel coordinates from the
    intracranial CT volume and emit them as an (N, 3) array.

    Expects `CTintra_file` and `thre` attributes to be assigned before
    start().
    """
    send_axes = pyqtSignal(ndarray)

    def __init__(self):
        super(PreprocessResult_thread, self).__init__()

    def run(self):
        xs, ys, zs = dataExtraction(intraFile=self.CTintra_file, thre=self.thre)
        coords = np.transpose(np.vstack((xs, ys, zs)))
        self.send_axes.emit(coords)
class GenerateLabel_thread(QThread):
    """Worker thread: cluster CT voxels into K electrode labels.

    Runs (or reuses) the 3D Hough line detection, then seeds a Gaussian
    mixture with the detected line anchors and assigns each voxel to an
    electrode. Saves a 256^3 label volume to `<patient>_labels.npy`.

    Expects these attributes before start(): directory_ct, patient,
    intra_file, K. Emits finished(1) on failure (fewer detected tracks
    than electrodes), finished(0) on success.
    """
    finished = pyqtSignal(int)
    def __init__(self):
        super(GenerateLabel_thread, self).__init__()
    def run(self):
        # process 3d line hough transform
        hough_file = f"{self.directory_ct}/{self.patient}.txt"
        if not os.path.exists(hough_file):
            xs, ys, zs = trackRecognition(patient=self.patient, cmd_hough3d=CMD_Hough3D, CTresult_dir=self.directory_ct, intraFile=self.intra_file, thre=0)
        else: # temporarily
            # xs, ys, zs = utils.trackRecognition(patient=patient, cmd_hough3d=CMD_Hough3D, CTresult_dir=CTresult_dir, intraFile=intra_file, thre=Thre)
            xs, ys, zs = dataExtraction(intraFile=self.intra_file, thre=0)
            pass
        # read detected lines' info: each row is (id, ax, ay, az, bx, by, bz)
        elec_track = []
        with open(hough_file, 'r') as f:
            for line in f.readlines():
                a = re.findall(r"\d+\.?\d*", line)
                for i in range(len(a)):
                    a[i] = float(a[i])
                elec_track.append(a)
        # print(f"{len(elec_track)} tracks has been detected!\n")
        # print(elec_track)
        elec_track = np.array(elec_track)
        K_check = elec_track.shape[0]
        # NOTE(review): on failure this emits 1 but does NOT return —
        # execution continues below and may raise; confirm intended.
        if K_check < self.K:
            self.finished.emit(1)
        else: # if K_check != K:
            print(f"Warning: {self.K} electrodes implanted, but {K_check} has been clustered by Hough!")
            # sys.exit()
        # process a gaussian mixture model for bug fixing
        # seed the GMM with the first K detected line anchor points
        centroids = np.array(elec_track[0:self.K, 1:4])
        # print(centroids)
        X = np.transpose(np.vstack((xs, ys, zs)))
        # NOTE(review): random_state=None makes the clustering nondeterministic
        gmm = GMM(n_components=self.K, covariance_type='full',means_init=centroids, random_state=None).fit(X)
        labels = gmm.predict(X)
        # print(labels)
        Labels = np.zeros((256, 256, 256)) # labeled space; electrode i gets value i + 1
        for i in range(self.K):
            ind = np.where(labels == i)
            Labels[xs[ind], ys[ind], zs[ind]] = i + 1
        np.save(os.path.join(self.directory_ct, f"{self.patient}_labels.npy"), Labels, allow_pickle=True, fix_imports=True)
        self.finished.emit(0)
# class LabelResult_thread(QThread):
# def __init__(self):
# super(LabelResult_thread, self).__init__()
# def run(self):
# print('Yaah!')
class ContactSegment_thread(QThread):
    """Worker thread: segment the individual contacts of each of the K
    labeled electrodes via ElectrodeSeg.

    Expects these attributes before start(): directory_labels, patName,
    K, numMax, diameterSize, spacing, gap. Emits `finished` when all
    electrodes are processed.
    """
    finished = pyqtSignal()

    def __init__(self):
        super(ContactSegment_thread, self).__init__()

    def run(self):
        print('Yaah!')
        # labels are 1-based: electrode i carries label i + 1 in the volume
        for label_idx in range(1, self.K + 1):
            segmenter = ElectrodeSeg(filePath=self.directory_labels, patName=self.patName, iLabel=label_idx, numMax=self.numMax, diameterSize=self.diameterSize, spacing=self.spacing, gap=self.gap)
            segmenter.pipeline()
            print(segmenter.elecPos)
        self.finished.emit()
def savenpy(filePath, patientName):
    """Collect per-electrode coordinate files into a single lookup dict.

    Reads every `<electrode>.txt` under `<filePath>/<patientName>_result`
    and saves {electrode_name: coordinate array} to
    `<filePath>/chnXyzDict.npy`.

    Parameters
    ----------
    filePath : str
        Directory containing the `<patientName>_result` folder; the .npy
        file is written here.
    patientName : str
        Patient identifier used to build the result folder name.
    """
    # renamed from `dir`, which shadowed the builtin
    result_dir = f"{filePath}/{patientName}_result"
    elec_dict = {}
    for root, dirs, files in os.walk(result_dir, topdown=True):
        # skip bookkeeping files that are not electrode coordinate tables
        for skip in ('.DS_Store', 'chnXyzDict.npy'):
            if skip in files:
                files.remove(skip)
        for file in files:
            elec_name = file.split('.')[0]
            elec_dict[elec_name] = np.loadtxt(os.path.join(root, file))
    np.save(f"{filePath}/chnXyzDict.npy", elec_dict)
def lookupTable(subdir, patient, ctdir, elec_label):
    """Look up the FreeSurfer anatomical label of each contact of one electrode.

    Args:
        subdir: FreeSurfer subjects root, also holding ``FreeSurferColorLUT.txt``.
        patient: patient identifier.
        ctdir: directory holding the ``{patient}_result`` coordinate files.
        elec_label: electrode name, i.e. the coordinate-file stem.

    Returns:
        A list of anatomical-structure names, one per contact.
    """
    annot_dir = f"{subdir}/subjects/{patient}/mri/aparc.a2009s+aseg.mgz"
    lookup_table = f"{subdir}/FreeSurferColorLUT.txt"
    annot_img = nib.load(annot_dir).get_fdata()
    elecs_file = f"{ctdir}/{patient}_result/{elec_label}.txt"
    elecs_xyz = np.loadtxt(elecs_file, dtype='float', comments='#')
    # The saved coordinates are ordered (x, z, y); map them back into the
    # annotation volume's index space (assumes a 256^3, 128-centered volume
    # -- TODO confirm against the writer in ElectrodeSeg.resulting()).
    elecs_xyz = elecs_xyz[:, [0, 2, 1]]
    elecs_xyz[:, 0] = 128 - elecs_xyz[:, 0]
    elecs_xyz[:, 1] = 128 - elecs_xyz[:, 1]
    elecs_xyz[:, 2] = 128 + elecs_xyz[:, 2]
    labels = []
    for row in range(elecs_xyz.shape[0]):
        x = elecs_xyz[row, 0]
        y = elecs_xyz[row, 1]
        z = elecs_xyz[row, 2]
        x1, x2 = int(x), math.ceil(x)
        y1, y2 = int(y), math.ceil(y)
        z1, z2 = int(z), math.ceil(z)
        # Majority vote over the 8 voxels surrounding the fractional position.
        val = [annot_img[xi, yi, zi] for xi in (x1, x2) for yi in (y1, y2) for zi in (z1, z2)]
        labels.append(max(set(val), key=val.count))
    # Performance fix: read the color lookup table once, instead of re-opening
    # and re-reading the whole file for every contact as before.
    with open(lookup_table, 'r') as f:
        lines = f.readlines()
    labels_name = []
    for label in labels:
        b = str(int(label))
        for line in lines:
            # The LUT id occupies the first columns of each line.
            if re.match(b, line[0:8]):
                labels_name.append(line[len(b):-16].strip())
                break
    return labels_name
class ElectrodeSeg:
    """Segment the contacts of one sEEG electrode from a labeled CT volume.

    Given the label volume ``*_labels.npy`` and the registered CT
    ``*CT_intra.nii.gz`` (both assumed to be 256x256x256 -- TODO confirm),
    the pipeline starts at the deep (target) end of the electrode cluster
    carrying label ``iLabel``, converges onto each contact's weighted
    center of mass, and steps outward along the regressed electrode axis
    to find the remaining contacts.  The recovered coordinates are written
    to ``{filePath}/{patName}_result/{electrode name}.txt``.
    """

    def __init__(self, filePath, patName, iLabel, numMax, diameterSize, spacing, gap):
        """Locate the input volumes and isolate the voxels of electrode ``iLabel``.

        Args:
            filePath: directory containing the CT and label volumes.
            patName: patient identifier (used for the result directory name).
            iLabel: 1-based index of the electrode cluster to segment.
            numMax: maximum number of contacts to search for.
            diameterSize: contact diameter in voxels (convergence ROI size).
            spacing: inter-contact spacing in voxels (step length).
            gap: inter-contact gap parameter kept for the caller's use.
        """
        super(ElectrodeSeg, self).__init__()
        # set up input initials
        self.filePath = filePath
        self.patientName = patName
        raw_flag = 0  # check for the filepath existance
        for root, dirs, files in os.walk(self.filePath):
            for filename in files:
                if re.search(r'CT_intra.nii.gz', filename):
                    raw_flag = 1
                    # Bug fix: build the path from the matched file name
                    # (the source contained a literal placeholder string here).
                    self.rawDataPath = f"{self.filePath}/{filename}"
                    break
        if not raw_flag:
            sys.exit()
        label_flag = 0
        for root, dirs, files in os.walk(self.filePath):
            for filename in files:
                if re.search(r'_labels.npy', filename):
                    label_flag = 1
                    # Bug fix: same placeholder repair as for the raw CT path.
                    self.labelsPath = f"{self.filePath}/{filename}"
                    break
        if not label_flag:
            sys.exit()
        self.rawData = nib.load(self.rawDataPath).get_fdata()
        self.labels = np.load(self.labelsPath)
        self.iLabel = iLabel
        self.numMax = numMax
        self.diameterSize = diameterSize
        self.spacing = spacing
        self.gap = gap
        # some calculations to get the rest initials
        self.labelValues = np.unique(self.labels)
        self.numElecs = len(self.labelValues) - 1  # background label 0 excluded
        if self.numElecs > 8:  # remove 'I' from the alphabet list, a trivial custom not to name the electrode 'I'
            self.alphaList = [chr(i) for i in range(65, 66 + self.numElecs)]
            self.alphaList.pop(8)
        else:
            self.alphaList = [chr(i) for i in range(65, 65 + self.numElecs)]
        self.iValue = self.labelValues[self.iLabel]
        self.nameLabel = self.alphaList[self.iLabel - 1]
        data_elec = np.copy(self.labels)
        data_elec[np.where(self.labels != self.iValue)] = 0  # isolate the single cluster of voxels belonging to the ith electrode
        self.xs, self.ys, self.zs = np.where(data_elec != 0)
        self.pos_elec = np.transpose(np.vstack((self.xs, self.ys, self.zs)))  # positions of these voxels
        # Suppress every other electrode in the raw CT so the center-of-mass
        # convergence cannot be pulled towards a neighbouring cluster.
        data_elec1 = np.copy(self.labels)
        data_elec1[np.where(self.labels == self.iValue)] = 0
        self.xrest, self.yrest, self.zrest = np.where(data_elec1 != 0)
        self.rawData[self.xrest, self.yrest, self.zrest] = 0
        self.rawData_single = self.rawData
        xmin = np.amin(self.xs)
        xmax = np.amax(self.xs)
        ymin = np.amin(self.ys)
        ymax = np.amax(self.ys)
        zmin = np.amin(self.zs)
        zmax = np.amax(self.zs)
        # Boost the intensity inside this electrode's bounding box so its
        # voxels dominate the weighted center-of-mass computation.
        self.rawData_single[xmin:xmax + 1, ymin:ymax + 1, zmin:zmax + 1] = self.rawData_single[xmin:xmax + 1, ymin:ymax + 1, zmin:zmax + 1] * 3
        self.resultPath = f"{self.filePath}/{self.patientName}_result"
        if not os.path.exists(self.resultPath):
            os.mkdir(self.resultPath)
        self.resultFile = f"{self.resultPath}/{self.nameLabel}.txt"
        self.elecPos = [0, 0, 0]
        self.headStart = [0, 0, 0]
        self.targetPoint = [0, 0, 0]
        self.regressInfo = [0, 0, 0, 0]

    def pipeline(self):
        """Run the full segmentation: target point, axis regression, stepping."""
        self.startPoint()
        self.contactPoint(1)
        self.regression()
        for j in np.arange(self.numMax - 1):
            # Stop if the new contact converged onto the previous one:
            # we have walked off the end of the electrode.
            if int(self.elecPos[-1, 0]) == int(self.elecPos[-2, 0]) and int(self.elecPos[-1, 1]) == int(self.elecPos[-2, 1]) and int(self.elecPos[-1, 2]) == int(self.elecPos[-2, 2]):
                self.elecPos = self.elecPos[0:-1, :]
                break
            self.step()
            if self.flag_step_stop:
                break
        self.elecPos = self.elecPos[1:, :]  # drop the [0, 0, 0] placeholder row
        self.resulting()

    def resulting(self):
        """Convert voxel coordinates to the saved frame and write them to disk."""
        self.elecPos_true = np.copy(self.elecPos)
        self.elecPos_true[:, 0] = 128 - self.elecPos[:, 0]
        self.elecPos_true[:, 1] = 128 - self.elecPos[:, 1]
        self.elecPos_true[:, 2] = self.elecPos[:, 2] - 128
        self.elecPos_true = self.elecPos_true[:, [0, 2, 1]]
        self.elecFilepath = os.path.join(self.filePath, f"{self.patientName}_result")
        if not os.path.exists(self.elecFilepath):
            os.mkdir(self.elecFilepath)
        # Bug fix: the file write used to live in an 'else' branch, so nothing
        # was saved when the result directory had just been created above.
        self.elecFile = os.path.join(self.elecFilepath, f"{self.nameLabel}.txt")
        with open(self.elecFile, "ab") as f:
            # Truncate before writing so re-runs overwrite rather than append.
            f.seek(0)
            f.truncate()
            np.savetxt(f, self.elecPos_true, fmt='%10.8f', delimiter=' ', newline='\n', header=f"{self.elecPos_true.shape[0]}")

    ## target point functions
    def startPoint(self):
        """Pick the deep (target-end) voxel of the electrode as the walk's start."""
        ## firstly find a voxel near the target
        x = [np.max(self.xs), np.min(self.xs)]
        y = [np.max(self.ys), np.min(self.ys)]
        z = [np.max(self.zs), np.min(self.zs)]
        self.reg1 = LinearRegression().fit(X=self.xs.reshape(-1, 1), y=self.ys)  # x-y
        self.reg2 = LinearRegression().fit(X=self.xs.reshape(-1, 1), y=self.zs)  # x-z
        self.reg3 = LinearRegression().fit(X=self.ys.reshape(-1, 1), y=self.zs)  # y-z
        coefs = [abs(self.reg1.coef_), abs(self.reg2.coef_), abs(self.reg3.coef_)]
        coef_min = coefs.index(min(coefs))
        if coef_min == 0:
            index = [0 if self.reg2.coef_ > 0 else 1, 0 if self.reg3.coef_ > 0 else 1, 0]
        elif coef_min == 1:
            index = [0 if self.reg1.coef_ > 0 else 1, 0, 0 if self.reg3.coef_ > 0 else 1]
        else:
            index = [0, 0 if self.reg1.coef_ > 0 else 1, 0 if self.reg2.coef_ > 0 else 1]
        # ~0 == -1 and ~1 == -2, which still index the opposite extremum of
        # the two-element x/y/z lists above.
        indexreverse = [~index[0], ~index[1], ~index[2]]
        point1 = np.array([x[index[0]], y[index[1]], z[index[2]]])
        point2 = np.array([x[indexreverse[0]], y[indexreverse[1]], z[indexreverse[2]]])
        center = 127.5 * np.ones(3)
        diff1 = point1 - center
        diff2 = point2 - center
        # The end closer to the volume center is taken as the deep (target) end.
        headStart = point2 if np.sum(np.transpose(diff1) * diff1) > np.sum(np.transpose(diff2) * diff2) else point1
        self.direction = indexreverse if np.sum(np.transpose(diff1) * diff1) > np.sum(np.transpose(diff2) * diff2) else index
        ## secondly specify a target voxel in label voxels
        diffs = self.pos_elec - headStart
        diffs2 = np.power(diffs[:, 0], 2) + np.power(diffs[:, 1], 2) + np.power(diffs[:, 2], 2)
        headPointPos = np.argmin(diffs2)
        self.headStart = self.pos_elec[headPointPos, :]

    def converge(self, x, y, z):
        """Snap (x, y, z) to the intensity-weighted center of mass of its ROI.

        Returns the refined (x1, y1, z1), or (0, 0, 0) when the ROI would
        leave the 256^3 volume or contains no electrode signal.
        """
        n = self.diameterSize
        delta = math.ceil(round((n - 1) / 2, 1))  # represent the radius of the electrode contact
        ## extract a cubic ROI of the raw CT data
        seq_s = np.arange(x - delta, x + delta + 1)
        seq_r = np.arange(y - delta, y + delta + 1)
        seq_c = np.arange(z - delta, z + delta + 1)
        if not ((np.array(seq_s) > 0).all() and (np.array(seq_r) > 0).all() and (np.array(seq_c) > 0).all()):
            print('Error: index too small 0!')
            return 0, 0, 0
        elif not ((np.array(seq_s) < 256).all() and (np.array(seq_r) < 256).all() and (np.array(seq_c) < 256).all()):
            print('Error: index too large 256!')
            return 0, 0, 0
        else:
            ## extract the ROI cubic
            matrixVoxels = self.rawData_local[seq_s[0]:seq_s[-1] + 1, seq_r[0]:seq_r[-1] + 1, seq_c[0]:seq_c[-1] + 1]
            sumVoxels = np.sum(matrixVoxels)
            if (np.sum(matrixVoxels) == 0):
                print('Error: Converge to non-elec region!')
                return 0, 0, 0
            else:
                # Build (x, y, z, intensity) rows for every ROI voxel and take
                # the intensity-weighted average position.
                f = np.zeros((1, 4))
                for index, element in np.ndenumerate(matrixVoxels):
                    x, y, z = index
                    tmp = np.array([x + seq_s[0], y + seq_r[0], z + seq_c[0], element])
                    f = np.vstack((f, tmp))
                f = f[1:]
                CM = np.average(f[:, :3], axis=0, weights=f[:, 3])
                C100 = CM[0]
                C010 = CM[1]
                C001 = CM[2]
                x1 = C100
                y1 = C010
                z1 = C001
                return x1, y1, z1

    def contactPoint(self, target):
        """Converge onto one contact and append it to ``self.elecPos``.

        Args:
            target: 1 when locating the first contact from ``self.headStart``;
                0 for later contacts starting from the stepped position
                (self.x0, self.y0, self.z0) set by ``step()``.
        """
        x0 = self.headStart[0] if target == 1 else self.x0
        y0 = self.headStart[1] if target == 1 else self.y0
        z0 = self.headStart[2] if target == 1 else self.z0
        x = int(round(x0))
        y = int(round(y0))
        z = int(round(z0))
        print(f"initial start voxel:({x0}, {y0}, {z0})")
        # Emphasize the electrode voxels within 2 voxels of the expected
        # position so convergence is pulled towards them.
        self.rawData_local = self.rawData_single
        diff_array = self.pos_elec - np.array([x0, y0, z0])
        elec_diffs = np.sqrt(np.dot(diff_array, np.transpose(diff_array)).diagonal())
        ind_diffs = np.where(elec_diffs <= 2)
        self.rawData_local[self.xs[ind_diffs], self.ys[ind_diffs], self.zs[ind_diffs]] = self.rawData_local[self.xs[ind_diffs], self.ys[ind_diffs], self.zs[ind_diffs]] * 2
        (x1, y1, z1) = self.converge(x, y, z)
        itr = 1
        flag_convergence = 0
        # Iterate until the integer voxel position is stable, or give up after 5 tries.
        while not ((x == int(round(x1))) and (y == int(round(y1))) and (z == int(round(z1)))):
            x = int(round(x1))
            y = int(round(y1))
            z = int(round(z1))
            (x1, y1, z1) = self.converge(x, y, z)
            itr = itr + 1
            if itr > 5:
                flag_convergence = 1
                break
        print(f"Convergent center voxel coordinates:({x1},{y1},{z1})")
        print(f"Convergent center voxel value:{self.rawData[int(round(x1)), int(round(y1)), int(round(z1))]}")
        self.flag_step_stop = 0
        if (x1, y1, z1) == (0, 0, 0):
            self.flag_step_stop = 1
            print('here1,converged to 0!')
        else:
            if not flag_convergence:
                print('here2,converged normally!')
            else:
                print('here3, maybe not convergent!')
            self.targetPoint = [x1, y1, z1] if target == 1 else self.targetPoint
            self.elecPos = np.vstack([self.elecPos, [x1, y1, z1]])

    def regression(self):
        """Regress the electrode axis, constrained to pass through the target point."""
        X = np.transpose(np.vstack((self.xs, self.ys)))
        y = self.zs
        forcedX = np.transpose(np.array([self.targetPoint[0], self.targetPoint[1]]))
        forcedy = self.targetPoint[2]
        ## implant a constraint regression, forcing on the head point
        X = X - forcedX
        y = y - forcedy
        reg = Lasso(fit_intercept=False).fit(X=X, y=y)
        reg.intercept_ = reg.intercept_ + forcedy - np.dot(forcedX, reg.coef_)
        ## regression between x and y
        reg2 = LinearRegression(fit_intercept=True).fit(X=self.xs.reshape(-1, 1), y=self.ys)
        self.coef = reg.coef_
        self.intercept = reg.intercept_
        self.coef2 = reg2.coef_
        self.intercept2 = reg2.intercept_

    def step(self):
        """Advance one contact spacing along the electrode axis, then converge."""
        dis = self.spacing  # initial step size
        # Direction cosines from the electrode cluster's bounding-box extents.
        diff_x = np.max(self.xs) - np.min(self.xs)
        diff_y = np.max(self.ys) - np.min(self.ys)
        diff_z = np.max(self.zs) - np.min(self.zs)
        a = np.power(diff_x, 2) + np.power(diff_y, 2) + np.power(diff_z, 2)
        delta_x = diff_x * np.sqrt(np.power(dis, 2) / a)
        delta_y = diff_y * np.sqrt(np.power(dis, 2) / a)
        delta_z = diff_z * np.sqrt(np.power(dis, 2) / a)
        # Bug fix: np.int was removed in NumPy 1.24; use the builtin int.
        # self.direction entries are 0/-1 or 1/-2 (see startPoint's ~ trick).
        self.x0 = int(self.elecPos[-1, 0] - np.round(delta_x)) if ((self.direction[0] == -2) or (self.direction[0] == 0)) else int(self.elecPos[-1, 0] + np.round(delta_x))
        self.y0 = int(self.elecPos[-1, 1] - np.round(delta_y)) if ((self.direction[1] == -2) or (self.direction[1] == 0)) else int(self.elecPos[-1, 1] + np.round(delta_y))
        self.z0 = int(self.elecPos[-1, 2] - np.round(delta_z)) if ((self.direction[2] == -2) or (self.direction[2] == 0)) else int(self.elecPos[-1, 2] + np.round(delta_z))
        self.contactPoint(0)
| [
"sklearn.linear_model.Lasso",
"nibabel.load",
"numpy.array",
"sys.exit",
"numpy.save",
"os.walk",
"numpy.arange",
"os.path.exists",
"re.search",
"numpy.where",
"numpy.ndenumerate",
"numpy.max",
"numpy.linspace",
"numpy.dot",
"numpy.vstack",
"os.mkdir",
"numpy.min",
"numpy.argmin",
... | [((1004, 1018), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1013, 1018), False, 'import os\n'), ((4828, 4844), 'numpy.amax', 'np.amax', (['rawData'], {}), '(rawData)\n', (4835, 4844), True, 'import numpy as np\n'), ((4917, 4933), 'numpy.copy', 'np.copy', (['rawData'], {}), '(rawData)\n', (4924, 4933), True, 'import numpy as np\n'), ((4985, 5008), 'numpy.where', 'np.where', (['(threData != 0)'], {}), '(threData != 0)\n', (4993, 5008), True, 'import numpy as np\n'), ((5298, 5428), 'numpy.savetxt', 'np.savetxt', (['fname', 'X'], {'fmt': '"""%.4f"""', 'delimiter': '""","""', 'newline': '"""\n"""', 'header': '"""point clouds"""', 'footer': '""""""', 'comments': '"""# """', 'encoding': 'None'}), "(fname, X, fmt='%.4f', delimiter=',', newline='\\n', header=\n 'point clouds', footer='', comments='# ', encoding=None)\n", (5308, 5428), True, 'import numpy as np\n'), ((5729, 5762), 'numpy.linspace', 'np.linspace', (['ax', '(ax + bx * 50)', '(50)'], {}), '(ax, ax + bx * 50, 50)\n', (5740, 5762), True, 'import numpy as np\n'), ((5769, 5802), 'numpy.linspace', 'np.linspace', (['ay', '(ay + by * 50)', '(50)'], {}), '(ay, ay + by * 50, 50)\n', (5780, 5802), True, 'import numpy as np\n'), ((5809, 5842), 'numpy.linspace', 'np.linspace', (['az', '(az + bz * 50)', '(50)'], {}), '(az, az + bz * 50, 50)\n', (5820, 5842), True, 'import numpy as np\n'), ((5915, 5927), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (5925, 5927), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((7154, 7173), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['ndarray'], {}), '(ndarray)\n', (7164, 7173), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((7543, 7558), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['int'], {}), '(int)\n', (7553, 7558), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((9930, 9942), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (9940, 9942), False, 'from PyQt5.QtCore import QThread, pyqtSignal\n'), ((10775, 10801), 
'os.walk', 'os.walk', (['dir'], {'topdown': '(True)'}), '(dir, topdown=True)\n', (10782, 10801), False, 'import os\n'), ((11258, 11306), 'numpy.save', 'np.save', (['f"""{filePath}/chnXyzDict.npy"""', 'elec_dict'], {}), "(f'{filePath}/chnXyzDict.npy', elec_dict)\n", (11265, 11306), True, 'import numpy as np\n'), ((11620, 11671), 'numpy.loadtxt', 'np.loadtxt', (['elecs_file'], {'dtype': '"""float"""', 'comments': '"""#"""'}), "(elecs_file, dtype='float', comments='#')\n", (11630, 11671), True, 'import numpy as np\n'), ((5191, 5213), 'numpy.array', 'np.array', (['(xs, ys, zs)'], {}), '((xs, ys, zs))\n', (5199, 5213), True, 'import numpy as np\n'), ((6070, 6125), 'os.path.join', 'os.path.join', (['f"""{self.directory_surf}/mri"""', 'f"""mask.mgz"""'], {}), "(f'{self.directory_surf}/mri', f'mask.mgz')\n", (6082, 6125), False, 'import os\n'), ((6145, 6164), 'nibabel.load', 'nib.load', (['mask_file'], {}), '(mask_file)\n', (6153, 6164), True, 'import nibabel as nib\n'), ((6230, 6299), 'scipy.ndimage.morphology.binary_erosion', 'ndimage.morphology.binary_erosion', (['data_mask'], {'iterations': 'self.ero_itr'}), '(data_mask, iterations=self.ero_itr)\n', (6263, 6299), False, 'from scipy import ndimage\n'), ((6330, 6393), 'os.path.join', 'os.path.join', (['self.directory_ct', 'f"""{self.patient}CT_Reg.nii.gz"""'], {}), "(self.directory_ct, f'{self.patient}CT_Reg.nii.gz')\n", (6342, 6393), False, 'import os\n'), ((6411, 6431), 'nibabel.load', 'nib.load', (['CTreg_file'], {}), '(CTreg_file)\n', (6419, 6431), True, 'import nibabel as nib\n'), ((6486, 6502), 'numpy.amax', 'np.amax', (['data_ct'], {}), '(data_ct)\n', (6493, 6502), True, 'import numpy as np\n'), ((6637, 6676), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['data_ct', 'img_ct.affine'], {}), '(data_ct, img_ct.affine)\n', (6652, 6676), True, 'import nibabel as nib\n'), ((6699, 6764), 'os.path.join', 'os.path.join', (['self.directory_ct', 'f"""{self.patient}CT_intra.nii.gz"""'], {}), "(self.directory_ct, 
f'{self.patient}CT_intra.nii.gz')\n", (6711, 6764), False, 'import os\n'), ((6773, 6800), 'nibabel.save', 'nib.save', (['img1', 'intra_file1'], {}), '(img1, intra_file1)\n', (6781, 6800), True, 'import nibabel as nib\n'), ((6862, 6901), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['data_ct', 'img_ct.affine'], {}), '(data_ct, img_ct.affine)\n', (6877, 6901), True, 'import nibabel as nib\n'), ((6923, 7040), 'os.path.join', 'os.path.join', (['self.directory_ct', 'f"""{self.patient}CT_intracranial_{self.thre}_{self.K}_{self.ero_itr}.nii.gz"""'], {}), "(self.directory_ct,\n f'{self.patient}CT_intracranial_{self.thre}_{self.K}_{self.ero_itr}.nii.gz'\n )\n", (6935, 7040), False, 'import os\n'), ((7040, 7066), 'nibabel.save', 'nib.save', (['img0', 'intra_file'], {}), '(img0, intra_file)\n', (7048, 7066), True, 'import nibabel as nib\n'), ((8664, 8684), 'numpy.array', 'np.array', (['elec_track'], {}), '(elec_track)\n', (8672, 8684), True, 'import numpy as np\n'), ((12031, 12043), 'math.ceil', 'math.ceil', (['x'], {}), '(x)\n', (12040, 12043), False, 'import math\n'), ((12077, 12089), 'math.ceil', 'math.ceil', (['y'], {}), '(y)\n', (12086, 12089), False, 'import math\n'), ((12123, 12135), 'math.ceil', 'math.ceil', (['z'], {}), '(z)\n', (12132, 12135), False, 'import math\n'), ((13433, 13455), 'os.walk', 'os.walk', (['self.filePath'], {}), '(self.filePath)\n', (13440, 13455), False, 'import os\n'), ((13785, 13807), 'os.walk', 'os.walk', (['self.filePath'], {}), '(self.filePath)\n', (13792, 13807), False, 'import os\n'), ((14172, 14196), 'numpy.load', 'np.load', (['self.labelsPath'], {}), '(self.labelsPath)\n', (14179, 14196), True, 'import numpy as np\n'), ((14431, 14453), 'numpy.unique', 'np.unique', (['self.labels'], {}), '(self.labels)\n', (14440, 14453), True, 'import numpy as np\n'), ((14943, 14963), 'numpy.copy', 'np.copy', (['self.labels'], {}), '(self.labels)\n', (14950, 14963), True, 'import numpy as np\n'), ((15129, 15153), 'numpy.where', 'np.where', (['(data_elec != 
0)'], {}), '(data_elec != 0)\n', (15137, 15153), True, 'import numpy as np\n'), ((15300, 15320), 'numpy.copy', 'np.copy', (['self.labels'], {}), '(self.labels)\n', (15307, 15320), True, 'import numpy as np\n'), ((15427, 15452), 'numpy.where', 'np.where', (['(data_elec1 != 0)'], {}), '(data_elec1 != 0)\n', (15435, 15452), True, 'import numpy as np\n'), ((15590, 15606), 'numpy.amin', 'np.amin', (['self.xs'], {}), '(self.xs)\n', (15597, 15606), True, 'import numpy as np\n'), ((15622, 15638), 'numpy.amax', 'np.amax', (['self.xs'], {}), '(self.xs)\n', (15629, 15638), True, 'import numpy as np\n'), ((15654, 15670), 'numpy.amin', 'np.amin', (['self.ys'], {}), '(self.ys)\n', (15661, 15670), True, 'import numpy as np\n'), ((15686, 15702), 'numpy.amax', 'np.amax', (['self.ys'], {}), '(self.ys)\n', (15693, 15702), True, 'import numpy as np\n'), ((15718, 15734), 'numpy.amin', 'np.amin', (['self.zs'], {}), '(self.zs)\n', (15725, 15734), True, 'import numpy as np\n'), ((15750, 15766), 'numpy.amax', 'np.amax', (['self.zs'], {}), '(self.zs)\n', (15757, 15766), True, 'import numpy as np\n'), ((16507, 16533), 'numpy.arange', 'np.arange', (['(self.numMax - 1)'], {}), '(self.numMax - 1)\n', (16516, 16533), True, 'import numpy as np\n'), ((17260, 17281), 'numpy.copy', 'np.copy', (['self.elecPos'], {}), '(self.elecPos)\n', (17267, 17281), True, 'import numpy as np\n'), ((17556, 17613), 'os.path.join', 'os.path.join', (['self.filePath', 'f"""{self.patientName}_result"""'], {}), "(self.filePath, f'{self.patientName}_result')\n", (17568, 17613), False, 'import os\n'), ((19102, 19151), 'numpy.array', 'np.array', (['[x[index[0]], y[index[1]], z[index[2]]]'], {}), '([x[index[0]], y[index[1]], z[index[2]]])\n', (19110, 19151), True, 'import numpy as np\n'), ((19169, 19239), 'numpy.array', 'np.array', (['[x[indexreverse[0]], y[indexreverse[1]], z[indexreverse[2]]]'], {}), '([x[indexreverse[0]], y[indexreverse[1]], z[indexreverse[2]]])\n', (19177, 19239), True, 'import numpy as np\n'), ((19792, 
19809), 'numpy.argmin', 'np.argmin', (['diffs2'], {}), '(diffs2)\n', (19801, 19809), True, 'import numpy as np\n'), ((20159, 20194), 'numpy.arange', 'np.arange', (['(x - delta)', '(x + delta + 1)'], {}), '(x - delta, x + delta + 1)\n', (20168, 20194), True, 'import numpy as np\n'), ((20211, 20246), 'numpy.arange', 'np.arange', (['(y - delta)', '(y + delta + 1)'], {}), '(y - delta, y + delta + 1)\n', (20220, 20246), True, 'import numpy as np\n'), ((20263, 20298), 'numpy.arange', 'np.arange', (['(z - delta)', '(z + delta + 1)'], {}), '(z - delta, z + delta + 1)\n', (20272, 20298), True, 'import numpy as np\n'), ((22305, 22330), 'numpy.where', 'np.where', (['(elec_diffs <= 2)'], {}), '(elec_diffs <= 2)\n', (22313, 22330), True, 'import numpy as np\n'), ((4783, 4802), 'nibabel.load', 'nib.load', (['intraFile'], {}), '(intraFile)\n', (4791, 4802), True, 'import nibabel as nib\n'), ((7423, 7446), 'numpy.vstack', 'np.vstack', (['(xs, ys, zs)'], {}), '((xs, ys, zs))\n', (7432, 7446), True, 'import numpy as np\n'), ((7777, 7803), 'os.path.exists', 'os.path.exists', (['hough_file'], {}), '(hough_file)\n', (7791, 7803), False, 'import os\n'), ((9044, 9079), 'numpy.array', 'np.array', (['elec_track[0:self.K, 1:4]'], {}), '(elec_track[0:self.K, 1:4])\n', (9052, 9079), True, 'import numpy as np\n'), ((9369, 9394), 'numpy.zeros', 'np.zeros', (['(256, 256, 256)'], {}), '((256, 256, 256))\n', (9377, 9394), True, 'import numpy as np\n'), ((11505, 11524), 'nibabel.load', 'nib.load', (['annot_dir'], {}), '(annot_dir)\n', (11513, 11524), True, 'import nibabel as nib\n'), ((13717, 13727), 'sys.exit', 'sys.exit', ([], {}), '()\n', (13725, 13727), False, 'import sys\n'), ((14068, 14078), 'sys.exit', 'sys.exit', ([], {}), '()\n', (14076, 14078), False, 'import sys\n'), ((14982, 15018), 'numpy.where', 'np.where', (['(self.labels != self.iValue)'], {}), '(self.labels != self.iValue)\n', (14990, 15018), True, 'import numpy as np\n'), ((15192, 15230), 'numpy.vstack', 'np.vstack', (['(self.xs, 
self.ys, self.zs)'], {}), '((self.xs, self.ys, self.zs))\n', (15201, 15230), True, 'import numpy as np\n'), ((15340, 15376), 'numpy.where', 'np.where', (['(self.labels == self.iValue)'], {}), '(self.labels == self.iValue)\n', (15348, 15376), True, 'import numpy as np\n'), ((16096, 16127), 'os.path.exists', 'os.path.exists', (['self.resultPath'], {}), '(self.resultPath)\n', (16110, 16127), False, 'import os\n'), ((16141, 16166), 'os.mkdir', 'os.mkdir', (['self.resultPath'], {}), '(self.resultPath)\n', (16149, 16166), False, 'import os\n'), ((17629, 17662), 'os.path.exists', 'os.path.exists', (['self.elecFilepath'], {}), '(self.elecFilepath)\n', (17643, 17662), False, 'import os\n'), ((17676, 17703), 'os.mkdir', 'os.mkdir', (['self.elecFilepath'], {}), '(self.elecFilepath)\n', (17684, 17703), False, 'import os\n'), ((17746, 17802), 'os.path.join', 'os.path.join', (['self.elecFilepath', 'f"""{self.nameLabel}.txt"""'], {}), "(self.elecFilepath, f'{self.nameLabel}.txt')\n", (17758, 17802), False, 'import os\n'), ((18191, 18206), 'numpy.max', 'np.max', (['self.xs'], {}), '(self.xs)\n', (18197, 18206), True, 'import numpy as np\n'), ((18208, 18223), 'numpy.min', 'np.min', (['self.xs'], {}), '(self.xs)\n', (18214, 18223), True, 'import numpy as np\n'), ((18238, 18253), 'numpy.max', 'np.max', (['self.ys'], {}), '(self.ys)\n', (18244, 18253), True, 'import numpy as np\n'), ((18255, 18270), 'numpy.min', 'np.min', (['self.ys'], {}), '(self.ys)\n', (18261, 18270), True, 'import numpy as np\n'), ((18285, 18300), 'numpy.max', 'np.max', (['self.zs'], {}), '(self.zs)\n', (18291, 18300), True, 'import numpy as np\n'), ((18302, 18317), 'numpy.min', 'np.min', (['self.zs'], {}), '(self.zs)\n', (18308, 18317), True, 'import numpy as np\n'), ((19265, 19275), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (19272, 19275), True, 'import numpy as np\n'), ((19745, 19769), 'numpy.power', 'np.power', (['diffs[:, 2]', '(2)'], {}), '(diffs[:, 2], 2)\n', (19753, 19769), True, 'import numpy as 
np\n'), ((22176, 22198), 'numpy.array', 'np.array', (['[x0, y0, z0]'], {}), '([x0, y0, z0])\n', (22184, 22198), True, 'import numpy as np\n'), ((23962, 23991), 'numpy.vstack', 'np.vstack', (['(self.xs, self.ys)'], {}), '((self.xs, self.ys))\n', (23971, 23991), True, 'import numpy as np\n'), ((24053, 24105), 'numpy.array', 'np.array', (['[self.targetPoint[0], self.targetPoint[1]]'], {}), '([self.targetPoint[0], self.targetPoint[1]])\n', (24061, 24105), True, 'import numpy as np\n'), ((24378, 24404), 'numpy.dot', 'np.dot', (['forcedX', 'reg.coef_'], {}), '(forcedX, reg.coef_)\n', (24384, 24404), True, 'import numpy as np\n'), ((25104, 25119), 'numpy.max', 'np.max', (['self.xs'], {}), '(self.xs)\n', (25110, 25119), True, 'import numpy as np\n'), ((25122, 25137), 'numpy.min', 'np.min', (['self.xs'], {}), '(self.xs)\n', (25128, 25137), True, 'import numpy as np\n'), ((25155, 25170), 'numpy.max', 'np.max', (['self.ys'], {}), '(self.ys)\n', (25161, 25170), True, 'import numpy as np\n'), ((25173, 25188), 'numpy.min', 'np.min', (['self.ys'], {}), '(self.ys)\n', (25179, 25188), True, 'import numpy as np\n'), ((25206, 25221), 'numpy.max', 'np.max', (['self.zs'], {}), '(self.zs)\n', (25212, 25221), True, 'import numpy as np\n'), ((25224, 25239), 'numpy.min', 'np.min', (['self.zs'], {}), '(self.zs)\n', (25230, 25239), True, 'import numpy as np\n'), ((25294, 25313), 'numpy.power', 'np.power', (['diff_z', '(2)'], {}), '(diff_z, 2)\n', (25302, 25313), True, 'import numpy as np\n'), ((8402, 8434), 're.findall', 're.findall', (['"""\\\\d+\\\\.?\\\\d*"""', 'line'], {}), "('\\\\d+\\\\.?\\\\d*', line)\n", (8412, 8434), False, 'import re\n'), ((9140, 9163), 'numpy.vstack', 'np.vstack', (['(xs, ys, zs)'], {}), '((xs, ys, zs))\n', (9149, 9163), True, 'import numpy as np\n'), ((9469, 9490), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (9477, 9490), True, 'import numpy as np\n'), ((9569, 9630), 'os.path.join', 'os.path.join', (['self.directory_ct', 
'f"""{self.patient}_labels.npy"""'], {}), "(self.directory_ct, f'{self.patient}_labels.npy')\n", (9581, 9630), False, 'import os\n'), ((11093, 11117), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (11105, 11117), False, 'import os\n'), ((12875, 12892), 're.match', 're.match', (['b', 'line'], {}), '(b, line)\n', (12883, 12892), False, 'import re\n'), ((13511, 13549), 're.search', 're.search', (['"""CT_intra.nii.gz"""', 'filename'], {}), "('CT_intra.nii.gz', filename)\n", (13520, 13549), False, 'import re\n'), ((13863, 13897), 're.search', 're.search', (['"""_labels.npy"""', 'filename'], {}), "('_labels.npy', filename)\n", (13872, 13897), False, 'import re\n'), ((14111, 14137), 'nibabel.load', 'nib.load', (['self.rawDataPath'], {}), '(self.rawDataPath)\n', (14119, 14137), True, 'import nibabel as nib\n'), ((17956, 18075), 'numpy.savetxt', 'np.savetxt', (['f', 'self.elecPos_true'], {'fmt': '"""%10.8f"""', 'delimiter': '""" """', 'newline': '"""\n"""', 'header': 'f"""{self.elecPos_true.shape[0]}"""'}), "(f, self.elecPos_true, fmt='%10.8f', delimiter=' ', newline='\\n',\n header=f'{self.elecPos_true.shape[0]}')\n", (17966, 18075), True, 'import numpy as np\n'), ((18339, 18357), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (18355, 18357), False, 'from sklearn.linear_model import LinearRegression, Lasso\n'), ((18424, 18442), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (18440, 18442), False, 'from sklearn.linear_model import LinearRegression, Lasso\n'), ((18509, 18527), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (18525, 18527), False, 'from sklearn.linear_model import LinearRegression, Lasso\n'), ((19693, 19717), 'numpy.power', 'np.power', (['diffs[:, 0]', '(2)'], {}), '(diffs[:, 0], 2)\n', (19701, 19717), True, 'import numpy as np\n'), ((19719, 19743), 'numpy.power', 'np.power', (['diffs[:, 1]', '(2)'], {}), '(diffs[:, 1], 2)\n', (19727, 
19743), True, 'import numpy as np\n'), ((20896, 20916), 'numpy.sum', 'np.sum', (['matrixVoxels'], {}), '(matrixVoxels)\n', (20902, 20916), True, 'import numpy as np\n'), ((23581, 23620), 'numpy.vstack', 'np.vstack', (['[self.elecPos, [x1, y1, z1]]'], {}), '([self.elecPos, [x1, y1, z1]])\n', (23590, 23620), True, 'import numpy as np\n'), ((23810, 23849), 'numpy.vstack', 'np.vstack', (['[self.elecPos, [x1, y1, z1]]'], {}), '([self.elecPos, [x1, y1, z1]])\n', (23819, 23849), True, 'import numpy as np\n'), ((24285, 24311), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (24290, 24311), False, 'from sklearn.linear_model import LinearRegression, Lasso\n'), ((24458, 24494), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {'fit_intercept': '(True)'}), '(fit_intercept=True)\n', (24474, 24494), False, 'from sklearn.linear_model import LinearRegression, Lasso\n'), ((25252, 25271), 'numpy.power', 'np.power', (['diff_x', '(2)'], {}), '(diff_x, 2)\n', (25260, 25271), True, 'import numpy as np\n'), ((25273, 25292), 'numpy.power', 'np.power', (['diff_y', '(2)'], {}), '(diff_y, 2)\n', (25281, 25292), True, 'import numpy as np\n'), ((9183, 9276), 'sklearn.mixture.GaussianMixture', 'GMM', ([], {'n_components': 'self.K', 'covariance_type': '"""full"""', 'means_init': 'centroids', 'random_state': 'None'}), "(n_components=self.K, covariance_type='full', means_init=centroids,\n random_state=None)\n", (9186, 9276), True, 'from sklearn.mixture import GaussianMixture as GMM\n'), ((20934, 20954), 'numpy.sum', 'np.sum', (['matrixVoxels'], {}), '(matrixVoxels)\n', (20940, 20954), True, 'import numpy as np\n'), ((21091, 21107), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {}), '((1, 4))\n', (21099, 21107), True, 'import numpy as np\n'), ((21146, 21174), 'numpy.ndenumerate', 'np.ndenumerate', (['matrixVoxels'], {}), '(matrixVoxels)\n', (21160, 21174), True, 'import numpy as np\n'), ((21385, 21430), 'numpy.average', 'np.average', 
(['f[:, :3]'], {'axis': '(0)', 'weights': 'f[:, 3]'}), '(f[:, :3], axis=0, weights=f[:, 3])\n', (21395, 21430), True, 'import numpy as np\n'), ((25348, 25364), 'numpy.power', 'np.power', (['dis', '(2)'], {}), '(dis, 2)\n', (25356, 25364), True, 'import numpy as np\n'), ((25404, 25420), 'numpy.power', 'np.power', (['dis', '(2)'], {}), '(dis, 2)\n', (25412, 25420), True, 'import numpy as np\n'), ((25460, 25476), 'numpy.power', 'np.power', (['dis', '(2)'], {}), '(dis, 2)\n', (25468, 25476), True, 'import numpy as np\n'), ((25898, 25915), 'numpy.round', 'np.round', (['delta_x'], {}), '(delta_x)\n', (25906, 25915), True, 'import numpy as np\n'), ((26005, 26022), 'numpy.round', 'np.round', (['delta_x'], {}), '(delta_x)\n', (26013, 26022), True, 'import numpy as np\n'), ((26070, 26087), 'numpy.round', 'np.round', (['delta_y'], {}), '(delta_y)\n', (26078, 26087), True, 'import numpy as np\n'), ((26177, 26194), 'numpy.round', 'np.round', (['delta_y'], {}), '(delta_y)\n', (26185, 26194), True, 'import numpy as np\n'), ((26242, 26259), 'numpy.round', 'np.round', (['delta_z'], {}), '(delta_z)\n', (26250, 26259), True, 'import numpy as np\n'), ((26349, 26366), 'numpy.round', 'np.round', (['delta_z'], {}), '(delta_z)\n', (26357, 26366), True, 'import numpy as np\n'), ((19377, 19396), 'numpy.transpose', 'np.transpose', (['diff1'], {}), '(diff1)\n', (19389, 19396), True, 'import numpy as np\n'), ((19413, 19432), 'numpy.transpose', 'np.transpose', (['diff2'], {}), '(diff2)\n', (19425, 19432), True, 'import numpy as np\n'), ((19500, 19519), 'numpy.transpose', 'np.transpose', (['diff1'], {}), '(diff1)\n', (19512, 19519), True, 'import numpy as np\n'), ((19536, 19555), 'numpy.transpose', 'np.transpose', (['diff2'], {}), '(diff2)\n', (19548, 19555), True, 'import numpy as np\n'), ((21238, 21299), 'numpy.array', 'np.array', (['[x + seq_s[0], y + seq_r[0], z + seq_c[0], element]'], {}), '([x + seq_s[0], y + seq_r[0], z + seq_c[0], element])\n', (21246, 21299), True, 'import numpy as 
np\n'), ((21318, 21337), 'numpy.vstack', 'np.vstack', (['(f, tmp)'], {}), '((f, tmp))\n', (21327, 21337), True, 'import numpy as np\n'), ((22247, 22271), 'numpy.transpose', 'np.transpose', (['diff_array'], {}), '(diff_array)\n', (22259, 22271), True, 'import numpy as np\n'), ((20317, 20332), 'numpy.array', 'np.array', (['seq_s'], {}), '(seq_s)\n', (20325, 20332), True, 'import numpy as np\n'), ((20349, 20364), 'numpy.array', 'np.array', (['seq_r'], {}), '(seq_r)\n', (20357, 20364), True, 'import numpy as np\n'), ((20381, 20396), 'numpy.array', 'np.array', (['seq_c'], {}), '(seq_c)\n', (20389, 20396), True, 'import numpy as np\n'), ((20503, 20518), 'numpy.array', 'np.array', (['seq_s'], {}), '(seq_s)\n', (20511, 20518), True, 'import numpy as np\n'), ((20537, 20552), 'numpy.array', 'np.array', (['seq_r'], {}), '(seq_r)\n', (20545, 20552), True, 'import numpy as np\n'), ((20571, 20586), 'numpy.array', 'np.array', (['seq_c'], {}), '(seq_c)\n', (20579, 20586), True, 'import numpy as np\n')] |
import os
import numpy as np
from src.learn.bots.ValueBot import ValueBot
import src.learn.bots.utils as utils
from src.play.model.Game import WHITE, BLACK
class Bot_31(ValueBot):
    """Value-bot variant 31: feeds the network the encoded board plus a
    liberty plane for each player."""

    def get_path_to_self(self):
        """Return the absolute filesystem path of this bot's module."""
        return os.path.abspath(__file__)

    @staticmethod
    def generate_nn_input(flat_board, color):
        """Assemble the NN input planes for `flat_board` from `color`'s view.

        Concatenates (along axis 1) the encoded board, the liberties of the
        player to move, and the liberties of the opponent.
        """
        planes = (
            utils.encode_board(flat_board, color),
            utils.get_liberties(flat_board, color),
            utils.get_liberties(flat_board, -color),
        )
        return np.concatenate(planes, axis=1)
| [
"os.path.abspath",
"src.learn.bots.utils.encode_board",
"src.learn.bots.utils.get_liberties",
"numpy.concatenate"
] | [((229, 254), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (244, 254), False, 'import os\n'), ((345, 382), 'src.learn.bots.utils.encode_board', 'utils.encode_board', (['flat_board', 'color'], {}), '(flat_board, color)\n', (363, 382), True, 'import src.learn.bots.utils as utils\n'), ((410, 448), 'src.learn.bots.utils.get_liberties', 'utils.get_liberties', (['flat_board', 'color'], {}), '(flat_board, color)\n', (429, 448), True, 'import src.learn.bots.utils as utils\n'), ((478, 517), 'src.learn.bots.utils.get_liberties', 'utils.get_liberties', (['flat_board', '(-color)'], {}), '(flat_board, -color)\n', (497, 517), True, 'import src.learn.bots.utils as utils\n'), ((650, 728), 'numpy.concatenate', 'np.concatenate', (['(encoded_boards, player_liberties, opponent_liberties)'], {'axis': '(1)'}), '((encoded_boards, player_liberties, opponent_liberties), axis=1)\n', (664, 728), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Data Source
import yfinance as yf
import time, datetime, math
from datetime import datetime
import sqlite3
# ---------------------------------------------------------------------------
# Backtest of an RSI / Bollinger-band mean-reversion strategy on the price
# history stored in the local SQLite database.
# Buy when RSI turns up out of oversold territory with price between the
# 2-sigma and 1-sigma lower bands; sell when RSI rolls over from overbought.
# ---------------------------------------------------------------------------
con = sqlite3.connect("DB/stocks.db")
stocks = ['UBER']

StartBal = 1000   # current cash balance
Nshares = 0       # shares held while a position is open
sl = StartBal     # balance snapshot at the last buy, for per-trade P&L
buy = 0           # 1 while a position is open, else 0
b = 0             # total number of buys executed
tv = 0            # seconds left until the 15:57:00 end-of-day cutoff
olddata = 0       # timestamp of the previously processed bar
per = []          # per-trade percentage returns

for stock in stocks:
    data = pd.read_sql_query("SELECT * FROM stocks_hist WHERE symbol='" + stock + "' ORDER BY Datetime DESC limit 100 ", con, index_col='Datetime')
    # BUG FIX: DataFrame.sort_index() returns a *new* frame; the original
    # discarded the result, leaving the bars in descending time order so
    # every rolling indicator below was computed on reversed data.
    data = data.sort_index()

    # --- indicators -------------------------------------------------------
    # RSI over 14-bar simple moving averages of gains and losses.
    data['Return'] = np.log(data['Close'] / data['Close'].shift(1))
    data['Movement'] = data['Close'] - data['Close'].shift(1)
    data['up'] = np.where(data['Movement'] > 0, data['Movement'], 0)
    data['down'] = np.where(data['Movement'] < 0, data['Movement'], 0)
    window_length = 14
    up = data['up'].rolling(window_length).mean()
    down = data['down'].abs().rolling(window_length).mean()
    RS = up / down
    data['RSI'] = 100.0 - (100.0 / (1.0 + RS))

    # Bollinger bands at 1 and 2 standard deviations around a 20-bar SMA.
    data['MA20'] = data['Close'].rolling(window=20).mean()
    data['20dSTD'] = data['Close'].rolling(window=20).std()
    data['Upper'] = data['MA20'] + (data['20dSTD'] * 2)
    data['Lower'] = data['MA20'] - (data['20dSTD'] * 2)
    data['Upper1s'] = data['MA20'] + (data['20dSTD'] * 1)
    data['Lower1s'] = data['MA20'] - (data['20dSTD'] * 1)
    data['LBPer'] = (data['Close'] / data['Lower']) - 1
    data['UBPer'] = (data['Upper'] / data['Close']) - 1
    data['UBPer1s'] = (data['Close'] / data['Upper1s']) - 1

    # Accumulation/Distribution line summed over the previous 14 bars,
    # shifted so each bar only sees completed history.
    data['CMFV'] = (((data['Close'] - data['Low']) - (data['High'] - data['Close']))
                    / (data['High'] - data['Low'])) * data['Volume']
    data['AD'] = data['CMFV'].rolling(14, min_periods=14).sum()
    data['AD'] = data['AD'].shift(1)

    # --- walk the bars ----------------------------------------------------
    LastRSI = 0
    LastClose = 10000000
    # ROBUSTNESS: initialised up front; previously LastLB was only assigned at
    # the end of an iteration, so the buy test could hit an undefined name.
    LastLB = 10000000
    LastUB = 10000000
    now = 0
    for index, row in data.iterrows():
        timestr = '15:57:00'
        now = index
        if now != olddata:
            # Seconds remaining until the 15:57 cutoff (negative = past it).
            current_time = now.strftime("%H:%M:%S")
            ftr = [3600, 60, 1]
            tv = (sum([a * b for a, b in zip(ftr, map(int, timestr.split(':')))])
                  - sum([a * b for a, b in zip(ftr, map(int, current_time.split(':')))]))
            TStop = 1 if tv < 0 else 0  # end-of-day flag (sell-at-close disabled)

            # SELL: RSI rolling over from overbought, price still rising,
            # ADL above price.
            if buy == 1 and row['RSI'] >= 80 and LastRSI > row['RSI'] and LastClose < row['Close'] and row['AD'] > row['Close']:
                StartBal += row['Close'] * Nshares
                Nshares = 0
                buy = 0
                per.append((((StartBal / sl) - 1) * 100))

            # BUY: RSI turning up while oversold, price rising between the
            # 2-sigma and 1-sigma lower bands, ADL below price.
            if buy == 0 and row['RSI'] <= 30 and LastRSI < row['RSI'] and LastClose < row['Close'] and row['AD'] < row['Close'] and row['Close'] > LastLB and row['Close'] > row['Lower'] and row['Close'] < row['Lower1s']:
                sl = StartBal
                Nshares = math.floor(StartBal / row['Close'])
                StartBal -= row['Close'] * Nshares
                buy = 1
                b += 1

            LastRSI = row['RSI']
            LastClose = row['Close']
            LastLB = row['Lower']
            LastUB = row['Upper']
        olddata = now
    print(f"total transactions {b} and pending {buy}")

# --- summary ---------------------------------------------------------------
Lost = sum(map(lambda x: x < 0, per))
Won = sum(map(lambda x: x > 0, per))
# ROBUSTNESS: guard against division by zero when no trade ever closed.
perwon = (Won / len(per) * 100) if per else 0.0
print(perwon)
print(sum(per))
print(sl)
| [
"pandas.read_sql_query",
"sqlite3.connect",
"numpy.where",
"math.floor"
] | [((186, 217), 'sqlite3.connect', 'sqlite3.connect', (['"""DB/stocks.db"""'], {}), "('DB/stocks.db')\n", (201, 217), False, 'import sqlite3\n'), ((1286, 1426), 'pandas.read_sql_query', 'pd.read_sql_query', (['("SELECT * FROM stocks_hist WHERE symbol=\'" + stock +\n "\' ORDER BY Datetime DESC limit 100 ")', 'con'], {'index_col': '"""Datetime"""'}), '("SELECT * FROM stocks_hist WHERE symbol=\'" + stock +\n "\' ORDER BY Datetime DESC limit 100 ", con, index_col=\'Datetime\')\n', (1303, 1426), True, 'import pandas as pd\n'), ((1683, 1734), 'numpy.where', 'np.where', (["(data['Movement'] > 0)", "data['Movement']", '(0)'], {}), "(data['Movement'] > 0, data['Movement'], 0)\n", (1691, 1734), True, 'import numpy as np\n'), ((1752, 1803), 'numpy.where', 'np.where', (["(data['Movement'] < 0)", "data['Movement']", '(0)'], {}), "(data['Movement'] < 0, data['Movement'], 0)\n", (1760, 1803), True, 'import numpy as np\n'), ((4841, 4876), 'math.floor', 'math.floor', (["(StartBal / row['Close'])"], {}), "(StartBal / row['Close'])\n", (4851, 4876), False, 'import time, datetime, math\n')] |
# ______ __
# / \ / |
# /$$$$$$ | __ __ __ ______ _______ $$ | __ __
# $$ | $$ |/ | / | / | / \ / \ $$ | / | / |
# $$ | $$ |$$ | $$ | $$ |/$$$$$$ |$$$$$$$ | $$ | $$ | $$ |
# $$ | $$ |$$ | $$ | $$ |$$ $$ |$$ | $$ | $$ | $$ | $$ |
# $$ \__$$ |$$ \_$$ \_$$ |$$$$$$$$/ $$ | $$ | $$ |_____ $$ \__$$ |
# $$ $$/ $$ $$ $$/ $$ |$$ | $$ | $$ |$$ $$/
# $$$$$$/ $$$$$/$$$$/ $$$$$$$/ $$/ $$/ $$$$$$$$/ $$$$$$/
#
# File: test_units.py
# Author: <NAME>
# Date:
# Email: <EMAIL>
# Description:
from typing import *
import tensorflow as tf
import anyconfig
import easydict
import numpy as np
from src.dataset import utils as data_utils
from src.tools.train_net import get_dataset
from src.model.backbone import Resnet31, BasicBlock
from src.model.model import MasterModel
from src.model.metrics import WordAccuary
from src.dataset.benchmark_data_generator import generator_lmdb
def test_dataset():
    """Smoke test: build the train/eval datasets from the master config and
    walk the evaluation split, printing each batch index."""
    config = easydict.EasyDict(
        anyconfig.load('/home/luning/dev/projects/master-tf/configs/master.yaml'))
    train_ds, eval_ds = get_dataset(config)
    for index, _batch in enumerate(eval_ds):
        print(index)
def test_training():
    # Placeholder: no training smoke test implemented yet.
    pass
def test_backbone():
    """Push one random batch through the ResNet-31 backbone and print the
    resulting feature-map shape."""
    config = easydict.EasyDict(anyconfig.load('configs/master.yaml'))
    backbone = Resnet31(block=BasicBlock,
                        backbone_config=config['model']['backbone'])
    batch = tf.random.normal([10, 48, 160, 3])
    features = backbone(batch, training=True)
    print(features.shape)
def test_master():
    """Run one forward/backward optimisation step of the MASTER model on
    random images and random integer targets."""
    config = easydict.EasyDict(anyconfig.load('configs/master.yaml'))
    image = tf.random.normal([10, 48, 160, 3])
    target = tf.constant(np.random.randint(0, 10, (10, 50)), dtype=tf.uint8)
    model = MasterModel(config.model, 10, (48, 160))
    optimizer = tf.optimizers.Adadelta(learning_rate=1.0, rho=0.9, epsilon=1e-6)
    with tf.GradientTape() as tape:
        logits = model(image, target, training=True)
        loss = tf.keras.losses.sparse_categorical_crossentropy(
            target, logits, from_logits=True)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    print(logits)
def test_decoder():
    """Decode model predictions for a random batch and print the decoded
    tensor."""
    config = easydict.EasyDict(
        anyconfig.load('/home/luning/dev/projects/master-tf/configs/master.yaml'))
    batch = tf.random.normal([10, 48, 160, 3])
    model = MasterModel(config.model, 10, (48, 160))
    ys = model.decode(batch, padding=tf.constant(True))
    print(data_utils.LabelTransformer.decode_tensor(ys))
def test_accuarcy():
    """WordAccuary sanity checks: half right -> 0.5, all right -> 1.0,
    all wrong -> 0.0; reset() clears the accumulator between cases."""
    acc = WordAccuary()
    cases = [
        ([['aaa', 'bbb', 'ccc'], ['ddd', 'ccc', 'bbb']],
         [['aaa', 'ccc', 'ddd'], ['aaa', 'ccc', 'bbb']], 0.5),
        ([['aaa', 'bbb', 'ccc'], ['ddd', 'ccc', 'bbb']],
         [['aaa', 'bbb', 'ccc'], ['ddd', 'ccc', 'bbb']], 1.0),
        ([['aaa', 'bbb', 'ccc'], ['ddd', 'ccc', 'bbb']],
         [['aad', 'bbd', 'cc'], ['dd', 'cc', 'bb']], 0.0),
    ]
    for preds, gts, expected in cases:
        for pred, gt in zip(preds, gts):
            acc.update(pred, gt)
        assert expected == acc.compute()
        acc.reset()
def test_benchmark_dataset():
    """Iterate the IC15 lmdb benchmark generator and print every sample."""
    for sample in generator_lmdb('/data/ocr/reg/evaluation/IC15_2077', rgb=False):
        print(sample)
def test_hashtable():
keys_tensor = tf.constant(list(data_utils.LabelTransformer.dict.values()))
vals_tensor = tf.constant(list(data_utils.LabelTransformer.dict.keys()))
table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor, key_dtype=tf.int32, value_dtype=tf.string),
default_value=tf.constant('<UNK>')
)
inputs = tf.random.uniform(shape=[3,3,3], minval=0, maxval=len(data_utils.LabelTransformer.dict.keys())-1, dtype=tf.int32)
print(table.lookup(inputs)) | [
"anyconfig.load",
"src.tools.train_net.get_dataset",
"tensorflow.random.normal",
"src.dataset.utils.LabelTransformer.dict.values",
"src.dataset.utils.LabelTransformer.dict.keys",
"src.model.backbone.Resnet31",
"src.model.model.MasterModel",
"tensorflow.optimizers.Adadelta",
"tensorflow.GradientTape"... | [((1156, 1229), 'anyconfig.load', 'anyconfig.load', (['"""/home/luning/dev/projects/master-tf/configs/master.yaml"""'], {}), "('/home/luning/dev/projects/master-tf/configs/master.yaml')\n", (1170, 1229), False, 'import anyconfig\n'), ((1243, 1268), 'easydict.EasyDict', 'easydict.EasyDict', (['config'], {}), '(config)\n', (1260, 1268), False, 'import easydict\n'), ((1293, 1312), 'src.tools.train_net.get_dataset', 'get_dataset', (['config'], {}), '(config)\n', (1304, 1312), False, 'from src.tools.train_net import get_dataset\n'), ((1528, 1565), 'anyconfig.load', 'anyconfig.load', (['"""configs/master.yaml"""'], {}), "('configs/master.yaml')\n", (1542, 1565), False, 'import anyconfig\n'), ((1579, 1604), 'easydict.EasyDict', 'easydict.EasyDict', (['config'], {}), '(config)\n', (1596, 1604), False, 'import easydict\n'), ((1663, 1716), 'src.model.backbone.Resnet31', 'Resnet31', ([], {'block': 'BasicBlock', 'backbone_config': 'bb_config'}), '(block=BasicBlock, backbone_config=bb_config)\n', (1671, 1716), False, 'from src.model.backbone import Resnet31, BasicBlock\n'), ((1729, 1763), 'tensorflow.random.normal', 'tf.random.normal', (['[10, 48, 160, 3]'], {}), '([10, 48, 160, 3])\n', (1745, 1763), True, 'import tensorflow as tf\n'), ((1863, 1900), 'anyconfig.load', 'anyconfig.load', (['"""configs/master.yaml"""'], {}), "('configs/master.yaml')\n", (1877, 1900), False, 'import anyconfig\n'), ((1914, 1939), 'easydict.EasyDict', 'easydict.EasyDict', (['config'], {}), '(config)\n', (1931, 1939), False, 'import easydict\n'), ((1953, 1987), 'tensorflow.random.normal', 'tf.random.normal', (['[10, 48, 160, 3]'], {}), '([10, 48, 160, 3])\n', (1969, 1987), True, 'import tensorflow as tf\n'), ((2076, 2116), 'src.model.model.MasterModel', 'MasterModel', (['config.model', '(10)', '(48, 160)'], {}), '(config.model, 10, (48, 160))\n', (2087, 2116), False, 'from src.model.model import MasterModel\n'), ((2133, 2198), 'tensorflow.optimizers.Adadelta', 
'tf.optimizers.Adadelta', ([], {'learning_rate': '(1.0)', 'rho': '(0.9)', 'epsilon': '(1e-06)'}), '(learning_rate=1.0, rho=0.9, epsilon=1e-06)\n', (2155, 2198), True, 'import tensorflow as tf\n'), ((2565, 2638), 'anyconfig.load', 'anyconfig.load', (['"""/home/luning/dev/projects/master-tf/configs/master.yaml"""'], {}), "('/home/luning/dev/projects/master-tf/configs/master.yaml')\n", (2579, 2638), False, 'import anyconfig\n'), ((2652, 2677), 'easydict.EasyDict', 'easydict.EasyDict', (['config'], {}), '(config)\n', (2669, 2677), False, 'import easydict\n'), ((2691, 2725), 'tensorflow.random.normal', 'tf.random.normal', (['[10, 48, 160, 3]'], {}), '([10, 48, 160, 3])\n', (2707, 2725), True, 'import tensorflow as tf\n'), ((2738, 2778), 'src.model.model.MasterModel', 'MasterModel', (['config.model', '(10)', '(48, 160)'], {}), '(config.model, 10, (48, 160))\n', (2749, 2778), False, 'from src.model.model import MasterModel\n'), ((2857, 2902), 'src.dataset.utils.LabelTransformer.decode_tensor', 'data_utils.LabelTransformer.decode_tensor', (['ys'], {}), '(ys)\n', (2898, 2902), True, 'from src.dataset import utils as data_utils\n'), ((2962, 2975), 'src.model.metrics.WordAccuary', 'WordAccuary', ([], {}), '()\n', (2973, 2975), False, 'from src.model.metrics import WordAccuary\n'), ((3698, 3761), 'src.dataset.benchmark_data_generator.generator_lmdb', 'generator_lmdb', (['"""/data/ocr/reg/evaluation/IC15_2077"""'], {'rgb': '(False)'}), "('/data/ocr/reg/evaluation/IC15_2077', rgb=False)\n", (3712, 3761), False, 'from src.dataset.benchmark_data_generator import generator_lmdb\n'), ((2013, 2047), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(10, 50)'], {}), '(0, 10, (10, 50))\n', (2030, 2047), True, 'import numpy as np\n'), ((2207, 2224), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2222, 2224), True, 'import tensorflow as tf\n'), ((2302, 2388), 'tensorflow.keras.losses.sparse_categorical_crossentropy', 
'tf.keras.losses.sparse_categorical_crossentropy', (['target', 'logits'], {'from_logits': '(True)'}), '(target, logits, from_logits\n =True)\n', (2349, 2388), True, 'import tensorflow as tf\n'), ((4006, 4115), 'tensorflow.lookup.KeyValueTensorInitializer', 'tf.lookup.KeyValueTensorInitializer', (['keys_tensor', 'vals_tensor'], {'key_dtype': 'tf.int32', 'value_dtype': 'tf.string'}), '(keys_tensor, vals_tensor, key_dtype=tf.\n int32, value_dtype=tf.string)\n', (4041, 4115), True, 'import tensorflow as tf\n'), ((2817, 2834), 'tensorflow.constant', 'tf.constant', (['(True)'], {}), '(True)\n', (2828, 2834), True, 'import tensorflow as tf\n'), ((3838, 3879), 'src.dataset.utils.LabelTransformer.dict.values', 'data_utils.LabelTransformer.dict.values', ([], {}), '()\n', (3877, 3879), True, 'from src.dataset import utils as data_utils\n'), ((3917, 3956), 'src.dataset.utils.LabelTransformer.dict.keys', 'data_utils.LabelTransformer.dict.keys', ([], {}), '()\n', (3954, 3956), True, 'from src.dataset import utils as data_utils\n'), ((4134, 4154), 'tensorflow.constant', 'tf.constant', (['"""<UNK>"""'], {}), "('<UNK>')\n", (4145, 4154), True, 'import tensorflow as tf\n'), ((4229, 4268), 'src.dataset.utils.LabelTransformer.dict.keys', 'data_utils.LabelTransformer.dict.keys', ([], {}), '()\n', (4266, 4268), True, 'from src.dataset import utils as data_utils\n')] |
import numpy as np
from phonopy.harmonic.displacement import get_least_displacements, \
get_displacement, directions_axis, is_minus_displacement
from anharmonic.phonon3.displacement_fc3 import get_reduced_site_symmetry, get_least_orbits, get_next_displacements, get_bond_symmetry
def direction_to_displacement(dataset,
                              distance,
                              supercell):
    """Convert reduced-coordinate directions into Cartesian displacements.

    For every atom entry (three nested levels: first/second/third atoms) a
    'displacement' key is added: the direction mapped through the supercell
    lattice and rescaled to Euclidean length `distance`.  Returns a new
    dataset dict with keys 'natom' and 'first_atoms'.
    """
    lattice = supercell.get_cell()

    def _cartesian(direction):
        # Reduced direction -> Cartesian vector of norm `distance`.
        vec = np.dot(direction, lattice)
        return vec * (distance / np.linalg.norm(vec))

    first_atoms = []
    for atom1 in dataset:
        second_atoms = []
        for atom2 in atom1['second_atoms']:
            third_atoms = []
            for atom3 in atom2['third_atoms']:
                for direction3 in atom3['directions']:
                    third_atoms.append({'number': atom3['number'],
                                        'direction': direction3,
                                        'displacement': _cartesian(direction3)})
            second_atoms.append({'number': atom2['number'],
                                 'direction': atom2['direction'],
                                 'displacement': _cartesian(atom2['direction']),
                                 'third_atoms': third_atoms})
        first_atoms.append({'number': atom1['number'],
                            'direction': atom1['direction'],
                            'displacement': _cartesian(atom1['direction']),
                            'second_atoms': second_atoms})
    return {'natom': supercell.get_number_of_atoms(),
            'first_atoms': first_atoms}
def get_fourth_order_displacements(cell,
                                   symmetry,
                                   is_plusminus='auto',
                                   is_diagonal=False):
    """Build the displacement dataset for fourth-order force constants.

    Atom 1 is the first displaced atom, atom 2 the second, atom 3 the third;
    the force is then measured on atom 4.  The result is a list of nested
    dicts: each first-atom entry carries 'number', 'direction' and a
    'second_atoms' list, whose entries in turn carry 'third_atoms'.

    First-atom displacements are reduced via the site symmetries of the
    original crystal; deeper levels use the progressively reduced bond and
    plane symmetries.
    """
    symprec = symmetry.get_symmetry_tolerance()
    dataset = []
    for disp in get_least_displacements(symmetry,
                                        is_plusminus=is_plusminus,
                                        is_diagonal=False):
        atom1 = disp[0]
        disp1 = disp[1:4]
        # Symmetry operations surviving the first displacement.
        reduced_site_sym = get_reduced_site_symmetry(
            symmetry.get_site_symmetry(atom1), disp1, symprec)
        entry = {'number': atom1,
                 'direction': disp1,
                 'second_atoms': []}
        for atom2 in get_least_orbits(atom1, cell, reduced_site_sym, symprec):
            reduced_bond_sym = get_bond_symmetry(
                reduced_site_sym,
                cell.get_scaled_positions(),
                atom1,
                atom2,
                symprec)
            for disp2 in _get_displacements_second(reduced_bond_sym,
                                                   symprec,
                                                   is_diagonal):
                entry['second_atoms'].append(
                    _get_second_displacements(atom2,
                                              disp2,
                                              cell,
                                              reduced_bond_sym,
                                              symprec,
                                              is_diagonal))
        dataset.append(entry)
    return dataset
def _get_displacements_second(reduced_bond_sym,
                              symprec,
                              is_diagonal):
    """Return the second-atom displacement directions, inserting the minus
    direction right after each one that requires it."""
    candidates = (get_displacement(reduced_bond_sym) if is_diagonal
                  else get_displacement(reduced_bond_sym, directions_axis))
    with_minus = []
    for disp in candidates:
        with_minus.append(disp)
        if is_minus_displacement(disp, reduced_bond_sym):
            with_minus.append(-disp)
    return with_minus
def _get_second_displacements(atom2,
                              disp2,
                              cell,
                              reduced_bond_sym,
                              symprec,
                              is_diagonal):
    """Collect the third-atom displacement entries for one (atom2, disp2)."""
    positions = cell.get_scaled_positions()
    # Symmetry surviving both the bond and the second displacement.
    sym_after_disp2 = get_reduced_site_symmetry(reduced_bond_sym,
                                                disp2,
                                                symprec)
    entry = {'number': atom2,
             'direction': disp2,
             'third_atoms': []}
    for atom3 in get_least_orbits(atom2, cell, sym_after_disp2, symprec):
        plane_sym = get_bond_symmetry(
            sym_after_disp2,
            cell.get_scaled_positions(),
            atom2,
            atom3,
            symprec)
        entry['third_atoms'].append(
            get_next_displacements(atom2,
                                   atom3,
                                   plane_sym,
                                   positions,
                                   symprec,
                                   is_diagonal))
    return entry
| [
"phonopy.harmonic.displacement.get_least_displacements",
"anharmonic.phonon3.displacement_fc3.get_next_displacements",
"anharmonic.phonon3.displacement_fc3.get_least_orbits",
"phonopy.harmonic.displacement.is_minus_displacement",
"numpy.dot",
"anharmonic.phonon3.displacement_fc3.get_reduced_site_symmetry"... | [((3531, 3610), 'phonopy.harmonic.displacement.get_least_displacements', 'get_least_displacements', (['symmetry'], {'is_plusminus': 'is_plusminus', 'is_diagonal': '(False)'}), '(symmetry, is_plusminus=is_plusminus, is_diagonal=False)\n', (3554, 3610), False, 'from phonopy.harmonic.displacement import get_least_displacements, get_displacement, directions_axis, is_minus_displacement\n'), ((6188, 6247), 'anharmonic.phonon3.displacement_fc3.get_reduced_site_symmetry', 'get_reduced_site_symmetry', (['reduced_bond_sym', 'disp2', 'symprec'], {}), '(reduced_bond_sym, disp2, symprec)\n', (6213, 6247), False, 'from anharmonic.phonon3.displacement_fc3 import get_reduced_site_symmetry, get_least_orbits, get_next_displacements, get_bond_symmetry\n'), ((6366, 6423), 'anharmonic.phonon3.displacement_fc3.get_least_orbits', 'get_least_orbits', (['atom2', 'cell', 'reduced_bond_sym2', 'symprec'], {}), '(atom2, cell, reduced_bond_sym2, symprec)\n', (6382, 6423), False, 'from anharmonic.phonon3.displacement_fc3 import get_reduced_site_symmetry, get_least_orbits, get_next_displacements, get_bond_symmetry\n'), ((684, 711), 'numpy.dot', 'np.dot', (['direction1', 'lattice'], {}), '(direction1, lattice)\n', (690, 711), True, 'import numpy as np\n'), ((4038, 4089), 'anharmonic.phonon3.displacement_fc3.get_reduced_site_symmetry', 'get_reduced_site_symmetry', (['site_sym', 'disp1', 'symprec'], {}), '(site_sym, disp1, symprec)\n', (4063, 4089), False, 'from anharmonic.phonon3.displacement_fc3 import get_reduced_site_symmetry, get_least_orbits, get_next_displacements, get_bond_symmetry\n'), ((4113, 4169), 'anharmonic.phonon3.displacement_fc3.get_least_orbits', 'get_least_orbits', (['atom1', 'cell', 'reduced_site_sym', 'symprec'], {}), '(atom1, cell, reduced_site_sym, symprec)\n', (4129, 4169), False, 'from anharmonic.phonon3.displacement_fc3 import get_reduced_site_symmetry, get_least_orbits, get_next_displacements, 
get_bond_symmetry\n'), ((5394, 5428), 'phonopy.harmonic.displacement.get_displacement', 'get_displacement', (['reduced_bond_sym'], {}), '(reduced_bond_sym)\n', (5410, 5428), False, 'from phonopy.harmonic.displacement import get_least_displacements, get_displacement, directions_axis, is_minus_displacement\n'), ((5462, 5513), 'phonopy.harmonic.displacement.get_displacement', 'get_displacement', (['reduced_bond_sym', 'directions_axis'], {}), '(reduced_bond_sym, directions_axis)\n', (5478, 5513), False, 'from phonopy.harmonic.displacement import get_least_displacements, get_displacement, directions_axis, is_minus_displacement\n'), ((5636, 5682), 'phonopy.harmonic.displacement.is_minus_displacement', 'is_minus_displacement', (['disp2', 'reduced_bond_sym'], {}), '(disp2, reduced_bond_sym)\n', (5657, 5682), False, 'from phonopy.harmonic.displacement import get_least_displacements, get_displacement, directions_axis, is_minus_displacement\n'), ((6758, 6850), 'anharmonic.phonon3.displacement_fc3.get_next_displacements', 'get_next_displacements', (['atom2', 'atom3', 'reduced_plane_sym', 'positions', 'symprec', 'is_diagonal'], {}), '(atom2, atom3, reduced_plane_sym, positions, symprec,\n is_diagonal)\n', (6780, 6850), False, 'from anharmonic.phonon3.displacement_fc3 import get_reduced_site_symmetry, get_least_orbits, get_next_displacements, get_bond_symmetry\n'), ((745, 771), 'numpy.linalg.norm', 'np.linalg.norm', (['disp_cart1'], {}), '(disp_cart1)\n', (759, 771), True, 'import numpy as np\n'), ((975, 1002), 'numpy.dot', 'np.dot', (['direction2', 'lattice'], {}), '(direction2, lattice)\n', (981, 1002), True, 'import numpy as np\n'), ((1040, 1066), 'numpy.linalg.norm', 'np.linalg.norm', (['disp_cart2'], {}), '(disp_cart2)\n', (1054, 1066), True, 'import numpy as np\n'), ((1296, 1323), 'numpy.dot', 'np.dot', (['direction3', 'lattice'], {}), '(direction3, lattice)\n', (1302, 1323), True, 'import numpy as np\n'), ((1369, 1395), 'numpy.linalg.norm', 'np.linalg.norm', 
(['disp_cart3'], {}), '(disp_cart3)\n', (1383, 1395), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 14:15:43 2021
@author: kahg8
"""
import argparse
import queue
import sys
from data_analysis import frame_generator
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import numpy as np
import sounddevice as sd
import webrtcvad
import tensorflow as tf
from tensorflow.keras.models import load_model
from preprocessing import preprocess_live_data
# Let TensorFlow grow GPU memory on demand instead of reserving all of it.
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

# Class vocabulary of the keyword classifier; the list index is the class id
# looked up from the model's argmax output.
labels = ["yes", "no", "up", "down", "left",
          "right", "on", "off", "stop", "go", "zero", "one", "two", "three", "four",
          "five", "six", "seven", "eight", "nine", "silence", "unknown"]
# Words present in training data that fold into the "unknown" class
# (not referenced elsewhere in this script).
unknown_labels = ["bed", "bird", "cat", "dog", "happy", "house", "marvin", "sheila",
                  "tree", "wow"]

# Pre-trained CNN over MFCC features (filename suggests 30 epochs,
# batch size 50 — TODO confirm against training script).
model1 = load_model('models/bests_silence0/cnn_mfcc_30epochs_50batchsize.h5')
def int_or_str(text):
    """Helper for argument parsing: return int(text) when it parses as an
    integer, otherwise return the string unchanged."""
    try:
        value = int(text)
    except ValueError:
        value = text
    return value
# First pass: a bare parser so `--list-devices` can short-circuit before the
# full argument set is validated.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
    '-l', '--list-devices', action='store_true',
    help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
    print(sd.query_devices())
    parser.exit(0)
# Second pass: the full parser, inheriting the flag above via `parents`.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[parser])
parser.add_argument(
    'channels', type=int, default=[1], nargs='*', metavar='CHANNEL',
    help='input channels to plot (default: the first)')
parser.add_argument(
    '-d', '--device', type=int_or_str,
    help='input device (numeric ID or substring)')
parser.add_argument(
    '-w', '--window', type=float, default=200, metavar='DURATION',
    help='visible time slot (default: %(default)s ms)')
parser.add_argument(
    '-i', '--interval', type=float, default=30,
    help='minimum time between plot updates (default: %(default)s ms)')
args = parser.parse_args(remaining)
if any(c < 1 for c in args.channels):
    parser.error('argument CHANNEL: must be >= 1')
mapping = [c - 1 for c in args.channels]  # Channel numbers start with 1

# Shared state between the sounddevice callback thread and the main thread.
q = queue.Queue()              # decimated samples for the live plot
q_pred = queue.Queue(maxsize=31)  # rolling buffer of recent audio blocks for inference
q_speak = queue.Queue()        # single flag: was the previous block speech?
vad = webrtcvad.Vad(int(3))    # WebRTC VAD at aggressiveness 3 (most aggressive)
speaking = False               # NOTE(review): never read elsewhere in this script
predicted_label = [[], [20]]   # NOTE(review): shadowed by a local in audio_callback — likely unused
def get_best_chunk(data, chunk_len=16000, step=10):
    """Return the `chunk_len`-sample window of `data` with the largest total
    absolute amplitude (the "loudest" second at 16 kHz by default).

    Candidate windows start every `step` samples.  Returns an empty list when
    `data` is shorter than `chunk_len`.

    Args:
        data: 1-D array-like of audio samples.
        chunk_len: window length in samples (default 16000).
        step: stride between candidate window starts (default 10).
    """
    best_chunk = []
    best_sum = 0
    # BUG FIX: the original used range(0, len(data) - chunk_len, step), so an
    # input of exactly chunk_len samples produced no candidate window at all
    # and the final alignment offset was never tried; +1 includes it.
    for k in range(0, len(data) - chunk_len + 1, step):
        sub = data[k:k + chunk_len]
        sum_sub = np.sum(np.abs(sub))
        if sum_sub > best_sum:
            best_sum = sum_sub
            best_chunk = sub
    return best_chunk
def audio_callback(indata, frames, time, status):
    """sounddevice InputStream callback.

    Feeds the decimated block to the plot queue, runs the WebRTC VAD over the
    block, and when a speech segment ends (speech -> silence edge) classifies
    the buffered audio with the keyword model.
    """
    if status:
        print(status, file=sys.stderr)
    # Fancy indexing with mapping creates a (necessary!) copy:
    q.put(indata[::10, mapping])
    # Flatten the block to a mono sample list for the VAD.
    new_data = []
    for elem in indata:
        new_data.append(elem[0])
    # Split into 10 ms frames; the block counts as speech only when *every*
    # frame is voiced.
    frames_l = frame_generator(10, np.array(new_data), 16000)
    frames_l = list(frames_l)
    speech = True
    for frame in frames_l:
        if not vad.is_speech(frame.bytes, 16000):
            speech = False
            break
    # q_speak holds at most one flag: whether the previous block was speech.
    was_speaking = False
    if not q_speak.empty():
        was_speaking = q_speak.get_nowait()
    if speech and was_speaking:
        q_speak.put_nowait(True)
    elif speech and not was_speaking:
        q_speak.put_nowait(True)
    elif not speech and was_speaking:
        # Falling edge (speech -> silence): gather the buffered blocks plus
        # this one and run the classifier on the first second of audio.
        data = []
        for item in list(q_pred.queue):
            for elem in item:
                data.append(elem[0])
        data += new_data
        data = np.array(data)
        if len(data) >= 16000:
            inputs = preprocess_live_data(data[:16000], 16000)
            prediction = model1.predict(np.array([inputs]))
            # NOTE(review): this assignment creates a *local* predicted_label;
            # the module-level variable of the same name is never updated.
            predicted_label = np.where(prediction == np.amax(prediction))
            print("Predicted : ", labels[predicted_label[1][0]])
        q_pred.queue.clear()
    # Keep only the most recent blocks (queue maxsize) for the next utterance.
    if q_pred.full():
        q_pred.get_nowait()
    q_pred.put(indata.copy())
def update_plot(frame):
    """Matplotlib FuncAnimation callback.

    Drains every pending block from the audio queue, scrolls the module-level
    `plotdata` ring buffer left by that many samples, appends the new samples
    at the end, and refreshes the line artists.  `frame` is supplied by
    FuncAnimation and unused.
    """
    global plotdata
    global ax
    while True:
        try:
            data = q.get_nowait()
        except queue.Empty:
            break
        shift = len(data)
        plotdata = np.roll(plotdata, -shift, axis=0)
        plotdata[-shift:, :] = data
    for column, line in enumerate(lines):
        line.set_ydata(plotdata[:, column])
    return lines
try:
length = int(args.window * 16000 / (1000 * 10))
plotdata = np.zeros((length, len(args.channels)))
fig, ax = plt.subplots()
lines = ax.plot(plotdata)
if len(args.channels) > 1:
ax.legend(['channel {}'.format(c) for c in args.channels],
loc='lower left', ncol=len(args.channels))
ax.axis((0, len(plotdata), -1000, 1000))
ax.set_yticks([0])
ax.yaxis.grid(True)
ax.tick_params(bottom=False, top=False, labelbottom=False,
right=False, left=False, labelleft=False)
fig.tight_layout(pad=0)
label_to_plot = ax.text(3, 8,'silence', transform = ax.transAxes, style='italic',
bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10})
stream = sd.InputStream(
device=args.device,blocksize=500, channels=max(args.channels),
samplerate=16000, callback=audio_callback,dtype=np.int16)
ani = FuncAnimation(fig, update_plot, interval=args.interval, blit=True)
with stream:
plt.show()
except Exception as e:
parser.exit(type(e).__name__ + ': ' + str(e)) | [
"numpy.roll",
"argparse.ArgumentParser",
"matplotlib.pyplot.show",
"tensorflow.config.experimental.set_memory_growth",
"matplotlib.animation.FuncAnimation",
"sounddevice.query_devices",
"preprocessing.preprocess_live_data",
"numpy.array",
"tensorflow.keras.models.load_model",
"queue.Queue",
"num... | [((435, 486), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (479, 486), True, 'import tensorflow as tf\n'), ((861, 929), 'tensorflow.keras.models.load_model', 'load_model', (['"""models/bests_silence0/cnn_mfcc_30epochs_50batchsize.h5"""'], {}), "('models/bests_silence0/cnn_mfcc_30epochs_50batchsize.h5')\n", (871, 929), False, 'from tensorflow.keras.models import load_model\n'), ((1095, 1134), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (1118, 1134), False, 'import argparse\n'), ((1377, 1498), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'parents': '[parser]'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter, parents=[parser])\n', (1400, 1498), False, 'import argparse\n'), ((2259, 2272), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (2270, 2272), False, 'import queue\n'), ((2282, 2305), 'queue.Queue', 'queue.Queue', ([], {'maxsize': '(31)'}), '(maxsize=31)\n', (2293, 2305), False, 'import queue\n'), ((2316, 2329), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (2327, 2329), False, 'import queue\n'), ((510, 561), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (550, 561), True, 'import tensorflow as tf\n'), ((4769, 4783), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4781, 4783), True, 'import matplotlib.pyplot as plt\n'), ((5544, 5610), 'matplotlib.animation.FuncAnimation', 'FuncAnimation', (['fig', 'update_plot'], {'interval': 'args.interval', 'blit': '(True)'}), '(fig, update_plot, interval=args.interval, blit=True)\n', (5557, 5610), False, 'from matplotlib.animation import FuncAnimation\n'), ((1329, 1347), 'sounddevice.query_devices', 'sd.query_devices', ([], 
{}), '()\n', (1345, 1347), True, 'import sounddevice as sd\n'), ((3012, 3030), 'numpy.array', 'np.array', (['new_data'], {}), '(new_data)\n', (3020, 3030), True, 'import numpy as np\n'), ((4465, 4498), 'numpy.roll', 'np.roll', (['plotdata', '(-shift)'], {'axis': '(0)'}), '(plotdata, -shift, axis=0)\n', (4472, 4498), True, 'import numpy as np\n'), ((5636, 5646), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5644, 5646), True, 'import matplotlib.pyplot as plt\n'), ((3706, 3720), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3714, 3720), True, 'import numpy as np\n'), ((3773, 3814), 'preprocessing.preprocess_live_data', 'preprocess_live_data', (['data[:16000]', '(16000)'], {}), '(data[:16000], 16000)\n', (3793, 3814), False, 'from preprocessing import preprocess_live_data\n'), ((3884, 3902), 'numpy.array', 'np.array', (['[inputs]'], {}), '([inputs])\n', (3892, 3902), True, 'import numpy as np\n'), ((4020, 4039), 'numpy.amax', 'np.amax', (['prediction'], {}), '(prediction)\n', (4027, 4039), True, 'import numpy as np\n')] |
from typing import Dict
import torch
import numpy as np
from catalyst.dl.core import Callback, RunnerState, CallbackOrder
import cv2
from collections import OrderedDict
def calculate_confusion_matrix_from_arrays(
prediction: np.array, ground_truth: np.array, num_classes: int
) -> np.array:
"""Calculate confusion matrix for a given set of classes.
if GT value is outside of the [0, num_classes) it is excluded.
Args:
prediction:
ground_truth:
num_classes:
Returns:
"""
# a long 2xn array with each column being a pixel pair
replace_indices = np.vstack((ground_truth.flatten(), prediction.flatten()))
valid_index = replace_indices[0, :] < num_classes
replace_indices = replace_indices[:, valid_index].T
# add up confusion matrix
confusion_matrix, _ = np.histogramdd(
replace_indices,
bins=(num_classes, num_classes),
range=[(0, num_classes), (0, num_classes)],
)
return confusion_matrix.astype(np.uint64)
def get_confusion_matrix(y_pred_logits: torch.Tensor, y_true: torch.Tensor):
num_classes = y_pred_logits.shape[1]
y_pred = torch.argmax(y_pred_logits, dim=1)
ground_truth = y_true.cpu().numpy()
prediction = y_pred.cpu().numpy()
return calculate_confusion_matrix_from_arrays(ground_truth, prediction, num_classes)
def calculate_tp_fp_fn(confusion_matrix):
true_positives = {}
false_positives = {}
false_negatives = {}
for index in range(confusion_matrix.shape[0]):
true_positives[index] = confusion_matrix[index, index]
false_positives[index] = (
confusion_matrix[:, index].sum() - true_positives[index]
)
false_negatives[index] = (
confusion_matrix[index, :].sum() - true_positives[index]
)
return {
"true_positives": true_positives,
"false_positives": false_positives,
"false_negatives": false_negatives,
}
def calculate_dice(tp_fp_fn_dict):
epsilon = 1e-7
dice = {}
for i in range(len(tp_fp_fn_dict["true_positives"])):
tp = tp_fp_fn_dict["true_positives"][i]
fp = tp_fp_fn_dict["false_positives"][i]
fn = tp_fp_fn_dict["true_positives"][i]
dice[i] = (2 * tp + epsilon) / (2 * tp + fp + fn + epsilon)
if not 0 <= dice[i] <= 1:
raise ValueError()
return dice
class MulticlassDiceMetricCallback(Callback):
def __init__(
self,
prefix: str = "dice",
input_key: str = "targets",
output_key: str = "logits",
**metric_params,
):
super().__init__(CallbackOrder.Metric)
self.prefix = prefix
self.input_key = input_key
self.output_key = output_key
self.metric_params = metric_params
self.confusion_matrix = None
self.class_names = metric_params[
"class_names"
] # dictionary {class_id: class_name}
self.class_prefix = metric_params["class_prefix"]
def _reset_stats(self):
self.confusion_matrix = None
def on_batch_end(self, state: RunnerState):
outputs = state.output[self.output_key]
targets = state.input[self.input_key]
confusion_matrix = get_confusion_matrix(outputs, targets)
if self.confusion_matrix is None:
self.confusion_matrix = confusion_matrix
else:
self.confusion_matrix += confusion_matrix
def on_loader_end(self, state: RunnerState):
tp_fp_fn_dict = calculate_tp_fp_fn(self.confusion_matrix)
batch_metrics: Dict = calculate_dice(tp_fp_fn_dict)
for metric_id, dice_value in batch_metrics.items():
if metric_id not in self.class_names:
continue
metric_name = self.class_names[metric_id]
state.metrics.epoch_values[state.loader_name][
f"{self.class_prefix}_{metric_name}"
] = dice_value
state.metrics.epoch_values[state.loader_name]["mean"] = np.mean(
[x for x in batch_metrics.values()]
)
self._reset_stats()
class CustomSegmentationInferCallback(Callback):
def __init__(self, return_valid: bool = False):
super().__init__(CallbackOrder.Internal)
self.valid_masks = []
self.probabilities = np.zeros((2220, 350, 525))
self.return_valid = return_valid
def on_batch_end(self, state: RunnerState):
image, mask = state.input
output = state.output["logits"]
if self.return_valid:
for m in mask:
if m.shape != (350, 525):
m = cv2.resize(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)
self.valid_masks.append(m)
for j, probability in enumerate(output):
if probability.shape != (350, 525):
probability = cv2.resize(
probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR
)
self.probabilities[j, :, :] = probability
| [
"cv2.resize",
"numpy.zeros",
"numpy.histogramdd",
"torch.argmax"
] | [((827, 939), 'numpy.histogramdd', 'np.histogramdd', (['replace_indices'], {'bins': '(num_classes, num_classes)', 'range': '[(0, num_classes), (0, num_classes)]'}), '(replace_indices, bins=(num_classes, num_classes), range=[(0,\n num_classes), (0, num_classes)])\n', (841, 939), True, 'import numpy as np\n'), ((1146, 1180), 'torch.argmax', 'torch.argmax', (['y_pred_logits'], {'dim': '(1)'}), '(y_pred_logits, dim=1)\n', (1158, 1180), False, 'import torch\n'), ((4322, 4348), 'numpy.zeros', 'np.zeros', (['(2220, 350, 525)'], {}), '((2220, 350, 525))\n', (4330, 4348), True, 'import numpy as np\n'), ((4871, 4944), 'cv2.resize', 'cv2.resize', (['probability'], {'dsize': '(525, 350)', 'interpolation': 'cv2.INTER_LINEAR'}), '(probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)\n', (4881, 4944), False, 'import cv2\n'), ((4636, 4699), 'cv2.resize', 'cv2.resize', (['m'], {'dsize': '(525, 350)', 'interpolation': 'cv2.INTER_LINEAR'}), '(m, dsize=(525, 350), interpolation=cv2.INTER_LINEAR)\n', (4646, 4699), False, 'import cv2\n')] |
import torch
from torch import nn
import gym
from gym.spaces import Box, Discrete, Space
from copy import copy, deepcopy
import numpy as np
from typing import Optional, Union, Iterable, List, Dict, Tuple, Any
from numbers import Real, Integral
from .runningstat import RunningStat
from .misc import (
fill_parameters,
get_parameter_vector,
positive_int_or_none,
positive_int,
positive_float,
get_env_spaces,
get_1D_box_length,
get_action_space_length
)
ParamVector = Union[List[Real], np.ndarray]
Action = Union[List[Real], np.ndarray, Integral]
class Policy:
"""Base class for a policy."""
def __init__(self,
*,
env_name: str,
env_config: Optional[dict]=None,
observation_normalization: bool=True,
seed: Optional[Integral]=None):
"""``__init__(...)``: Initialize the policy object.
The initializer must be called from the initializer
of the inheriting classes.
Args:
env_name: Expected as a string specifying the gym
environment ID (e.g. 'Humanoid-v2').
env_config: Expected as None, or as a dictionary
containing the keyword arguments to be passed
to ``gym.make`` when creating the environment.
observation_normalization: Expected as boolean,
specifying whether or not the observations
are to be normalized.
seed: Expected as None or as an integer.
Pass here an integer for explicitly setting a
random seed for the stochastic operations of
the gym environment.
"""
self._policy: nn.Module
if bool(observation_normalization):
self._main_obs_stats = RunningStat()
self._collected_obs_stats = RunningStat()
else:
self._main_obs_stats = None
self._collected_obs_stats = None
if not isinstance(env_name, str):
raise TypeError(
"Environment name was expected as an str,"
+ " but it was received as: "
+ repr(env_name)
)
self._env_name = env_name
if env_config is None:
self._env_config = {}
else:
self._env_config = env_config
self._env: Optional[gym.Env] = None
self._observation_space, self._action_space = (
get_env_spaces(self._env_name, self._env_config)
)
self._seed = seed
self._collect_obs_stats = True
self.notes: Any = None
def _get_env(self) -> gym.Env:
if self._env is None:
self._env = gym.make(self._env_name, **(self._env_config))
if self._seed is not None:
self._env.seed(self._seed)
return self._env
def __getstate__(self):
state = {"_env": None}
for k, v in self.__dict__.items():
if k != "_env":
state[k] = v
return state
def __setstate__(self, state):
state: dict
for k, v in state.items():
self.__dict__[k] = v
def _use_policy(self, observation: Iterable[Real]) -> Action:
x = torch.as_tensor(observation, dtype=torch.float32)
with torch.no_grad():
action = self._policy(x).numpy()
if isinstance(self._action_space, Box):
action = np.clip(
action,
self._action_space.low,
self._action_space.high
)
elif isinstance(self._action_space, Discrete):
action = np.argmax(action)
else:
raise TypeError(
"Cannot work with this action space: "
+ repr(self._action_space)
)
return action
def run(self,
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> Tuple[float, int]:
"""Run an episode.
Args:
decrease_rewards_by: The reward at each timestep will be
decreased by this given amount.
max_episode_length: The maximum number of interactions
allowed in an episode.
Returns:
A tuple (cumulative_reward, number_of_interactions).
"""
max_episode_length = positive_int_or_none(max_episode_length)
def normalized(obs):
if self._main_obs_stats is not None:
if self._collect_obs_stats:
self._main_obs_stats.update(obs)
self._collected_obs_stats.update(obs)
return self._main_obs_stats.normalize(obs)
else:
return obs
t = 0
cumulative_reward = 0.0
env = self._get_env()
observation = env.reset()
observation = normalized(observation)
while True:
action = self._use_policy(observation)
observation, reward, done, info = env.step(action)
observation = normalized(observation)
t += 1
reward -= decrease_rewards_by
cumulative_reward += reward
if max_episode_length is not None and t > max_episode_length:
break
if done:
break
return cumulative_reward, t
def set_params_and_run(self,
policy_parameters: ParamVector,
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> (
Tuple[float, int]):
"""Set the the parameters of the policy by copying them
from the given parameter vector, then run an episode.
Args:
policy_parameters: The policy parameters to be used.
decrease_rewards_by: The reward at each timestep will be
decreased by this given amount.
max_episode_length: The maximum number of interactions
allowed in an episode.
Returns:
A tuple (cumulative_reward, number_of_interactions).
"""
self.set_parameters(policy_parameters)
return self.run(
decrease_rewards_by=decrease_rewards_by,
max_episode_length=max_episode_length
)
def _run_from_list(self,
policy_param_list: List[ParamVector],
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> (
List[Tuple[float, int]]):
results = []
for policy_params in policy_param_list:
results.append(
self.set_params_and_run(
policy_params,
decrease_rewards_by=decrease_rewards_by,
max_episode_length=max_episode_length
)
)
return results
def _run_from_dict(self,
policy_param_dict: Dict[Any, ParamVector],
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> (
Dict[Any, Tuple[float, int]]):
results = {}
for policy_key, policy_params in policy_param_dict.items():
results[policy_key] = (
self.set_params_and_run(
policy_params,
decrease_rewards_by=decrease_rewards_by,
max_episode_length=max_episode_length
)
)
return results
def set_params_and_run_all(self,
policy_params_all: Union[
List[ParamVector],
Dict[Any, ParamVector]
],
*,
decrease_rewards_by: Real=0.0,
max_episode_length: Optional[Integral]=None) -> (
Union[
List[Tuple[float, int]],
Dict[Any, Tuple[float, int]]
]
):
"""For each of the items in the given parameters dictionary,
set the the parameters of the policy by copying them
from the given parameter vector, then run an episode.
Args:
policy_params_all: A dictionary, mapping a policy identifier
to a policy parameter vector.
For example, the policy identifier here could possibly
be an integer specifying the index of the
parameter vector within a batch of parameter vectors.
decrease_rewards_by: The reward at each timestep will be
decreased by this given amount.
max_episode_length: The maximum number of interactions
allowed in an episode.
Returns:
A dictionary where each item maps the policy identifier key
to a tuple (cumulative_reward, number_of_interactions).
"""
kwargs = dict(
decrease_rewards_by=decrease_rewards_by,
max_episode_length=max_episode_length
)
received_dict = (
hasattr(policy_params_all, "keys")
and hasattr(policy_params_all, "values")
)
if received_dict:
return self._run_from_dict(policy_params_all, **kwargs)
else:
return self._run_from_list(policy_params_all, **kwargs)
def set_parameters(self, parameters: ParamVector):
"""Set the parameters of the policy by copying the values
from the given parameter vector.
Args:
parameters: The parameter vector.
"""
#x = torch.as_tensor(parameters, dtype=torch.float32)
if isinstance(parameters, np.ndarray):
parameters = parameters.copy()
x = torch.as_tensor(parameters, dtype=torch.float32)
fill_parameters(self._policy, x)
def get_parameters(self) -> np.ndarray:
"""Get the parameters of the policy as a 1-D numpy array.
Returns:
The parameter vector.
"""
return get_parameter_vector(self._policy).numpy()
def pop_collected_obs_stats(self) -> RunningStat:
"""Get the collected observation statistics.
When this method is called, the contained collected
statistics are removed.
Returns:
The collected observation statistics.
"""
if self._collected_obs_stats is None:
raise ValueError(
"Observation stats are not configured to be collected,"
" therefore, they cannot be popped."
)
result = self._collected_obs_stats
self._collected_obs_stats = RunningStat()
return result
def set_main_obs_stats(self, obs_stats: RunningStat):
"""Set the observation statistics to be used for
observation normalization.
Args:
obs_stats: A RunningStat object containing the statistics.
"""
if obs_stats is None:
raise ValueError(
"The main observation stats cannot be given as None."
)
self._main_obs_stats = deepcopy(obs_stats)
def get_main_obs_stats(self) -> Optional[RunningStat]:
"""Get the observation statistics used for
observation normalization.
Returns:
A RunningStat object containing the statistics.
"""
return self._main_obs_stats
def update_main_obs_stats(self, obs_stats: Union[RunningStat, np.ndarray]):
"""Update the observation statistics used for
observation normalization.
Args:
obs_stats: A RunningStat object or a numpy array
(a numpy array representing a single observation vector).
"""
if self._main_obs_stats is None:
raise ValueError(
"There is no observation stats to update."
+ " Was "
+ repr(self)
+ " initialized with observation_normalization=False?"
)
self._main_obs_stats.update(obs_stats)
def get_parameters_count(self) -> int:
"""Get the number of parameters of the policy
(also corresponds to the length of parameter vector).
"""
return len(self.get_parameters())
def get_collect_obs_stats(self) -> bool:
"""Get, as boolean, whether or not the policy is configured
to collect observation statistics when running episodes.
Returns:
A boolean.
"""
return self._collect_obs_stats
def set_collect_obs_stats(self, b: bool):
"""Set, as boolean, whether or not the policy is to collect
observation statistics when running episodes.
Args:
b: A boolean.
"""
self._collect_obs_stats = bool(b)
class LinearPolicy(Policy):
"""A linear policy."""
def __init__(self,
*,
env_name: str,
env_config: Optional[dict]=None,
observation_normalization: bool=True,
seed: Optional[Integral]=None,
bias: bool=True):
"""``__init__(...)``: Initialize the linear policy.
Args:
env_name: Expected as a string specifying the gym
environment ID (e.g. 'Humanoid-v2').
env_config: Expected as None, or as a dictionary
containing the keyword arguments to be passed
to ``gym.make`` when creating the environment.
observation_normalization: Expected as boolean,
specifying whether or not the observations
are to be normalized.
seed: Expected as None or as an integer.
Pass here an integer for explicitly setting a
random seed for the stochastic operations of
the gym environment.
bias: Expected as a boolean, specifying whether or
not the linear policy will have bias parameters.
"""
Policy.__init__(
self,
env_name=env_name,
env_config=env_config,
observation_normalization=observation_normalization,
seed=seed
)
obs_length = get_1D_box_length(self._observation_space)
act_length = get_action_space_length(self._action_space)
self._policy = nn.Linear(obs_length, act_length, bias=bias)
class MLPPolicy(Policy):
"""A multi-layer perceptron policy."""
ACTIVATION_CLS = {
"tanh": nn.Tanh,
"relu": nn.ReLU
}
def __init__(self,
*,
env_name: str,
env_config: Optional[dict]=None,
observation_normalization: bool=True,
seed: Optional[Integral]=None,
hidden_size: Integral=64,
num_hidden: Integral=1,
hidden_activation: str="tanh",
output_activation: Optional[str]=None):
"""
Args:
env_name: Expected as a string specifying the gym
environment ID (e.g. 'Humanoid-v2').
env_config: Expected as None, or as a dictionary
containing the keyword arguments to be passed
to ``gym.make`` when creating the environment.
observation_normalization: Expected as boolean,
specifying whether or not the observations
are to be normalized.
seed: Expected as None or as an integer.
Pass here an integer for explicitly setting a
random seed for the stochastic operations of
the gym environment.
hidden_size: Expected as an integer, specifying
the number of neurons in a hidden layer.
num_hidden: Expected as an integer, specifying
the number of hidden layers.
hidden_activation: The activation function to be
used by the hidden layer(s).
Expected as 'tanh' or 'relu'.
output_activation: Optional. The activation function
to be used by the output layer.
Can be given as 'tanh' or 'relu', or can be left
as None.
"""
Policy.__init__(
self,
env_name=env_name,
env_config=env_config,
observation_normalization=observation_normalization,
seed=seed
)
obs_length = get_1D_box_length(self._observation_space)
act_length = get_action_space_length(self._action_space)
hidden_size = positive_int(hidden_size)
num_hidden = positive_int(num_hidden)
if hidden_activation is None:
hidden_act_cls = None
else:
hidden_act_cls = self.ACTIVATION_CLS[hidden_activation]
if output_activation is None:
output_act_cls = None
else:
output_act_cls = self.ACTIVATION_CLS[output_activation]
layers = []
# first hidden layer
layers.append(nn.Linear(obs_length, hidden_size))
if hidden_act_cls is not None:
layers.append(hidden_act_cls())
# rest of the hidden layers (if any)
for _ in range(1, num_hidden):
layers.append(nn.Linear(hidden_size, hidden_size))
if hidden_act_cls is not None:
layers.append(hidden_act_cls())
# output layer
layers.append(nn.Linear(hidden_size, act_length))
if output_act_cls is not None:
layers.append(output_act_cls())
self._policy = nn.Sequential(*layers)
| [
"numpy.clip",
"torch.as_tensor",
"torch.nn.Sequential",
"numpy.argmax",
"torch.nn.Linear",
"copy.deepcopy",
"torch.no_grad",
"gym.make"
] | [((3263, 3312), 'torch.as_tensor', 'torch.as_tensor', (['observation'], {'dtype': 'torch.float32'}), '(observation, dtype=torch.float32)\n', (3278, 3312), False, 'import torch\n'), ((10182, 10230), 'torch.as_tensor', 'torch.as_tensor', (['parameters'], {'dtype': 'torch.float32'}), '(parameters, dtype=torch.float32)\n', (10197, 10230), False, 'import torch\n'), ((11540, 11559), 'copy.deepcopy', 'deepcopy', (['obs_stats'], {}), '(obs_stats)\n', (11548, 11559), False, 'from copy import copy, deepcopy\n'), ((14797, 14841), 'torch.nn.Linear', 'nn.Linear', (['obs_length', 'act_length'], {'bias': 'bias'}), '(obs_length, act_length, bias=bias)\n', (14806, 14841), False, 'from torch import nn\n'), ((18056, 18078), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (18069, 18078), False, 'from torch import nn\n'), ((2733, 2777), 'gym.make', 'gym.make', (['self._env_name'], {}), '(self._env_name, **self._env_config)\n', (2741, 2777), False, 'import gym\n'), ((3326, 3341), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3339, 3341), False, 'import torch\n'), ((3457, 3521), 'numpy.clip', 'np.clip', (['action', 'self._action_space.low', 'self._action_space.high'], {}), '(action, self._action_space.low, self._action_space.high)\n', (3464, 3521), True, 'import numpy as np\n'), ((17509, 17543), 'torch.nn.Linear', 'nn.Linear', (['obs_length', 'hidden_size'], {}), '(obs_length, hidden_size)\n', (17518, 17543), False, 'from torch import nn\n'), ((17913, 17947), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'act_length'], {}), '(hidden_size, act_length)\n', (17922, 17947), False, 'from torch import nn\n'), ((3660, 3677), 'numpy.argmax', 'np.argmax', (['action'], {}), '(action)\n', (3669, 3677), True, 'import numpy as np\n'), ((17739, 17774), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (17748, 17774), False, 'from torch import nn\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.