code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from Track import Track
import numpy as np
import math
def angle_between(vector_1, vector_2):
    """Return the angle (in radians) between two vectors.

    Args:
        vector_1, vector_2: array-like vectors of equal length.

    Returns:
        float: angle in [0, pi]; 0 if either vector has zero norm
        (the division then yields NaN, which maps to 0 as before).
    """
    unit_vector_1 = vector_1 / np.linalg.norm(vector_1)
    unit_vector_2 = vector_2 / np.linalg.norm(vector_2)
    # Clip to [-1, 1]: floating-point rounding can push the dot product
    # slightly outside arccos' domain, producing a spurious NaN (e.g.
    # antiparallel vectors used to return 0 instead of pi).
    dot_product = np.clip(np.dot(unit_vector_1, unit_vector_2), -1.0, 1.0)
    angle = np.arccos(dot_product)
    if math.isnan(angle):  # zero-norm input propagates NaN through the clip
        return 0
    else:
        return angle
def calc_mse(real, pred):
    """Return the Mean Squared Error between `real` and `pred`."""
    diff = np.subtract(real, pred)
    return np.mean(np.square(diff))
class TrackPhiEtaFinderDevSingh():
    def find_phi_eta(self, track):
        """Estimate the (phi, eta) angles of a track from its hit points.

        Args:
            track: object exposing a `vertex` attribute and a `points`
                sequence of objects with x, y, z coordinates.

        Returns:
            tuple(float, float): (phi, eta), with phi normalized to [0, 2*pi).
        """
        # Get data into numpy format
        vertex = np.array(track.vertex)
        data = np.array(list(map(lambda point: np.array([point.x, point.y, point.z]), track.points)))
        x_data = data.T[0]
        y_data = data.T[1]
        # Calculate etas
        thetas = []
        for vec in data:
            # vector in form <x, y, z>, find angle to z-axis and adjust for vertex
            # NOTE(review): `vec[2] - vertex` subtracts the whole vertex array
            # from the scalar z coordinate -- presumably track.vertex is a
            # scalar z offset here; confirm against Track's definition.
            theta = angle_between(np.array([vec[0], vec[1], vec[2] - vertex]), np.array([0,0,1]))
            thetas.append(theta)
        thetas = np.array(thetas)
        # Calculate phis
        phis = []
        for vec in data:
            # vector in form <x, y>, find angle to x-axis
            phi = angle_between(np.array([vec[0], vec[1]]), np.array([1,0]))
            phis.append(phi)
        # Choose best measurement to report
        phi = phis[0]
        theta = np.array(thetas).mean() # I don't know why but theta is consistently more accurate when using mean rather than first element
        # Not using linear interpolations because it adds uncertainty if the vertex
        # is too far away from known values, and it doesn't bring much benefit to error
        # Calculate eta using formula
        eta = -1 * np.log(np.tan(theta/2))
        # Account for range of arccos() not including Quadrants 3 and 4
        # Replace negative values with their positive equivalents
        # NOTE(review): the first branch averages x_data[0] (a single value)
        # while the second averages all of x_data -- one of the two looks
        # like a typo; confirm the intended quadrant test.
        if (np.average(x_data[0]) < 0 and np.average(y_data) < 0 and phi != 0):
            phi += np.pi
        if (np.average(x_data) > 0 and np.average(y_data) < 0 and phi != 0):
            phi = -1 * phi
        while phi < 0:
            phi += 2 * np.pi
        return phi, eta
| [
"numpy.arccos",
"numpy.tan",
"numpy.average",
"numpy.subtract",
"numpy.array",
"numpy.dot",
"numpy.linalg.norm",
"math.isnan"
] | [((275, 311), 'numpy.dot', 'np.dot', (['unit_vector_1', 'unit_vector_2'], {}), '(unit_vector_1, unit_vector_2)\n', (281, 311), True, 'import numpy as np\n'), ((325, 347), 'numpy.arccos', 'np.arccos', (['dot_product'], {}), '(dot_product)\n', (334, 347), True, 'import numpy as np\n'), ((356, 373), 'math.isnan', 'math.isnan', (['angle'], {}), '(angle)\n', (366, 373), False, 'import math\n'), ((174, 198), 'numpy.linalg.norm', 'np.linalg.norm', (['vector_1'], {}), '(vector_1)\n', (188, 198), True, 'import numpy as np\n'), ((231, 255), 'numpy.linalg.norm', 'np.linalg.norm', (['vector_2'], {}), '(vector_2)\n', (245, 255), True, 'import numpy as np\n'), ((698, 720), 'numpy.array', 'np.array', (['track.vertex'], {}), '(track.vertex)\n', (706, 720), True, 'import numpy as np\n'), ((1190, 1206), 'numpy.array', 'np.array', (['thetas'], {}), '(thetas)\n', (1198, 1206), True, 'import numpy as np\n'), ((524, 547), 'numpy.subtract', 'np.subtract', (['real', 'pred'], {}), '(real, pred)\n', (535, 547), True, 'import numpy as np\n'), ((1074, 1117), 'numpy.array', 'np.array', (['[vec[0], vec[1], vec[2] - vertex]'], {}), '([vec[0], vec[1], vec[2] - vertex])\n', (1082, 1117), True, 'import numpy as np\n'), ((1119, 1138), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1127, 1138), True, 'import numpy as np\n'), ((1372, 1398), 'numpy.array', 'np.array', (['[vec[0], vec[1]]'], {}), '([vec[0], vec[1]])\n', (1380, 1398), True, 'import numpy as np\n'), ((1400, 1416), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (1408, 1416), True, 'import numpy as np\n'), ((1544, 1560), 'numpy.array', 'np.array', (['thetas'], {}), '(thetas)\n', (1552, 1560), True, 'import numpy as np\n'), ((1912, 1929), 'numpy.tan', 'np.tan', (['(theta / 2)'], {}), '(theta / 2)\n', (1918, 1929), True, 'import numpy as np\n'), ((2084, 2105), 'numpy.average', 'np.average', (['x_data[0]'], {}), '(x_data[0])\n', (2094, 2105), True, 'import numpy as np\n'), ((2115, 2133), 'numpy.average', 
'np.average', (['y_data'], {}), '(y_data)\n', (2125, 2133), True, 'import numpy as np\n'), ((2193, 2211), 'numpy.average', 'np.average', (['x_data'], {}), '(x_data)\n', (2203, 2211), True, 'import numpy as np\n'), ((2220, 2238), 'numpy.average', 'np.average', (['y_data'], {}), '(y_data)\n', (2230, 2238), True, 'import numpy as np\n'), ((769, 806), 'numpy.array', 'np.array', (['[point.x, point.y, point.z]'], {}), '([point.x, point.y, point.z])\n', (777, 806), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.ensemble import GradientBoostingClassifier
#--This code evaluates how well publicly available data (14-day case data)
#--predicts the risk of a country, i.e. whether its true prevalence
#-- exceeds its median true prevalence over the summer. Intuitively, if a
#--country's public data lags the true prevalence of travelers
#--by l days, then we should be able to predict its risk status using case
#--data l days into the future. This code produces for every country and every
#--lag the achieved AUC.
#--The train_evaluate function expects as input the 14 day case data as well
#--as the target variables, trains a model on the training data and returns
#--the off-sample AUC.
def train_evaluate(X, y):
    """Train a gradient-boosting classifier on the first (chronological)
    half of the data and return the AUC achieved on the second half.

    Args:
        X: feature matrix (14-day case data per sample).
        y: binary risk-status targets.

    Returns:
        float: off-sample AUC.
    """
    # Chronological split (no shuffling) because this is time-series data.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, shuffle=False)
    classifier = GradientBoostingClassifier().fit(X_train, y_train)
    predicted_probs = classifier.predict_proba(X_test)
    # ROC curve from the probability of the negative class (pos_label=False).
    fpr, tpr, _ = metrics.roc_curve(y_test, predicted_probs[:, 0], pos_label=False)
    return metrics.auc(fpr, tpr)
#--Download publicly reported case data from OWID
url="https://covid.ourworldindata.org/data/owid-covid-data.csv"
df = pd.read_csv(url)
df=df.drop_duplicates()
#--Transforming 3 digit ISO codes to 2 digit equivalents
url="../OtherData/short_to_iso.csv"
# NOTE(review): error_bad_lines was deprecated in pandas 1.3 and removed in
# 2.0 (replaced by on_bad_lines); this script therefore needs pandas < 2.0.
short_to_iso=pd.read_csv(url,error_bad_lines=False)
df=pd.merge(df,short_to_iso,how='left', left_on='iso_code', right_on='alpha-3')
#--Loading Estimates of true Prevalence----
estimates=pd.read_csv('../OPE_outputs/ope_dat_TRUE_Window_3_MinTest_30_SmoothPrior_TRUE_2001_0.9.csv');
# Keep a single estimate per (date_entry, country): the last row after
# sorting by eb_type.
estimates=estimates.sort_values('eb_type').groupby(['date_entry','country']).tail(1)
#--Adding prevalence estimates on public data-----------
df=pd.merge(df,estimates,how='left',left_on=['alpha-2','date'] ,\
          right_on=['country','date_entry'])
df=df.sort_values(by=['alpha-2','date'])
df['date']=pd.to_datetime(df['date'])
df=df.drop_duplicates()
#--Producing List of all countries for which data is available
countries=df['alpha-2']
countries=countries.drop_duplicates()
#--Reading list of Non-Blacklisted countries
url="../OtherData/countries_allowed.csv"
allow=pd.read_csv(url, header=None)[0].values.tolist()
grey=['ES','BE','MT','BG','RO','GR','CZ','SE','NL']
#--Focusing on allowed countries that are not greylisted, since for the latter
#--measured prevalence is inaccurate during the period when they were greylisted
considered_countries=set(countries)-set(grey)
#--Initializing lags vector and result dataframe
lags=[];
info=pd.DataFrame(columns=['Country','Lag','AUC'])
maximizers=[];
country_list=[]
lags=[]
crap=[]
lowess = sm.nonparametric.lowess
maxxx=0
decent=[]
#--For each country
for country in considered_countries:
    res=[] #--KIMON check
    this_country=df[df['alpha-2']==country]
    #--for each country we require that we have enough data to produce
    #--prevalence estimates for at least 70 days. This is in order to exclude
    #--countries with very rare arrivals.
    valid_eb=this_country[this_country['eb_prev'].notnull()]
    enough_tests=(len(valid_eb)>70);
    if enough_tests:
        #--for each lag up to 20 days
        for lag in range(0,20):
            #--we produce the X: cases y:risk status
            X=[];
            y=[];
            for index,row in valid_eb.iterrows():
                date=row['date']
                # 14-day case window shifted `lag` days into the future.
                moved_dates=pd.date_range(date-pd.Timedelta(14-lag,unit='D'),\
                                          date+pd.Timedelta(lag,unit='D'))
                cases=this_country[np.isin(this_country['date'], moved_dates)]\
                    ['new_cases_smoothed_per_million'].values.tolist()
                X.append(cases)
                y.append(valid_eb[valid_eb['date']==date].iloc[0]['eb_prev'])
            X=np.array(X)
            y=np.array(y)
            #--A country is defined to be risky on a given day if its
            #--prevalence is higher than its median during the summer
            y=(y>np.median(y))
            #--For the given country and lag we train and evaluate a model that
            #--classifies risky or not and adding resulting AUC to result.
            country_lag_auc=train_evaluate(X, y)
            # NOTE(review): DataFrame.append was removed in pandas 2.0
            # (use pd.concat) -- another reason this needs pandas < 2.0.
            info=info.append({'Country':country, 'Lag':lag, \
                              'AUC': max(country_lag_auc,1-country_lag_auc)},ignore_index=True)
#--Producing Country Lag AUC profile to be used in clustering
info.to_csv("../OtherData/country_lag_auc_profile.csv")
| [
"numpy.median",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.auc",
"pandas.merge",
"pandas.Timedelta",
"numpy.isin",
"numpy.array",
"sklearn.metrics.roc_curve",
"pandas.DataFrame",
"sklearn.ensemble.GradientBoostingClassifier",
"pandas.to_datetime"
] | [((1642, 1658), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (1653, 1658), True, 'import pandas as pd\n'), ((1792, 1831), 'pandas.read_csv', 'pd.read_csv', (['url'], {'error_bad_lines': '(False)'}), '(url, error_bad_lines=False)\n', (1803, 1831), True, 'import pandas as pd\n'), ((1834, 1912), 'pandas.merge', 'pd.merge', (['df', 'short_to_iso'], {'how': '"""left"""', 'left_on': '"""iso_code"""', 'right_on': '"""alpha-3"""'}), "(df, short_to_iso, how='left', left_on='iso_code', right_on='alpha-3')\n", (1842, 1912), True, 'import pandas as pd\n'), ((1967, 2069), 'pandas.read_csv', 'pd.read_csv', (['"""../OPE_outputs/ope_dat_TRUE_Window_3_MinTest_30_SmoothPrior_TRUE_2001_0.9.csv"""'], {}), "(\n '../OPE_outputs/ope_dat_TRUE_Window_3_MinTest_30_SmoothPrior_TRUE_2001_0.9.csv'\n )\n", (1978, 2069), True, 'import pandas as pd\n'), ((2208, 2313), 'pandas.merge', 'pd.merge', (['df', 'estimates'], {'how': '"""left"""', 'left_on': "['alpha-2', 'date']", 'right_on': "['country', 'date_entry']"}), "(df, estimates, how='left', left_on=['alpha-2', 'date'], right_on=[\n 'country', 'date_entry'])\n", (2216, 2313), True, 'import pandas as pd\n'), ((2370, 2396), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {}), "(df['date'])\n", (2384, 2396), True, 'import pandas as pd\n'), ((3013, 3060), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Country', 'Lag', 'AUC']"}), "(columns=['Country', 'Lag', 'AUC'])\n", (3025, 3060), True, 'import pandas as pd\n'), ((922, 950), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {}), '()\n', (948, 950), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((1107, 1159), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.5)', 'shuffle': '(False)'}), '(X, y, test_size=0.5, shuffle=False)\n', (1123, 1159), False, 'from sklearn.model_selection import train_test_split\n'), ((1380, 1436), 'sklearn.metrics.roc_curve', 
'metrics.roc_curve', (['y_test', 'y_pred[:, 0]'], {'pos_label': '(False)'}), '(y_test, y_pred[:, 0], pos_label=False)\n', (1397, 1436), False, 'from sklearn import metrics\n'), ((1498, 1519), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (1509, 1519), False, 'from sklearn import metrics\n'), ((4284, 4295), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4292, 4295), True, 'import numpy as np\n'), ((4310, 4321), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4318, 4321), True, 'import numpy as np\n'), ((2641, 2670), 'pandas.read_csv', 'pd.read_csv', (['url'], {'header': 'None'}), '(url, header=None)\n', (2652, 2670), True, 'import pandas as pd\n'), ((4477, 4489), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (4486, 4489), True, 'import numpy as np\n'), ((3902, 3934), 'pandas.Timedelta', 'pd.Timedelta', (['(14 - lag)'], {'unit': '"""D"""'}), "(14 - lag, unit='D')\n", (3914, 3934), True, 'import pandas as pd\n'), ((3981, 4008), 'pandas.Timedelta', 'pd.Timedelta', (['lag'], {'unit': '"""D"""'}), "(lag, unit='D')\n", (3993, 4008), True, 'import pandas as pd\n'), ((4044, 4086), 'numpy.isin', 'np.isin', (["this_country['date']", 'moved_dates'], {}), "(this_country['date'], moved_dates)\n", (4051, 4086), True, 'import numpy as np\n')] |
# coding: utf-8
"""
target captcha url: http://www.miitbeian.gov.cn/getVerifyCode?4
"""
import json
from captcha_utils.capthafactory import CaptchaFactory
import numpy as np
def custom_fn(single_char):
    """Per-character hook: currently a no-op passthrough.

    Extend here to post-process each rendered character image,
    e.g. single_char.filter(ImageFilter.GaussianBlur).
    """
    return single_char
def bg_custom_fn(bg):
    """Background hook: currently a no-op passthrough.

    Extend here to post-process the background image,
    e.g. bg.filter(ImageFilter.GaussianBlur).
    """
    return bg
def main():
    """Bulk-generate 500k captcha images into output/icp/."""
    project_name = "icp"
    with open("configs/icp.json", encoding="utf-8") as fp:
        demo_config = json.load(fp)
    # Optionally restrict the character set:
    # with open("configs/char/specific_chars.json", encoding="utf-8") as fp:
    #     specific = json.load(fp)
    demo_factory = CaptchaFactory(char_custom_fns=[custom_fn], bg_custom_fns=[bg_custom_fn], **demo_config)
    # Count down from 500000 to 1, printing the remaining count each step.
    for remaining in range(10000 * 50, 0, -1):
        captcha = demo_factory.generate_captcha()
        captcha.save("output/%s/%s.jpg" % (project_name, captcha.text))
        print(remaining)
class GenCaptcha(object):
    """Thin wrapper around CaptchaFactory configured from configs/icp.json."""

    def __init__(self):
        with open("configs/icp.json", encoding="utf-8") as fp:
            config = json.load(fp)
        self.factory = CaptchaFactory(**config)

    def gen_one(self):
        """Return (image_as_ndarray, lowercase_label) for one fresh captcha."""
        captcha = self.factory.generate_captcha()
        label = ''.join(char.char_text.lower() for char in captcha.chars)
        return np.array(captcha.captcha), label
def test():
    """Smoke-test GenCaptcha: generate two captchas and print shape + text."""
    generator = GenCaptcha()
    for _ in range(2):
        image, text = generator.gen_one()
        print(image.shape, text)
if __name__ == "__main__":
    # main()  # bulk generation; disabled in favor of the smoke test
    test()
| [
"json.load",
"captcha_utils.capthafactory.CaptchaFactory",
"numpy.array"
] | [((679, 771), 'captcha_utils.capthafactory.CaptchaFactory', 'CaptchaFactory', ([], {'char_custom_fns': '[custom_fn]', 'bg_custom_fns': '[bg_custom_fn]'}), '(char_custom_fns=[custom_fn], bg_custom_fns=[bg_custom_fn],\n **demo_config)\n', (693, 771), False, 'from captcha_utils.capthafactory import CaptchaFactory\n'), ((532, 545), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (541, 545), False, 'import json\n'), ((1306, 1335), 'captcha_utils.capthafactory.CaptchaFactory', 'CaptchaFactory', ([], {}), '(**demo_config)\n', (1320, 1335), False, 'from captcha_utils.capthafactory import CaptchaFactory\n'), ((1233, 1246), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1242, 1246), False, 'import json\n'), ((1425, 1450), 'numpy.array', 'np.array', (['captcha.captcha'], {}), '(captcha.captcha)\n', (1433, 1450), True, 'import numpy as np\n')] |
#
# Copyright 2021 by <NAME>, Hitachi, Ltd.
# All rights reserved.
#
# This file is part of the KEMPNN package,
# and is released under the "BSD 3-Clause License". Please see the LICENSE
# file that should have been included as part of this package.
#
import numpy as np
from rdkit import Chem
from kempnn.loader import loadDataset
from kempnn.visualizer import drawSmiles
def make_knowledge_for_tg(smiles: str) -> np.ndarray:
    """Build a per-atom knowledge weight vector for Tg prediction.

    Each atom starts at 0 and is adjusted by structural features:
    +1 if the atom is in a ring, -1 per incident rotatable bond,
    +0.5 per incident non-rotatable (double/triple C-C) bond,
    +1 per incident aromatic bond; the result is clamped to [-1, 1].

    Args:
        smiles: SMILES string of the molecule.

    Returns:
        np.ndarray of shape (num_atoms,) with values in [-1, 1].
    """
    mol = Chem.MolFromSmiles(smiles)
    RotatableBond = Chem.MolFromSmarts("[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]")
    rotatable = mol.GetSubstructMatches(RotatableBond)
    NonRotatableBond = Chem.MolFromSmarts("[C,c]=,#[C,c]")
    non_rotatable = mol.GetSubstructMatches(NonRotatableBond)
    AromaticBond = Chem.MolFromSmarts("c:c")
    aromaticbond = mol.GetSubstructMatches(AromaticBond)
    isInRing = [
        int(mol.GetAtomWithIdx(i).IsInRing()) for i in range(mol.GetNumAtoms())
    ]
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    ret = np.zeros(mol.GetNumAtoms(), dtype=float)
    ret += np.array(isInRing) * 1
    for bond in rotatable:
        ret[bond[0]] += -1
        ret[bond[1]] += -1
    for bond in non_rotatable:
        ret[bond[0]] += 0.5
        ret[bond[1]] += 0.5
    for bond in aromaticbond:
        ret[bond[0]] += 1
        ret[bond[1]] += 1
    # Clamp to [-1, 1]
    ret = np.maximum(np.minimum(ret, 1), -1)
    return ret
def load_tg_knowledge():
    """Load the full PolymerTg dataset and annotate every molecule with
    per-atom knowledge weights derived from its SMILES string."""
    knowledge, _, _ = loadDataset(
        dict(
            dataset=dict(
                name="PolymerTg", frac_train=1, frac_test=0, frac_valid=0
            )
        )
    )
    for index in range(len(knowledge)):
        weights = make_knowledge_for_tg(knowledge[index].smiles)
        knowledge.annotate_node(index, weights)
    return knowledge
if __name__ == "__main__":
    # Quick manual checks on two example polymer SMILES strings.
    print(make_knowledge_for_tg("[Ce]CC(C)(C(=O)OCCCCCCCC)[Th]"))
    print(make_knowledge_for_tg("[Ce]CC(C1=CC(Cl)=C(C=C1))[Th]"))
    # Render the second molecule with its knowledge weights as an SVG.
    drawSmiles(
        "[Ce]CC(C1=CC(Cl)=C(C=C1))[Th]",
        weights=make_knowledge_for_tg("[Ce]CC(C1=CC(Cl)=C(C=C1))[Th]"),
        filename="test.svg",
    )
| [
"rdkit.Chem.MolFromSmarts",
"numpy.array",
"rdkit.Chem.MolFromSmiles",
"numpy.minimum"
] | [((441, 467), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (459, 467), False, 'from rdkit import Chem\n'), ((488, 540), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['"""[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]"""'], {}), "('[!$(*#*)&!D1]-&!@[!$(*#*)&!D1]')\n", (506, 540), False, 'from rdkit import Chem\n'), ((619, 654), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['"""[C,c]=,#[C,c]"""'], {}), "('[C,c]=,#[C,c]')\n", (637, 654), False, 'from rdkit import Chem\n'), ((737, 762), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['"""c:c"""'], {}), "('c:c')\n", (755, 762), False, 'from rdkit import Chem\n'), ((990, 1008), 'numpy.array', 'np.array', (['isInRing'], {}), '(isInRing)\n', (998, 1008), True, 'import numpy as np\n'), ((1287, 1305), 'numpy.minimum', 'np.minimum', (['ret', '(1)'], {}), '(ret, 1)\n', (1297, 1305), True, 'import numpy as np\n')] |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# from tensorflow.bitwise import bitwise_xor
from itertools import product
import hashlib
import numpy as np
import importlib
import re
import pdb
import tensorflow as tf
import sonnet as snt
from ..argoLogging import get_logger
import copy
tf_logging = get_logger()
# Small numerical tolerance added inside logs/divisions to avoid NaNs/Infs.
NUMTOL = 1e-7  # NB if you plan to change this, talk to me (****)
# tf collection names used to group custom regularization terms.
AC_REGULARIZATION = "activity_and_contractive_regularizers"
CUSTOM_REGULARIZATION = "custom_regularizers"
def create_panels_lists(list_of_vpanels_of_plots):
    """Split a list of vertical panels of plot dicts into three parallel
    structures: the nodes, the names, and the output-file dicts.

    Args:
        list_of_vpanels_of_plots: list of vpanels; each vpanel is a list
            of plot dicts with keys "nodes" (list), "names" (list) and
            "output" (dict).

    Returns:
        tuple of three lists mirroring the input nesting:
        (nodes_to_log, names_of_nodes_to_log, filenames_to_log_to).
    """
    nodes_to_log = []
    names_of_nodes_to_log = []
    filenames_to_log_to = []
    for vpanel in list_of_vpanels_of_plots:
        # Validate every plot dict before collecting its fields.
        for plot in vpanel:
            assert isinstance(plot["nodes"], list), "`nodes` in a plot dictionary must be a list"
            assert isinstance(plot["names"], list), "`names` in a plot dictionary must be a list"
            assert isinstance(plot["output"], dict), "`output` in a plot dictionary must be a dict"
        nodes_to_log.append([plot["nodes"] for plot in vpanel])
        names_of_nodes_to_log.append([plot["names"] for plot in vpanel])
        filenames_to_log_to.append([plot["output"] for plot in vpanel])
    return nodes_to_log, names_of_nodes_to_log, filenames_to_log_to
def update_conf_with_defaults(opts, default_params):
    """Return a deep copy of `opts` completed with `default_params`.

    Keys present in `opts` win over the defaults; the winning values are
    the original objects from `opts` (not copies), matching the previous
    in-place update behaviour. Neither input is modified.
    """
    merged = copy.deepcopy(opts)
    merged.update(default_params)
    merged.update(opts)
    return merged
def my_loss_full_logits(y, logits):
    """Per-sample cross-entropy from integer labels and full logits.

    Args:
        y: integer class labels, shape (batch,).
        logits: unnormalized scores, shape (batch, n_classes).

    Returns:
        Tensor of shape (batch,): per-sample negative log-likelihood.
    """
    n_classes = logits.get_shape().as_list()[1]
    probabilities = tf.nn.softmax(logits)
    # NUMTOL guards the log against exact zeros.
    log_probs = tf.log(probabilities + NUMTOL)
    return tf.reduce_sum(-tf.one_hot(y, depth=n_classes) * log_probs, axis=1)
def make_list(l):
    """Wrap `l` in a list unless it already is one (returned unchanged)."""
    if isinstance(l, list):
        return l
    return [l]
def create_list_colors(max_colors):
    """Return `max_colors` RGBA colors sampled cyclically from tab10."""
    ratio = max_colors / 10.0
    indices = (1 / ratio * np.arange(10 * ratio)).astype(int)
    return plt.cm.tab10(indices)
def load_sonnet_module(module_name, kwargs, instantiate=True):
    """Resolve a network module by name and build it with `kwargs`.

    Lookup order: (1) the project's own `<package>.network.<module_name>`
    module; (2) `sonnet`; (3) `sonnet.nets`. Raises if the name is found
    nowhere or if construction fails.

    Args:
        module_name (str): class/function name to load.
        kwargs (dict): keyword arguments forwarded to the constructor.
        instantiate (bool): forwarded to eval_method_from_tuple --
            presumably False returns the uninstantiated callable; confirm
            against that helper's definition.

    Returns:
        The object produced by eval_method_from_tuple.
    """
    tf_logging.info("Loading sonnet module " + str(module_name))
    try:
        my_path = '.'.join(__name__.split('.')[:-2])
        # try first to get the module from argo
        layer_module = importlib.import_module(my_path + ".network." + module_name)
        sntmodule = eval_method_from_tuple(layer_module, (module_name, kwargs), instantiate)
    except ImportError:
        # otherwise get module from sonnet or sonnet.nets or raise exception
        module = None
        if hasattr(snt, module_name):
            module = snt
        elif hasattr(snt.nets, module_name):
            module = snt.nets
        else:
            raise Exception("sonnet module " + module_name + " not recognized")
        sntmodule = eval_method_from_tuple(module, (module_name, kwargs), instantiate)
    except Exception as e:
        # Any non-ImportError failure (including construction errors) is
        # wrapped with the module name and kwargs for easier debugging.
        raise Exception("problem loading module: %s, kwargs: %s, exception: %s" % (module_name, kwargs, e)) from e
    return sntmodule
def get_ac_collection_name(additional_str=None):
    """Return the activity/contractive regularizers collection name,
    optionally suffixed with "_<additional_str>"."""
    if additional_str:
        return AC_REGULARIZATION + "_" + additional_str
    return AC_REGULARIZATION
def compose_name(basename, dataset_str, separator="_"):
    """Join `basename` and `dataset_str` with `separator`, dropping a
    leading "-" from `basename` if present.

    Uses startswith instead of indexing so an empty basename no longer
    raises IndexError.
    """
    if basename.startswith("-"):
        basename = basename[1:]
    return basename + separator + dataset_str
def hash_this(longstring, trunc=None):
    """Return the SHA1 hex digest of `longstring`, optionally truncated
    to the first `trunc` characters (falsy `trunc` keeps it whole)."""
    digest = hashlib.sha1(longstring.encode('utf-8')).hexdigest()
    return digest[:trunc] if trunc else digest
# from https://stackoverflow.com/questions/47709854/how-to-get-covariance-matrix-in-tensorflow?rq=1
# once it is compatible with Python3, we should move to
# https://www.tensorflow.org/tfx/transform/api_docs/python/tft/covariance
def tf_cov_times_n_points(x):
    """Covariance of `x` (rows = points) scaled by the number of points.

    The caller must divide by n_points itself: the static length is
    unknown here because the inputs may be concatenations of tensors.
    """
    mean_row = tf.reduce_mean(x, axis=0, keepdims=True)
    sum_row = tf.reduce_sum(x, axis=0, keepdims=True)
    outer_mean = tf.matmul(tf.transpose(mean_row), sum_row)
    gram = tf.matmul(tf.transpose(x), x)
    return gram - outer_mean
def create_reset_metric(metric, scope, **metric_args):
    """create a metric inside a scope to control over variables reset operations.

    suggestion by: shoeffner -> https://github.com/tensorflow/tensorflow/issues/4814
    reimplemented and added check if scope is not empty, to avoid accidentally resetting some other variables.

    Args:
        metric (type): a tf.metric function, this typically returns a tuple: (metric_op, update_op).
        scope (type): scope_name to use in the creation of the metric nodes.
                    (scope should be different from any other scope already containing variables)
        **metric_args (type): arguments to pass to the metric function -> metric(**metric_args).

    Returns:
        (metric_op, update_op, reset_op)

    Example usage:

    ```python
    metric_op, update_op, reset_op = create_reset_metric(tf.metrics.mean,
                                            scope="mean_reset_metric/"+tensor.name,
                                            values=tensor)
    ```

    """
    # ":" is not a valid scope character (it appears in tensor names).
    scope = scope.replace(":", "_")
    with tf.compat.v1.variable_scope(scope) as scope:
        local_vars = tf.contrib.framework.get_variables(scope,
                                                        collection=tf.GraphKeys.LOCAL_VARIABLES)
        # this performs a check that the scope is currently empty,
        # this is very important to ensure the reset_op will reset
        # only the metric variables created in the present function
        if local_vars:
            raise Exception("local variables already present in scope: `%s`. " \
                            "I cannot safely initialize reset operation for the metric." % scope.name)

        metric_op, update_op = metric(**metric_args)
        # Re-collect: now the scope holds exactly the variables the metric
        # just created, so reset_op touches nothing else.
        local_vars = tf.contrib.framework.get_variables(scope,
                                                        collection=tf.GraphKeys.LOCAL_VARIABLES)
        reset_op = tf.compat.v1.variables_initializer(local_vars)

    return metric_op, update_op, reset_op
def create_concat_opts(scope, node):
    """Create ops that accumulate successive values of a 2D `node` by
    row-concatenation inside `scope`.

    Returns:
        (accumulator, concat_update_ops, concat_reset_ops):
        `accumulator` is a local variable holding all rows seen so far;
        running `concat_update_ops` appends the current value of `node`
        (or assigns it on the first call); `concat_reset_ops`
        re-initializes only the step counter `i`, so the next update
        overwrites the accumulator.
    """
    scope = scope.replace(":", "_")
    with tf.variable_scope(scope) as scope:
        # see https://github.com/tensorflow/tensorflow/issues/4432
        # TODO it must be 1-D? Maybe yes,(for PCA yes for sure).
        node_shape = node.shape.as_list()
        if len(node_shape) != 2:
            raise RuntimeError("the node passed for concatenation is not a 2D tensor, as expected...")
        dim = node_shape[1]

        # Starts empty (0 rows); validate_shape=False in the updates lets
        # the row count grow.
        accumulator = tf.get_variable("accumulator",
                                        initializer=tf.zeros([0, dim]),
                                        trainable=False,
                                        collections=[tf.GraphKeys.LOCAL_VARIABLES]
                                        )

        # Step counter: distinguishes the first update (assign) from the
        # following ones (concat).
        i = tf.get_variable("index",
                            initializer=tf.constant(0),
                            dtype=tf.int32,
                            trainable=False,
                            collections=[tf.GraphKeys.LOCAL_VARIABLES]
                            )

        def assign():
            # with tf.control_dependencies(tf.assign_add(i, 1)):
            return tf.assign(accumulator, node, validate_shape=False), tf.assign_add(i, 1)

        def concat():
            return tf.assign(accumulator, tf.concat([accumulator, node], axis=0), validate_shape=False), tf.assign_add(i, 1)

        concat_update_ops = tf.cond(tf.equal(i, 0),
                                    assign,
                                    concat)
        concat_reset_ops = tf.variables_initializer([i])

    return accumulator, concat_update_ops, concat_reset_ops
'''
def sample_discrete_from_continuous(probabilies):
bernoulli = tf.distributions.Bernoulli(probs=probabilies)
return bernoulli.sample()
'''
'''
def rescale(data, eps, min_value=0., max_value=1.):
delta = max_value - min_value
return (delta - 2 * eps) * data + eps + min_value
'''
def tf_rescale(data, eps, min_value=-1.0, max_value=1.0):
    """Affinely map `data` into [min_value + eps, max_value - eps].

    NOTE(review): assumes `data` lies in [0, 1] -- confirm at call sites.
    """
    span = max_value - min_value
    return data * (span - 2 * eps) + eps + min_value
'''
def clip(data, low=-1, high=1):
return np.clip(data, low, high)
'''
def tf_clip(data, low=-1.0, high=1.0):
    """Clip tensor values elementwise into [low, high]."""
    #data = tf.cast(data, dtype=tf.float32)
    return tf.clip_by_value(data, low, high)
def np_softplus(x, limit=30):
    """Numerically safe scalar softplus log(1 + exp(x)).

    For x > limit the result equals x to working precision, and computing
    exp(x) directly would overflow, so x is returned as-is.

    Args:
        x (float): input value.
        limit (float): threshold above which the identity is returned.

    Returns:
        float: softplus(x).
    """
    if x > limit:
        return x
    # log1p is more accurate than log(1 + .) when exp(x) is tiny.
    return np.log1p(np.exp(x))
# Maps a dtype name to the short token used when building model/run ids.
dtype_short = {
    'float16': 'f16',
    'float32': 'f32',
    'float64': 'f64',
    'bfloat16': 'bf16',
    'complex64': 'c64',
    'complex128': 'c128'}
def get_short_dtype(dtypestr):
    """
    return the dtype name (string) in short form, typically used for id construction

    Args:
        dtypestr (str) : dtype name in string format

    Returns:
        str : the short name

    Raises:
        ValueError : if the dtype is not in the supported `dtype_short` map.
    """
    # `not in` is the idiomatic membership test.
    if dtypestr not in dtype_short:
        raise ValueError('the type specified %s is not supported.' % dtypestr)
    return dtype_short[dtypestr]
# layer_short_names={'dense' : 'D',
# 'conv2d' : 'C',
# 'Linear' : 'D',
# 'Conv2D' : 'C',
# 'GaussianDiagonal' : 'GD',
# 'GaussianDiagonalZeroOne' : 'GDZO',
# 'LogitNormalDiagonal' : 'LND',
# 'Bernoulli' : 'B'}
#
# def get_short_layer_name(layer_str, layer_kwargs):
# """
# return the layer type name (string) in short form, typically used for id construction
#
# Args:
# layer_str (str) : layer type in string format
#
# Returns:
# str : the short name
# """
# if not layer_str in layer_short_names:
# raise ValueError('the type specified %s is not supported.'%layer_str)
# return layer_short_names[layer_str]
#
#
# Maps a contractive-regularizer name to the short token used in run ids;
# the list pseudo-regularizer contributes no token of its own.
regularizers_short_names = {
    'standard_contractive_regularizer': 'SCR',
    'cos_contractive_regularizer': 'CCR',
    'geometric_contractive_regularizer': 'GCR',
    'wasserstein_contractive_regularizer': 'WCR',
    'ring_loss_regularizer': 'RLR',
    'ring_loss_variable_regularizer': 'RLVR',
    'contractive_reg_list': ''}
def get_short_regularization_name(reg_name):
    """
    return the regularization name (string) in short form, typically used for id construction

    Args:
        reg_name (str) : regularization name in string format

    Returns:
        str : the short name

    Raises:
        ValueError : if the name is not in `regularizers_short_names`.
    """
    # `not in` is the idiomatic membership test.
    if reg_name not in regularizers_short_names:
        raise ValueError('the regularizer specified %s is not supported.' % reg_name)
    return regularizers_short_names[reg_name]
def regularization_info(layer_dict):
    """Build the id fragment describing a layer's contractive regularizer.

    Expects layer_dict["contractive_regularizer"] = (name, kwargs); the
    pseudo-name 'contractive_reg_list' recurses over kwargs['list_regs'].
    Returns "" when no regularizer is configured.
    """
    contr_reg = layer_dict.get("contractive_regularizer", None)
    if contr_reg is None:
        return ""
    crname, crdict = contr_reg
    parts = ["r"]
    if crname == 'contractive_reg_list':
        for reg_tuple in crdict['list_regs']:
            parts.append(regularization_info({"contractive_regularizer": reg_tuple}))
    else:
        parts.append(get_short_regularization_name(crname))
        # Append the numeric kwargs that are present, in a fixed order.
        for key, tag in (("norm", "_n"), ("scale_mean", "_sm"),
                         ("scale", "_s"), ("scale_covariance", "_sc")):
            if key in crdict:
                parts.append(tag + str(crdict[key]))
        # TODO add your own regularizer type and extract relevant parameters!
    return "".join(parts)
# Maps a tf/sonnet/keras method or module name to the short token used when
# composing run/model id strings (empty string means the item is omitted).
method_name_short = {
    # activation functions
    'relu': 'R',
    'elu': 'E',
    'leaky_relu': 'LR',
    'LeakyReLU': 'LR',
    'sigmoid': 'S',
    'tanh': 'T',

    # layers
    'dense': 'D',
    'conv2d': 'C',
    'max_pooling2d': 'P',
    'flatten': '',  # not shown

    # snt modules
    'Linear': 'D',
    'LinearWN': 'D',
    'Concatenate': 'CO',
    'Conv2D': 'C',
    'Conv2DTranspose': 'CT',
    'Conv2DWN': 'C',
    'ConvNet2D': 'CN',
    'ConvNet2DTranspose': 'CNT',
    'ConvDec': 'CDec',
    'ResEnc': 'REnc',
    'ResDec': 'RDec',
    'BatchFlatten': '',
    'Identity': '',
    'BatchNorm': 'BN',
    'LayerNorm': 'LN',
    'BatchReshape': 'BR',
    'ResUnit': 'RU',
    'ResNet18': 'RN18',
    'VGGBlock': 'V',
    'Sigmoid': 'S',
    'Tanh': 'T',
    'MaxPooling2D': 'P',
    'Dropout': 'DO',
    'RandomUniform': 'RU',
    'RandomGaussian': 'RG',
    'AveragePooling2D': 'AP',

    # stochastic_models
    'GaussianDiagonal': 'GD',
    'GaussianDiagonalZeroOne': 'GD01',
    'GaussianDiagonalPlusMinusOne': 'GDPM1',
    'Gaussian': 'G',
    'vonMisesFisher': 'vMF',
    'LogisticDiagonalZeroOne': 'LD01',
    'LogisticDiagonalPlusMinusOne': 'LDPM1',
    'LogitNormalDiagonal': 'LND',
    'LogitNormalDiagonalPlusMinusOne':'LND01', # >>>>>>> TODO this is very confusing, it should be: 01 -> pm1. I don't change it now since some already trained models could not be loaded after
    'Bernoulli': 'B',
    'BernoulliPlusMinusOne': 'BPM1',
    'OneHotCategorical': 'OHC',

    # initializers
    "glorot_normal_initializer": 'gn',
    "glorot_uniform_initializer": 'gu',
    "xavier_initializer": 'x',
    "truncated_normal_initializer": 't',
    "variance_scaling_initializer": 'v',
    "constant_initializer": 'c',
    "constant": 'c',
    "random_normal": 'n',

    # custom networks
    "CIFAR10TutorialNetwork": "CIFAR10TutorialNetwork",

    # regularizers
    "l2_regularizer": "Ltwo",
    "l1_regularizer": "Lone",
    "sum_regularizer": "Sum",
    # keras.regularizers
    "l2": "Ltwo",
    "l1": "Lone",

    # covariance parameterization
    "softplus": 'S',
    "linear_softplus": 'LS',
    "exp": 'E',

    # zero one methods
    "clip": 'C',
    #"sigmoid": 's', #already defined sigmoid "S"

    # clipping gradient
    "clip_by_global_norm": "GN",
    "clip_by_norm": "N",
    "clip_by_value": "V",
    "none": "No",
    "no": "No",

    # preprocess filtering
    "FromAE": "A",
}
def listWithPoints(x):
    """Flatten `x` (int, list or tuple, possibly nested) into a
    comma-separated string of its scalar entries, e.g. [1, [2, 3]] -> "1,2,3"."""
    if isinstance(x, int):
        x = [x]
    # Strip parentheses, brackets and spaces from the repr; the commas
    # that remain are the separators.
    return str(list(x)).translate(str.maketrans('', '', '()[] '))
def get_method_id(method_tuple):
    """Creates the id for a method of tensorflow.

    Args:
        method_tuple (tuple): A tuple composed of : (name of the method of tensorflow, kwargs to pass to the method, bool_activation).

    Returns:
        string: the idname of the method that we want to concatenate in the output filenames.

    Note:
        The original chain contained several duplicated, unreachable `elif`
        branches (AveragePooling2D appeared three times, Tanh/Sigmoid/Identity
        twice). Only the first occurrence of each could ever run, so the dead
        copies were removed; behavior is unchanged.
    """
    # the name could be articulated, since I might want to get the initializers or
    # regularizers from different submodules in tf,
    # e.g. tf.contrib.layers.xavier_initializer;
    # the method name we are interested in is the one after the last dot.
    if method_tuple is None:
        return "0"
    method_name = method_tuple[0].split('.')[-1]
    method_kwargs = method_tuple[1]
    # every id starts with the short alias of the method name
    methodid = method_name_short[method_name]
    if method_name == 'dense':
        methodid += str(method_kwargs['units'])
    elif method_name == 'conv2d':
        methodid += str(method_kwargs['filters']) + 'x' + re.sub('[( )]', '', str(method_kwargs['kernel_size']))
    elif method_name == 'max_pooling2d':
        methodid += re.sub('[( )]', '', str(method_kwargs['pool_size']))
    elif method_name == 'AveragePooling2D':
        # NOTE(review): a later (unreachable) duplicate of this branch also appended
        # 's' + listWithPoints(method_kwargs['strides']); the reachable form is kept.
        methodid += re.sub('[( )]', '', str(method_kwargs['pool_size']))
    elif method_name == 'flatten':
        pass
    elif method_name == 'Linear':
        # output layers carry an explicit size
        if "output_size" in method_kwargs:
            methodid += str(method_kwargs["output_size"])
    elif method_name == 'Concatenate':
        methodid += str(method_kwargs['node_name'])
    elif method_name == 'LinearWN':
        # output layers carry an explicit size
        if "output_size" in method_kwargs:
            methodid += str(method_kwargs["output_size"])
        methodid += 'wn' + str(int(method_kwargs['use_weight_norm']))
    elif method_name == 'Conv2D':
        # output layers carry an explicit channel count
        if "output_channels" in method_kwargs:
            methodid += str(method_kwargs["output_channels"])
        methodid += 'k' + listWithPoints(method_kwargs['kernel_shape'])
    elif method_name == 'Conv2DTranspose':
        # output layers carry an explicit channel count
        if "output_channels" in method_kwargs:
            methodid += str(method_kwargs["output_channels"])
        if "output_shape" in method_kwargs:
            methodid += 'o' + listWithPoints(method_kwargs['output_shape'])
        methodid += 'k' + listWithPoints(method_kwargs['kernel_shape'])
        if "stride" in method_kwargs:
            methodid += 's' + listWithPoints(method_kwargs['stride'])  # if you need to change to 'strides', talk to me (****)
    elif method_name == 'Conv2DWN':
        methodid += str(method_kwargs['output_channels']) + 'o' + listWithPoints(method_kwargs['kernel_shape']) + \
                    'wn' + str(int(method_kwargs['use_weight_norm']))
    elif method_name == 'ResUnit':
        methodid += 'c' + str(method_kwargs['depth']) + 'k' + listWithPoints(method_kwargs['kernel_shape']) + \
                    's' + str(method_kwargs['stride'])
    elif method_name == 'VGGBlock':
        methodid += 'c' + str(method_kwargs['channels']) + 'k' + listWithPoints(method_kwargs['kernel_shape']) + \
                    'd' + str(method_kwargs['prob_drop'])
        if method_kwargs.get('logits_size', None) is not None:
            methodid += "l" + listWithPoints(method_kwargs['logits_size'])
    elif method_name == 'ResNet18':
        methodid += 'o' + str(method_kwargs['output_size']) + 'wn' + str(int(method_kwargs['use_weight_norm']))
    elif method_name == 'ConvNet2D':
        methodid += 'o' + listWithPoints(method_kwargs['output_channels']) + 'k' + listWithPoints(
            method_kwargs['kernel_shapes']) + 's' + listWithPoints(method_kwargs['strides'])
    elif method_name == 'ConvNet2DTranspose':
        methodid += 'o' + listWithPoints(method_kwargs['output_channels']) + 'k' + listWithPoints(
            method_kwargs['kernel_shapes']) + 's' + listWithPoints(method_kwargs['strides'])
    elif method_name == 'ConvDec':
        if method_kwargs.get('linear_first', None) is not None:
            methodid += 'l' + listWithPoints(method_kwargs["linear_first"]["sizes"])
            methodid += 'r' + listWithPoints(method_kwargs["linear_first"]["reshape"])
        methodid += 'c' + listWithPoints(method_kwargs['channels']) \
                    + 'k' + listWithPoints(method_kwargs['kernel_shape'])
    elif method_name in ['ResEnc', 'ResDec']:
        methodid += 'h' + str(method_kwargs['num_hiddens'])
        methodid += 'rl' + str(method_kwargs['num_residual_layers'])
        methodid += 'rh' + str(method_kwargs['num_residual_hiddens'])
        methodid += 'd' + str(method_kwargs['prob_drop'])
        if 'creg_scale' in method_kwargs and method_kwargs['creg_scale'] is not None:
            methodid += 'cs' + str(method_kwargs['creg_scale'])
    elif method_name == 'BatchFlatten':
        pass
    elif method_name == 'Identity':
        pass
    elif method_name == 'MaxPooling2D':
        methodid += re.sub('[( )]', '', str(method_kwargs['pool_size'])) + 's' + listWithPoints(
            method_kwargs['strides'])
    elif method_name == 'Dropout':
        methodid += 'r' + str(method_kwargs['rate'])  # tf.layers.dropout
    elif method_name == 'Sigmoid':
        pass
    elif method_name == 'Tanh':
        pass
    elif method_name == 'RandomUniform':
        methodid += 's' + str(method_kwargs['shape'])
        methodid += 'min' + str(method_kwargs['minval'])
        methodid += 'max' + str(method_kwargs['maxval'])
        # for the moment we don't have mean and covariance as parameters
    elif method_name == 'RandomGaussian':
        methodid += 's' + str(method_kwargs['shape'])
    elif method_name == 'BatchReshape':
        pass
    elif method_name == 'BatchNorm':
        pass  # no hyper-parameters encoded in the id
    elif method_name == 'LayerNorm':
        pass  # no hyper-parameters encoded in the id
    elif method_name in ('GaussianDiagonal',
                         'GaussianDiagonalZeroOne',
                         'GaussianDiagonalPlusMinusOne',
                         'vonMisesFisher',
                         'LogitNormalDiagonal',
                         'LogitNormalDiagonalPlusMinusOne',
                         'LogisticDiagonalZeroOne',
                         'LogisticDiagonalPlusMinusOne'):
        # distribution layers wrap an inner module: recurse to id the wrapped
        # module (deepcopy so the caller's kwargs are not mutated)
        module_tuple = copy.deepcopy(method_kwargs["module_tuple"])
        if "output_size" in method_kwargs:
            module_tuple[1]["output_size"] = method_kwargs["output_size"]
        if "output_shape" in method_kwargs:
            module_tuple[1]["output_channels"] = method_kwargs["output_shape"][-1]
        methodid += "m" + get_method_id(module_tuple)
        if "minimal_concentration" in method_kwargs or "minimal_covariance" in method_kwargs:
            if method_name == 'vonMisesFisher':
                methodid += "mc" + str(method_kwargs["minimal_concentration"])
            else:
                methodid += "mc" + str(method_kwargs["minimal_covariance"])
        if "zero_one_method" in method_kwargs and method_kwargs["zero_one_method"] != "sigmoid":
            methodid += "zo" + str(method_name_short[method_kwargs["zero_one_method"]])
        if "scalar_covariance" in method_kwargs:
            scov = method_kwargs["scalar_covariance"]
            # `==` (not `is`) kept from the original: 1/1.0 also compare equal to True
            if scov == True:
                methodid += "scT"
            elif isinstance(scov, float):
                methodid += "sc" + str(scov)
        if method_name == 'LogitNormalDiagonal' or method_name == 'LogitNormalDiagonalPlusMinusOne':
            if "clip_value" in method_kwargs:
                clip = method_kwargs["clip_value"]
                methodid += "cv" + str(clip)
        if check_key_in("creg_tuple", method_kwargs):
            metric_name, cscale = method_kwargs["creg_tuple"]
            methodid += 'cr' + metric_name[0].upper() + 'cs' + "{:.4g}".format(cscale)
    elif method_name in ('Bernoulli', 'BernoulliPlusMinusOne'):
        if "output_size" in method_kwargs:
            methodid += str(method_kwargs["output_size"])
        clip = method_kwargs["clip_value"]
        methodid += "cv" + str(clip)
    elif method_name == 'OneHotCategorical':
        if "output_size" in method_kwargs:
            methodid += str(method_kwargs["output_size"])
        clip = method_kwargs["clip_value"]
        methodid += "cv" + str(clip)
    elif method_name == 'CIFAR10TutorialNetwork':
        pass
    elif "variance_scaling_initializer" in method_name:
        pass
    elif "glorot_normal_initializer" in method_name or "glorot_uniform_initializer" in method_name:
        pass
    elif "truncated_normal_initializer" in method_name or "random_normal" in method_name:
        methodid += str(method_kwargs['stddev'])
    elif "constant_initializer" in method_name or "constant" in method_name:
        methodid += str(method_kwargs['value'])
    elif method_name in ["l1_regularizer", "l2_regularizer", "sum_regularizer"]:
        methodid += str(method_kwargs["scale"])
    elif method_name in ["l1", "l2"]:
        methodid += str(method_kwargs["l"])
    elif method_name == "softplus":
        pass
    elif method_name == "linear_softplus":
        pass
    elif method_name == "exp":
        pass
    # PREPROCESSING SECTION used to prefilter/transform an image with some method
    elif method_name == "FromAE":
        methodid += hash_this(method_kwargs["filename"], trunc=3)
        methodid += "t" + str(method_kwargs["transform_prob"])
        methodid += "n" + str(method_kwargs["noisy_transform_prob"])
    # Here implement your favourite method
    # elif :
    #
    else:
        print('----------------------')
        print('ERROR ', method_name)
        raise ValueError("id rule for `%s` has to be implemented." % method_name)
    # contractive regularizers are supported only for a few layers at the moment;
    # extend this whitelist (and test it) if you add more
    if "contractive_regularizer" in method_kwargs:
        if method_name in ("Linear", "GaussianDiagonal", "GaussianDiagonalPlusMinusOne"):
            methodid += regularization_info(method_kwargs)
        else:
            raise ValueError("contractive_regularizers not supported for `%s`." % method_name)
    return methodid
def check_key_in(key, kwargs):
    """Return True iff *kwargs* contains *key* mapped to a non-None value."""
    if key not in kwargs:
        return False
    return kwargs[key] is not None
def get_clipping_id(clipping_tuple):
    """Build the short id for a gradient-clipping specification.

    ``clipping_tuple`` is ``(method_name, {"value": v})``; a falsy method name
    means no clipping and yields the short alias for "none".
    """
    method = clipping_tuple[0]
    if method:
        clip_value = clipping_tuple[1]["value"]
        return method_name_short[method] + "{:.4g}".format(clip_value)
    return method_name_short["none"]
def eval_method_from_tuple(module, method_tuple, instantiate=True):
    """Resolve ``method_tuple = (method_path, method_kwargs)`` against *module*.

    Args:
        module (python module): module from which the method is taken.
        method_tuple (tuple): (method_path, method_kwargs).
        instantiate (bool): when True call the resolved method with
            ``**method_kwargs``; when False return the bare callable.

    Returns:
        None when *method_tuple* is falsy, otherwise
        ``module.method_path(**method_kwargs)`` (or the callable itself).
    """
    if not method_tuple:
        return None
    resolved = load_method_fn_from_method_path(module, method_tuple[0])
    return resolved(**method_tuple[1]) if instantiate else resolved
def load_method_fn_from_method_path(module, method_path):
    """Resolve a dotted *method_path* relative to *module* and return the callable.

    Args:
        module (python module): the root module to resolve against.
        method_path (string): dotted path to the method, e.g. "layers.dense".

    Returns:
        None when *method_path* is falsy, otherwise the attribute found at
        ``module.<method_path>`` after importing the intermediate submodules.
    """
    if not method_path:
        return None
    *submodules, attr_name = method_path.split(".")
    target_path = module.__name__
    if submodules:
        # intermediate components are submodules that must be imported first
        target_path += '.' + '.'.join(submodules)
    return getattr(importlib.import_module(target_path), attr_name)
def try_load_class_from_modules(class_path, modules):
    """Try to resolve *class_path* from each module in *modules*.

    All modules are attempted in order and the last successful lookup wins
    (preserving the original semantics).

    Args:
        class_path (string): dotted path of the class relative to a module.
        modules (list): candidate python modules to resolve against.

    Returns:
        The resolved class.

    Raises:
        Exception: if no module provides *class_path*.
    """
    LayerClass = None
    for module in modules:
        try:
            LayerClass = load_method_fn_from_method_path(module, class_path)
        except Exception:
            # narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; this module simply doesn't have it
            pass
    if LayerClass is None:
        raise Exception("problem loading class: {:}, not found in modules {:}".format(class_path, modules))
    return LayerClass
def load_class(module_plus_class, relative=False, base_path=''):
    """Import and return the object named by ``module_plus_class``.

    Args:
        module_plus_class (string): dotted path ending in the class name.
        relative (bool): when True, drop the first path component before
            importing (the remainder is used as the module name).
        base_path (string): optional prefix prepended to the path.

    Returns:
        The class (attribute) resolved from the imported module.
    """
    # assemble the full dotted path, prepending base_path when given
    full_path = ''
    if base_path:
        full_path = base_path
        # make sure the prefix ends with a dot before appending
        if full_path[-1] != '.':
            full_path += '.'
    full_path += module_plus_class
    # split into the module part and the final class name
    module_path, class_name = full_path.rsplit('.', 1)
    if relative:
        module_path = full_path.split('.', 1)[1]
    target_module = importlib.import_module(module_path)
    return getattr(target_module, class_name)
def load_module(module, relative=False, base_path=''):
    """Import and return the module named by *module*, optionally prefixed.

    Args:
        module (string): dotted module path.
        relative (bool): when True, drop the first path component before
            importing.
        base_path (string): optional prefix prepended to the path.

    Returns:
        The imported python module.
    """
    # assemble the full dotted path, prepending base_path when given
    full_path = ''
    if base_path:
        full_path = base_path
        # make sure the prefix ends with a dot before appending
        if full_path[-1] != '.':
            full_path += '.'
    full_path += module
    if relative:
        full_path = full_path.split('.', 1)[1]
    return importlib.import_module(full_path)
def eval_file(file_name_path):
    """Read *file_name_path* and evaluate its content as a Python expression.

    SECURITY NOTE: this uses ``eval`` and must only ever be pointed at
    trusted, locally-authored files (e.g. experiment configs).
    """
    with open(file_name_path, 'r') as stream:
        content = stream.read()
    return eval(content)
def freeze_graph_create_pb(session,
                           output_names=None,
                           variable_names_whitelist=None,
                           variable_names_blacklist=None,
                           output_filename = None,
                           clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.

    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
    outputs are removed.
    @param session The TensorFlow session to be frozen.
    @param variable_names_whitelist A list of variable to be frozen (default all)
    @param variable_names_blacklist A list of variable names that should not be frozen,
                                    or None to freeze all the variables in the graph.
    @param output_names Names of the relevant graph outputs.
    @param output_filename If given, the serialized frozen GraphDef is written to this path.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition.
    """
    graph = session.graph
    with graph.as_default():
        # start from the whitelist when provided, otherwise freeze every variable
        if variable_names_whitelist is not None:
            freeze_var_names = variable_names_whitelist
        else:
            freeze_var_names = [v.op.name for v in tf.global_variables()]
        # drop blacklisted variables (they stay as variables in the frozen graph)
        freeze_var_names = list(set(freeze_var_names).difference(variable_names_blacklist or []))
        # without explicit outputs, keep every variable op as an output
        if output_names is None:
            output_names = [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            # strip device placement so the graph loads on different hardware
            for node in input_graph_def.node:
                node.device = ""
        frozen_graph = tf.graph_util.convert_variables_to_constants(session,
                                                                     input_graph_def,
                                                                     output_names,
                                                                     freeze_var_names)
        # finally we serialize and dump the output graph to the filesystem
        if output_filename is not None:
            with tf.gfile.GFile(output_filename, "wb") as f:
                f.write(frozen_graph.SerializeToString())
            print("freezing and creating pb file: %d ops in the final graph." % len(frozen_graph.node))
    #return frozen_graph
def unpack_dict_of_lists(dictionary):
    """Expand a dict of lists into the list of dicts in the cartesian product.

    E.g. ``{"a": [1, 2], "b": [3]}`` -> ``[{"a": 1, "b": 3}, {"a": 2, "b": 3}]``;
    scalar values are promoted to singleton lists by ``make_list``.
    """
    keys = dictionary.keys()
    value_combinations = product(*map(make_list, dictionary.values()))
    return [dict(zip(keys, combo)) for combo in value_combinations]
def apply_resize(x, intermediate_size):
    """Resize *x* down to (intermediate_size, intermediate_size) and back up.

    The round-trip discards high-frequency detail; the returned tensor has
    the same spatial size as the input batch.
    """
    target_hw = tf.constant([intermediate_size, intermediate_size])
    shrunk = tf.image.resize(x, target_hw)
    # restore the original spatial dimensions (axes 1 and 2 of the batch)
    return tf.image.resize(shrunk, tf.shape(x)[1:3])
def tf_f1_score(y_true, y_pred):
    """Computes 3 different f1 scores, micro macro
    weighted.
    micro: f1 score accross the classes, as 1
    macro: mean of f1 scores per class
    weighted: weighted average of f1 scores per class,
            weighted from the support of each class
    from https://stackoverflow.com/questions/45287169/tensorflow-precision-recall-f1-multi-label-classification

    Args:
        y_true (Tensor): labels, with shape (batch, num_classes)
        y_pred (Tensor): model's predictions, same shape as y_true

    Returns:
        tuple(Tensor): (micro, macro, weighted)
                    tuple of the computed f1 scores
    """
    f1s = [0, 0, 0]
    # cast to float64 so the TP/FP/FN products and divisions are exact enough
    y_true = tf.cast(y_true, tf.float64)
    y_pred = tf.cast(y_pred, tf.float64)
    # axis=None -> global counts (micro); axis=0 -> per-class counts (macro)
    for i, axis in enumerate([None, 0]):
        TP = tf.count_nonzero(y_pred * y_true, axis=axis)
        FP = tf.count_nonzero(y_pred * (y_true - 1), axis=axis)
        FN = tf.count_nonzero((y_pred - 1.) * y_true, axis=axis)
        precision = TP / (TP + FP)
        recall = TP / (TP + FN)
        f1 = 2. * precision * recall / (precision + recall)
        f1s[i] = tf.reduce_mean(f1)
    # weighted: per-class f1 (from the last loop iteration) weighted by support
    weights = tf.reduce_sum(y_true, axis=0)
    weights /= tf.reduce_sum(weights)
    f1s[2] = tf.reduce_sum(f1 * weights)
    micro, macro, weighted = f1s
    return micro, macro, weighted
| [
"tensorflow.equal",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.nn.softmax",
"copy.deepcopy",
"tensorflow.gfile.GFile",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.variables_initializer",
"tensorflow.log",
"numpy.arange",
"tensorflow.graph_ut... | [((19, 40), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (33, 40), False, 'import matplotlib\n'), ((1752, 1771), 'copy.deepcopy', 'copy.deepcopy', (['opts'], {}), '(opts)\n', (1765, 1771), False, 'import copy\n'), ((1960, 1981), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (1973, 1981), True, 'import tensorflow as tf\n'), ((4324, 4364), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (4338, 4364), True, 'import tensorflow as tf\n'), ((4377, 4416), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (4390, 4416), True, 'import tensorflow as tf\n'), ((9659, 9692), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['data', 'low', 'high'], {}), '(data, low, high)\n', (9675, 9692), True, 'import tensorflow as tf\n'), ((30113, 30142), 'importlib.import_module', 'importlib.import_module', (['path'], {}), '(path)\n', (30136, 30142), False, 'import importlib\n'), ((31157, 31193), 'importlib.import_module', 'importlib.import_module', (['modulesname'], {}), '(modulesname)\n', (31180, 31193), False, 'import importlib\n'), ((31727, 31763), 'importlib.import_module', 'importlib.import_module', (['modulesname'], {}), '(modulesname)\n', (31750, 31763), False, 'import importlib\n'), ((35530, 35557), 'tensorflow.cast', 'tf.cast', (['y_true', 'tf.float64'], {}), '(y_true, tf.float64)\n', (35537, 35557), True, 'import tensorflow as tf\n'), ((35571, 35598), 'tensorflow.cast', 'tf.cast', (['y_pred', 'tf.float64'], {}), '(y_pred, tf.float64)\n', (35578, 35598), True, 'import tensorflow as tf\n'), ((36008, 36037), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (36021, 36037), True, 'import tensorflow as tf\n'), ((36053, 36075), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {}), '(weights)\n', (36066, 36075), 
True, 'import tensorflow as tf\n'), ((36090, 36117), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(f1 * weights)'], {}), '(f1 * weights)\n', (36103, 36117), True, 'import tensorflow as tf\n'), ((2548, 2608), 'importlib.import_module', 'importlib.import_module', (["(my_path + '.network.' + module_name)"], {}), "(my_path + '.network.' + module_name)\n", (2571, 2608), False, 'import importlib\n'), ((4436, 4456), 'tensorflow.transpose', 'tf.transpose', (['mean_x'], {}), '(mean_x)\n', (4448, 4456), True, 'import tensorflow as tf\n'), ((4522, 4537), 'tensorflow.transpose', 'tf.transpose', (['x'], {}), '(x)\n', (4534, 4537), True, 'import tensorflow as tf\n'), ((5721, 5755), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['scope'], {}), '(scope)\n', (5748, 5755), True, 'import tensorflow as tf\n'), ((5787, 5874), 'tensorflow.contrib.framework.get_variables', 'tf.contrib.framework.get_variables', (['scope'], {'collection': 'tf.GraphKeys.LOCAL_VARIABLES'}), '(scope, collection=tf.GraphKeys.\n LOCAL_VARIABLES)\n', (5821, 5874), True, 'import tensorflow as tf\n'), ((6410, 6497), 'tensorflow.contrib.framework.get_variables', 'tf.contrib.framework.get_variables', (['scope'], {'collection': 'tf.GraphKeys.LOCAL_VARIABLES'}), '(scope, collection=tf.GraphKeys.\n LOCAL_VARIABLES)\n', (6444, 6497), True, 'import tensorflow as tf\n'), ((6568, 6614), 'tensorflow.compat.v1.variables_initializer', 'tf.compat.v1.variables_initializer', (['local_vars'], {}), '(local_vars)\n', (6602, 6614), True, 'import tensorflow as tf\n'), ((7129, 7153), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (7146, 7153), True, 'import tensorflow as tf\n'), ((8949, 8978), 'tensorflow.variables_initializer', 'tf.variables_initializer', (['[i]'], {}), '([i])\n', (8973, 8978), True, 'import tensorflow as tf\n'), ((33779, 33885), 'tensorflow.graph_util.convert_variables_to_constants', 'tf.graph_util.convert_variables_to_constants', (['session', 
'input_graph_def', 'output_names', 'freeze_var_names'], {}), '(session, input_graph_def,\n output_names, freeze_var_names)\n', (33823, 33885), True, 'import tensorflow as tf\n'), ((34685, 34736), 'tensorflow.constant', 'tf.constant', (['[intermediate_size, intermediate_size]'], {}), '([intermediate_size, intermediate_size])\n', (34696, 34736), True, 'import tensorflow as tf\n'), ((35654, 35698), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['(y_pred * y_true)'], {'axis': 'axis'}), '(y_pred * y_true, axis=axis)\n', (35670, 35698), True, 'import tensorflow as tf\n'), ((35712, 35762), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['(y_pred * (y_true - 1))'], {'axis': 'axis'}), '(y_pred * (y_true - 1), axis=axis)\n', (35728, 35762), True, 'import tensorflow as tf\n'), ((35776, 35828), 'tensorflow.count_nonzero', 'tf.count_nonzero', (['((y_pred - 1.0) * y_true)'], {'axis': 'axis'}), '((y_pred - 1.0) * y_true, axis=axis)\n', (35792, 35828), True, 'import tensorflow as tf\n'), ((35974, 35992), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['f1'], {}), '(f1)\n', (35988, 35992), True, 'import tensorflow as tf\n'), ((2034, 2064), 'tensorflow.log', 'tf.log', (['(probabilities + NUMTOL)'], {}), '(probabilities + NUMTOL)\n', (2040, 2064), True, 'import tensorflow as tf\n'), ((8817, 8831), 'tensorflow.equal', 'tf.equal', (['i', '(0)'], {}), '(i, 0)\n', (8825, 8831), True, 'import tensorflow as tf\n'), ((34231, 34268), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['output_filename', '"""wb"""'], {}), "(output_filename, 'wb')\n", (34245, 34268), True, 'import tensorflow as tf\n'), ((34786, 34797), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (34794, 34797), True, 'import tensorflow as tf\n'), ((2009, 2031), 'tensorflow.one_hot', 'tf.one_hot', (['y'], {'depth': 'n'}), '(y, depth=n)\n', (2019, 2031), True, 'import tensorflow as tf\n'), ((7973, 7991), 'tensorflow.zeros', 'tf.zeros', (['[0, dim]'], {}), '([0, dim])\n', (7981, 7991), True, 'import tensorflow as tf\n'), 
((8247, 8261), 'tensorflow.constant', 'tf.constant', (['(0)'], {}), '(0)\n', (8258, 8261), True, 'import tensorflow as tf\n'), ((8560, 8610), 'tensorflow.assign', 'tf.assign', (['accumulator', 'node'], {'validate_shape': '(False)'}), '(accumulator, node, validate_shape=False)\n', (8569, 8610), True, 'import tensorflow as tf\n'), ((8612, 8631), 'tensorflow.assign_add', 'tf.assign_add', (['i', '(1)'], {}), '(i, 1)\n', (8625, 8631), True, 'import tensorflow as tf\n'), ((8760, 8779), 'tensorflow.assign_add', 'tf.assign_add', (['i', '(1)'], {}), '(i, 1)\n', (8773, 8779), True, 'import tensorflow as tf\n'), ((9798, 9807), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (9804, 9807), True, 'import numpy as np\n'), ((2252, 2269), 'numpy.arange', 'np.arange', (['(10 * r)'], {}), '(10 * r)\n', (2261, 2269), True, 'import numpy as np\n'), ((8697, 8735), 'tensorflow.concat', 'tf.concat', (['[accumulator, node]'], {'axis': '(0)'}), '([accumulator, node], axis=0)\n', (8706, 8735), True, 'import tensorflow as tf\n'), ((33345, 33366), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (33364, 33366), True, 'import tensorflow as tf\n'), ((33559, 33580), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (33578, 33580), True, 'import tensorflow as tf\n'), ((24464, 24508), 'copy.deepcopy', 'copy.deepcopy', (["method_kwargs['module_tuple']"], {}), "(method_kwargs['module_tuple'])\n", (24477, 24508), False, 'import copy\n')] |
import numpy as np
import matplotlib.pyplot as pl
import torch
from torch.nn.modules.module import _addindent
import h5py
import argparse
import glob
import os
import sys
import time
sys.path.append('models')
import model_x1
import model_x2
class deep_3d_inversion(object):
    """Neural-network 3D inversion of Hinode spectro-polarimetric maps.

    Loads a pretrained convolutional model (x1 or x2 super-resolution), runs
    it over the Stokes cube read from an HDF5 input file and writes the
    inferred physical quantities (T, vz, tau, logP and magnetic-field
    components) to an HDF5 output file.
    """
    def __init__(self, parsed):
        """Store run options from the parsed CLI dict and select the compute device.

        parsed: dict with at least 'device' ('cpu'/'cuda:N') and 'resolution' (1 or 2).
        """
        # ASCII banner
        print(' _____ _____ _____ ____ _ _ ')
        print(' / ____|_ _/ ____/ __ \| \ | |')
        print(' | (___ | || | | | | | \| |')
        print(' \___ \ | || | | | | | . ` |')
        print(' ____) |_| || |___| |__| | |\ |')
        print(' |_____/|_____\_____\____/|_| \_|')
        self.cuda = torch.cuda.is_available()
        device = parsed['device']
        self.superresolution = parsed['resolution']
        # fall back to CPU when CUDA was requested but is unavailable
        if ('cuda' in device):
            if (self.cuda == False):
                print('GPU not available. Computing in CPU')
                device = 'cpu'
            else:
                print(f"Computing on GPU {device}")
        if (device == 'cpu'):
            print("Computing on CPU")
        self.device = torch.device(device)
        # optical-depth grid the 7 output depth slices correspond to
        self.ltau = np.array([0.0,-0.5,-1.0,-1.5,-2.0,-2.5,-3.0])
        # display names / file names / units for the 7 output physical variables
        self.variable = ["T", "v$_z$", "h", "log P", "$(B_x^2-B_y^2)^{1/2}$", "$(B_x B_y)^{1/2}$", "B$_z$"]
        self.variable_txt = ["T", "vz", "tau", "logP", "sqrtBx2By2", "sqrtBxBy", "Bz"]
        self.units = ["K", "km s$^{-1}$", "km", "cgs", "kG", "kG", "kG"]
        # per-variable scale factors applied when writing the output datasets
        self.multiplier = [1.0, 1.e-5, 1.e-5, 1.0, 1.0e-3, 1.0e-3, 1.0e-3]
        # reference height (presumably of the tau=1 surface, in km) — unused here
        self.z_tau1 = 1300.0

    def load_weights(self):
        """Instantiate the network for the chosen super-resolution factor and
        load its checkpoint and the input/output normalization ranges."""
        if (self.superresolution == 1):
            self.checkpoint = 'models/weights_x1.pth'
            self.normalization = 'models/normalization_x1.npz'
            print("Generating output x1")
            print("Defining inversion NN...")
            self.model = model_x1.block(n_input_channels=112*4, n_output_channels=7*7).to(self.device)
        if (self.superresolution == 2):
            self.checkpoint = 'models/weights_x2.pth'
            self.normalization = 'models/normalization_x2.npz'
            print("Generating output x2")
            print("Defining inversion NN...")
            self.model = model_x2.block(n_input_channels=112*4, n_output_channels=7*7).to(self.device)

        # map_location keeps the checkpoint loadable on CPU-only machines
        tmp = torch.load(self.checkpoint, map_location=lambda storage, loc: storage)
        self.model.load_state_dict(tmp['inv_state_dict'])
        print("=> loaded checkpoint for inversion '{}'".format(self.checkpoint))
        self.model.eval()

        # min/max used to de-normalize the network output back to physical units
        tmp = np.load(self.normalization)
        self.phys_min, self.phys_max = tmp['minimum'], tmp['maximum']

    def test_hinode(self, parsed):
        """Run the inversion on the Stokes cube in parsed['input'] and write
        the physical maps to the HDF5 file parsed['output']."""
        print(f"Reading input file {parsed['input']}")
        f = h5py.File(parsed['input'], 'r')
        self.stokes = f['stokes'][:,:,:,:]

        # normalize to the median intensity of a quiet region when requested
        if (parsed['normalize'] is not None):
            x0, x1, y0, y1 = parsed['normalize']
            print(f"Data will be normalized to median value in box : {x0}-{x1},{y0}-{y1}")
            stokes_median = np.median(self.stokes[0,x0:x1,y0:y1,0:3])
        else:
            print(f"Data is already normalized")
            stokes_median = 1.0
        f.close()

        print(f"Transposing data")
        # reorder to (stokes, wavelength, x, y)
        self.stokes = np.transpose(self.stokes, axes=(0,3,1,2))

        _, n_lambda, nx, ny = self.stokes.shape

        # crop both spatial dimensions to multiples of 16 (the network
        # presumably downsamples by 2**4 — confirm against the model)
        nx_int = nx // 2**4
        ny_int = ny // 2**4

        nx = nx_int * 2**4
        ny = ny_int * 2**4

        print(f"Size of map {nx} x {ny}")
        print(f"Cropping map to range (0,{nx})-(0,{ny}) ")

        self.stokes = self.stokes[:,:,0:nx,0:ny]

        print(f"Normalizing data")
        self.stokes /= stokes_median
        # scale the polarization signals (Q, U, V) by 10 — presumably the
        # same scaling used at training time; confirm against the training code
        self.stokes[1,:,:,:] /= 0.1
        self.stokes[2,:,:,:] /= 0.1
        self.stokes[3,:,:,:] /= 0.1

        # stack (stokes, wavelength) into 4*n_lambda input channels, batch of 1
        self.stokes = np.expand_dims(self.stokes.reshape((4*n_lambda,nx,ny)), axis=0)

        # NOTE(review): unused — the output depth axis uses self.ltau instead
        logtau = np.linspace(0.0, -3.0, 70)

        self.load_weights()

        print("Running neural network inversion...")
        start = time.time()

        # Do it in two steps in this case
        # (maps wider than 512 px are processed in 512-px strips to bound memory)
        if (nx > 512):
            n = nx // 512
            idx = []
            for i in range(n+1):
                idx.append(i*512)
            if (nx % 512 != 0):
                idx.append(nx)
                extra = 1
            else:
                extra = 0

            print(f"Doing in {n} parts : {idx}")

            for i in range(n+extra):
                input = torch.as_tensor(self.stokes[0:1,:,idx[i]:idx[i+1],:].astype('float32')).to(self.device)
                print(f"{idx[i]} - {idx[i+1]}")
                if (i == 0):
                    with torch.no_grad():
                        output_model = self.model(input)
                        output_model = output_model.cpu().numpy()
                else:
                    with torch.no_grad():
                        output_model1 = self.model(input)
                        # concatenate the strips back along the x axis
                        output_model = np.concatenate([output_model, output_model1.cpu().numpy()], axis=-2)
        else:
            input = torch.as_tensor(self.stokes[0:1,:,:,:].astype('float32')).to(self.device)
            with torch.no_grad():
                output_model = self.model(input)
                output_model = output_model.cpu().numpy()

        end = time.time()
        print(f"Elapsed time : {end-start} s - {1e6*(end-start)/(nx*ny)} us/pixel")

        # Transform the tensors to numpy arrays and undo the transformation needed for the training
        print("Saving results")
        output_model = np.squeeze(output_model)
        # de-normalize from [0, 1] back to each variable's physical range
        output_model = output_model * (self.phys_max[:,None,None] - self.phys_min[:,None,None]) + self.phys_min[:,None,None]
        # (variable, depth, x, y)
        output_model = output_model.reshape((7,7,self.superresolution*nx,self.superresolution*ny))

        # NOTE(review): unused — presumably a leftover from naming the output file
        tmp = '.'.join(self.checkpoint.split('/')[-1].split('.')[0:2])

        f = h5py.File(f"{parsed['output']}", 'w')
        db_logtau = f.create_dataset('tau_axis', self.ltau.shape)
        db_T = f.create_dataset('T', output_model[0,:,:,:].shape)
        db_vz = f.create_dataset('vz', output_model[1,:,:,:].shape)
        db_tau = f.create_dataset('tau', output_model[2,:,:,:].shape)
        db_logP = f.create_dataset('logP', output_model[3,:,:,:].shape)
        db_Bx2_By2 = f.create_dataset('sqrt_Bx2_By2', output_model[4,:,:,:].shape)
        db_BxBy = f.create_dataset('sqrt_BxBy', output_model[5,:,:,:].shape)
        db_Bz = f.create_dataset('Bz', output_model[6,:,:,:].shape)
        db_Bx = f.create_dataset('Bx', output_model[4,:,:,:].shape)
        db_By = f.create_dataset('By', output_model[5,:,:,:].shape)
        Bx = np.zeros_like(db_Bz[:])
        By = np.zeros_like(db_Bz[:])
        db_logtau[:] = self.ltau
        # apply the per-variable unit conversions declared in __init__
        db_T[:] = output_model[0,:,:,:] * self.multiplier[0]
        db_vz[:] = output_model[1,:,:,:] * self.multiplier[1]
        db_tau[:] = output_model[2,:,:,:] * self.multiplier[2]
        db_logP[:] = output_model[3,:,:,:] * self.multiplier[3]
        db_Bx2_By2[:] = output_model[4,:,:,:] * self.multiplier[4]
        db_BxBy[:] = output_model[5,:,:,:] * self.multiplier[5]
        db_Bz[:] = output_model[6,:,:,:] * self.multiplier[6]

        # recover Bx, By from the signed combinations the network predicts
        A = np.sign(db_Bx2_By2[:]) * db_Bx2_By2[:]**2 # I saved sign(Bx^2-By^2) * np.sqrt(Bx^2-By^2)
        B = np.sign(db_BxBy[:]) * db_BxBy[:]**2 # I saved sign(Bx*By) * np.sqrt(Bx*By)

        # This quantity is obviously always >=0
        D = np.sqrt(A**2 + 4.0*B**2)

        ind_pos = np.where(B >0)
        ind_neg = np.where(B < 0)
        ind_zero = np.where(B == 0)

        # the 1e-1 in the denominators regularizes the division near A+D=0 —
        # presumably an empirical choice; confirm against the training pipeline
        Bx[ind_pos] = np.sign(db_BxBy[:][ind_pos]) * np.sqrt(A[ind_pos] + D[ind_pos]) / np.sqrt(2.0)
        By[ind_pos] = np.sqrt(2.0) * B[ind_pos] / np.sqrt(1e-1 + A[ind_pos] + D[ind_pos])

        Bx[ind_neg] = np.sign(db_BxBy[:][ind_neg]) * np.sqrt(A[ind_neg] + D[ind_neg]) / np.sqrt(2.0)
        By[ind_neg] = -np.sqrt(2.0) * B[ind_neg] / np.sqrt(1e-1 + A[ind_neg] + D[ind_neg])

        Bx[ind_zero] = 0.0
        By[ind_zero] = 0.0

        db_Bx[:] = Bx
        db_By[:] = By

        f.close()
if (__name__ == '__main__'):
    # Command-line entry point: parse the arguments, then run the neural
    # inversion unless the output file already exists.
    parser = argparse.ArgumentParser(description='Fast 3D LTE inversion of Hinode datasets')
    parser.add_argument('-i', '--input', default=None, type=str,
                    metavar='INPUT', help='Input file', required=True)
    parser.add_argument('-o', '--output', default=None, type=str,
                    metavar='OUTPUT', help='Output file', required=True)
    # fixed: this option previously reused --output's metavar/help by mistake
    parser.add_argument('-n', '--normalize', default=None, type=int, nargs='+',
                    metavar='NORMALIZE', help='Normalization box x0 x1 y0 y1 (median of this region normalizes the data)', required=False)
    parser.add_argument('-d', '--device', default='cpu', type=str,
                    metavar='DEVICE', help='Device : cpu/cuda:0/cuda:1,...', required=False)
    parser.add_argument('-r', '--resolution', default=1, type=int, choices=[1,2],
                    metavar='RESOLUTION', help='Resolution', required=False)

    parsed = vars(parser.parse_args())

    if (not os.path.exists(parsed['output'])):
        deep_network = deep_3d_inversion(parsed)
        deep_network.test_hinode(parsed)
    else:
        print(f"Output file {parsed['output']} already exists. Remove it to recompute.")
| [
"numpy.sqrt",
"numpy.array",
"torch.cuda.is_available",
"sys.path.append",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.where",
"numpy.linspace",
"model_x2.block",
"h5py.File",
"numpy.squeeze",
"numpy.sign",
"numpy.transpose",
"time.time",
"model_x1.block",
"torch.device",
"nu... | [((183, 208), 'sys.path.append', 'sys.path.append', (['"""models"""'], {}), "('models')\n", (198, 208), False, 'import sys\n'), ((8626, 8705), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fast 3D LTE inversion of Hinode datasets"""'}), "(description='Fast 3D LTE inversion of Hinode datasets')\n", (8649, 8705), False, 'import argparse\n'), ((722, 747), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (745, 747), False, 'import torch\n'), ((1162, 1182), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (1174, 1182), False, 'import torch\n'), ((1219, 1270), 'numpy.array', 'np.array', (['[0.0, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0]'], {}), '([0.0, -0.5, -1.0, -1.5, -2.0, -2.5, -3.0])\n', (1227, 1270), True, 'import numpy as np\n'), ((2424, 2494), 'torch.load', 'torch.load', (['self.checkpoint'], {'map_location': '(lambda storage, loc: storage)'}), '(self.checkpoint, map_location=lambda storage, loc: storage)\n', (2434, 2494), False, 'import torch\n'), ((2801, 2828), 'numpy.load', 'np.load', (['self.normalization'], {}), '(self.normalization)\n', (2808, 2828), True, 'import numpy as np\n'), ((3004, 3035), 'h5py.File', 'h5py.File', (["parsed['input']", '"""r"""'], {}), "(parsed['input'], 'r')\n", (3013, 3035), False, 'import h5py\n'), ((3521, 3565), 'numpy.transpose', 'np.transpose', (['self.stokes'], {'axes': '(0, 3, 1, 2)'}), '(self.stokes, axes=(0, 3, 1, 2))\n', (3533, 3565), True, 'import numpy as np\n'), ((4240, 4266), 'numpy.linspace', 'np.linspace', (['(0.0)', '(-3.0)', '(70)'], {}), '(0.0, -3.0, 70)\n', (4251, 4266), True, 'import numpy as np\n'), ((4383, 4394), 'time.time', 'time.time', ([], {}), '()\n', (4392, 4394), False, 'import time\n'), ((5769, 5780), 'time.time', 'time.time', ([], {}), '()\n', (5778, 5780), False, 'import time\n'), ((6021, 6045), 'numpy.squeeze', 'np.squeeze', (['output_model'], {}), '(output_model)\n', (6031, 6045), True, 'import numpy as np\n'), ((6367, 6404), 
'h5py.File', 'h5py.File', (['f"""{parsed[\'output\']}"""', '"""w"""'], {}), '(f"{parsed[\'output\']}", \'w\')\n', (6376, 6404), False, 'import h5py\n'), ((7125, 7148), 'numpy.zeros_like', 'np.zeros_like', (['db_Bz[:]'], {}), '(db_Bz[:])\n', (7138, 7148), True, 'import numpy as np\n'), ((7162, 7185), 'numpy.zeros_like', 'np.zeros_like', (['db_Bz[:]'], {}), '(db_Bz[:])\n', (7175, 7185), True, 'import numpy as np\n'), ((7936, 7966), 'numpy.sqrt', 'np.sqrt', (['(A ** 2 + 4.0 * B ** 2)'], {}), '(A ** 2 + 4.0 * B ** 2)\n', (7943, 7966), True, 'import numpy as np\n'), ((7988, 8003), 'numpy.where', 'np.where', (['(B > 0)'], {}), '(B > 0)\n', (7996, 8003), True, 'import numpy as np\n'), ((8021, 8036), 'numpy.where', 'np.where', (['(B < 0)'], {}), '(B < 0)\n', (8029, 8036), True, 'import numpy as np\n'), ((8056, 8072), 'numpy.where', 'np.where', (['(B == 0)'], {}), '(B == 0)\n', (8064, 8072), True, 'import numpy as np\n'), ((9508, 9540), 'os.path.exists', 'os.path.exists', (["parsed['output']"], {}), "(parsed['output'])\n", (9522, 9540), False, 'import os\n'), ((3295, 3339), 'numpy.median', 'np.median', (['self.stokes[0, x0:x1, y0:y1, 0:3]'], {}), '(self.stokes[0, x0:x1, y0:y1, 0:3])\n', (3304, 3339), True, 'import numpy as np\n'), ((7697, 7719), 'numpy.sign', 'np.sign', (['db_Bx2_By2[:]'], {}), '(db_Bx2_By2[:])\n', (7704, 7719), True, 'import numpy as np\n'), ((7801, 7820), 'numpy.sign', 'np.sign', (['db_BxBy[:]'], {}), '(db_BxBy[:])\n', (7808, 7820), True, 'import numpy as np\n'), ((8161, 8173), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (8168, 8173), True, 'import numpy as np\n'), ((8224, 8262), 'numpy.sqrt', 'np.sqrt', (['(0.1 + A[ind_pos] + D[ind_pos])'], {}), '(0.1 + A[ind_pos] + D[ind_pos])\n', (8231, 8262), True, 'import numpy as np\n'), ((8352, 8364), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (8359, 8364), True, 'import numpy as np\n'), ((8416, 8454), 'numpy.sqrt', 'np.sqrt', (['(0.1 + A[ind_neg] + D[ind_neg])'], {}), '(0.1 + A[ind_neg] + 
D[ind_neg])\n', (8423, 8454), True, 'import numpy as np\n'), ((5617, 5632), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5630, 5632), False, 'import torch\n'), ((8095, 8123), 'numpy.sign', 'np.sign', (['db_BxBy[:][ind_pos]'], {}), '(db_BxBy[:][ind_pos])\n', (8102, 8123), True, 'import numpy as np\n'), ((8126, 8158), 'numpy.sqrt', 'np.sqrt', (['(A[ind_pos] + D[ind_pos])'], {}), '(A[ind_pos] + D[ind_pos])\n', (8133, 8158), True, 'import numpy as np\n'), ((8196, 8208), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (8203, 8208), True, 'import numpy as np\n'), ((8286, 8314), 'numpy.sign', 'np.sign', (['db_BxBy[:][ind_neg]'], {}), '(db_BxBy[:][ind_neg])\n', (8293, 8314), True, 'import numpy as np\n'), ((8317, 8349), 'numpy.sqrt', 'np.sqrt', (['(A[ind_neg] + D[ind_neg])'], {}), '(A[ind_neg] + D[ind_neg])\n', (8324, 8349), True, 'import numpy as np\n'), ((1940, 2005), 'model_x1.block', 'model_x1.block', ([], {'n_input_channels': '(112 * 4)', 'n_output_channels': '(7 * 7)'}), '(n_input_channels=112 * 4, n_output_channels=7 * 7)\n', (1954, 2005), False, 'import model_x1\n'), ((2289, 2354), 'model_x2.block', 'model_x2.block', ([], {'n_input_channels': '(112 * 4)', 'n_output_channels': '(7 * 7)'}), '(n_input_channels=112 * 4, n_output_channels=7 * 7)\n', (2303, 2354), False, 'import model_x2\n'), ((8388, 8400), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (8395, 8400), True, 'import numpy as np\n'), ((5010, 5025), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5023, 5025), False, 'import torch\n'), ((5258, 5273), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5271, 5273), False, 'import torch\n')] |
import rclpy
from rclpy.time import Time, Duration
from rclpy.node import Node
from std_msgs.msg import Header
from tf2_ros import LookupException, ExtrapolationException, ConnectivityException
from tf2_ros.buffer import Buffer
from tf2_ros.transform_broadcaster import TransformBroadcaster
from tf2_ros.transform_listener import TransformListener
from tf_transformations import euler_from_quaternion, quaternion_from_euler
from rclpy.qos import QoSPresetProfiles
from geometry_msgs.msg import TransformStamped
import numpy as np
from .filter_type import filters
class RigidBodyKalman(Node):
    """ROS2 node that smooths a pose estimate with a Kalman filter.

    Waits for an 'announcement' header, looks up the matching
    'estimated_pose' transform, runs it through the configured filter and
    re-broadcasts the result as '<namespace>/filtered_estimation'.
    """
    def __init__(self):
        super().__init__('kalman_filter')
        # Broadcaster for the filtered transform.
        self.pose_br = TransformBroadcaster(self)
        # tf2 buffer + listener so incoming transforms can be looked up.
        self.buffer = Buffer()
        TransformListener(self.buffer, self)
        # Declare parameters (must happen before get_parameter).
        self.declare_parameter('verbose', 1)
        self.declare_parameter('filter_type', 'const_accel')
        self.declare_parameter('duration', False)
        self.declare_parameter('state_indexes', '0,1,2,9,10,11')
        # Resolve parameters; the filter is built with a fixed 1/60 s step.
        self.filter = filters[self.get_parameter('filter_type').get_parameter_value().string_value](1/60)
        self.verbose = self.get_parameter('verbose').get_parameter_value().integer_value
        self.duration = self.get_parameter('duration').get_parameter_value().bool_value
        # Indexes into the filter state vector for [x, y, z, roll, pitch, yaw].
        self.state_indexes = [int(i) for i in self.get_parameter('state_indexes').get_parameter_value().string_value.split(',')]
        self.announcement = None
        self.create_subscription(
            Header,
            'announcement',
            self.set_announcement,
            QoSPresetProfiles.get_from_short_key('sensor_data')
        )
    def set_announcement(self, msg):
        """Store the latest announcement header; consumed by the spin loop."""
        self.announcement = msg
    def callback(self, t):
        """Filter the transform `t` in place and broadcast the smoothed estimate."""
        angles = np.array(euler_from_quaternion([t.transform.rotation.x, t.transform.rotation.y, t.transform.rotation.z, t.transform.rotation.w]), dtype=np.float32)
        trans = np.array([t.transform.translation.x, t.transform.translation.y, t.transform.translation.z], dtype=np.float32)
        self.filter.predict()
        self.filter.update(np.concatenate((trans, angles)))
        t.transform.translation.x = self.filter.x[self.state_indexes[0]]
        t.transform.translation.y = self.filter.x[self.state_indexes[1]]
        t.transform.translation.z = self.filter.x[self.state_indexes[2]]
        quat = quaternion_from_euler(self.filter.x[self.state_indexes[3]], self.filter.x[self.state_indexes[4]], self.filter.x[self.state_indexes[5]])
        t.transform.rotation.x = quat[0]
        t.transform.rotation.y = quat[1]
        t.transform.rotation.z = quat[2]
        # BUG FIX: quaternion_from_euler returns [x, y, z, w]; previously the
        # scalar part was never assigned, leaving a stale w from the incoming
        # transform and producing a non-normalised quaternion.
        t.transform.rotation.w = quat[3]
        t.child_frame_id = (self.get_namespace() + '/filtered_estimation').lstrip('/')
        self.pose_br.sendTransform(t)
def main(args=None):
    """Entry point: initialise ROS, spin the Kalman node, shut down cleanly."""
    rclpy.init(args=args)
    kalman_node = RigidBodyKalman()
    while rclpy.ok():
        rclpy.spin_once(kalman_node)
        announcement = kalman_node.announcement
        if announcement is None:
            continue
        try:
            transform = kalman_node.buffer.lookup_transform(
                'world',
                (kalman_node.get_namespace() + '/estimated_pose').lstrip('/'),
                announcement.stamp)
        except (LookupException, ConnectivityException, ExtrapolationException):
            pass
        except TypeError:
            # Unexpected announcement payload -- log it for debugging.
            kalman_node.get_logger().info(str(announcement))
        else:
            kalman_node.callback(transform)
            kalman_node.announcement = None
    kalman_node.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
main() | [
"tf2_ros.transform_broadcaster.TransformBroadcaster",
"rclpy.ok",
"tf_transformations.quaternion_from_euler",
"tf2_ros.buffer.Buffer",
"rclpy.qos.QoSPresetProfiles.get_from_short_key",
"tf2_ros.transform_listener.TransformListener",
"numpy.array",
"rclpy.spin_once",
"numpy.concatenate",
"rclpy.ini... | [((2994, 3015), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (3004, 3015), False, 'import rclpy\n'), ((3060, 3070), 'rclpy.ok', 'rclpy.ok', ([], {}), '()\n', (3068, 3070), False, 'import rclpy\n'), ((3606, 3622), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (3620, 3622), False, 'import rclpy\n'), ((727, 753), 'tf2_ros.transform_broadcaster.TransformBroadcaster', 'TransformBroadcaster', (['self'], {}), '(self)\n', (747, 753), False, 'from tf2_ros.transform_broadcaster import TransformBroadcaster\n'), ((828, 836), 'tf2_ros.buffer.Buffer', 'Buffer', ([], {}), '()\n', (834, 836), False, 'from tf2_ros.buffer import Buffer\n'), ((845, 881), 'tf2_ros.transform_listener.TransformListener', 'TransformListener', (['self.buffer', 'self'], {}), '(self.buffer, self)\n', (862, 881), False, 'from tf2_ros.transform_listener import TransformListener\n'), ((2094, 2208), 'numpy.array', 'np.array', (['[t.transform.translation.x, t.transform.translation.y, t.transform.\n translation.z]'], {'dtype': 'np.float32'}), '([t.transform.translation.x, t.transform.translation.y, t.transform\n .translation.z], dtype=np.float32)\n', (2102, 2208), True, 'import numpy as np\n'), ((2555, 2695), 'tf_transformations.quaternion_from_euler', 'quaternion_from_euler', (['self.filter.x[self.state_indexes[3]]', 'self.filter.x[self.state_indexes[4]]', 'self.filter.x[self.state_indexes[5]]'], {}), '(self.filter.x[self.state_indexes[3]], self.filter.x[\n self.state_indexes[4]], self.filter.x[self.state_indexes[5]])\n', (2576, 2695), False, 'from tf_transformations import euler_from_quaternion, quaternion_from_euler\n'), ((3080, 3101), 'rclpy.spin_once', 'rclpy.spin_once', (['node'], {}), '(node)\n', (3095, 3101), False, 'import rclpy\n'), ((1750, 1801), 'rclpy.qos.QoSPresetProfiles.get_from_short_key', 'QoSPresetProfiles.get_from_short_key', (['"""sensor_data"""'], {}), "('sensor_data')\n", (1786, 1801), False, 'from rclpy.qos import QoSPresetProfiles\n'), 
((1939, 2063), 'tf_transformations.euler_from_quaternion', 'euler_from_quaternion', (['[t.transform.rotation.x, t.transform.rotation.y, t.transform.rotation.z, t.\n transform.rotation.w]'], {}), '([t.transform.rotation.x, t.transform.rotation.y, t.\n transform.rotation.z, t.transform.rotation.w])\n', (1960, 2063), False, 'from tf_transformations import euler_from_quaternion, quaternion_from_euler\n'), ((2270, 2300), 'numpy.concatenate', 'np.concatenate', (['(trans, euler)'], {}), '((trans, euler))\n', (2284, 2300), True, 'import numpy as np\n')] |
# Programmer : <NAME>
import random
import numpy as np
from sklearn import datasets
from Py_FS.wrapper.nature_inspired.algorithm import Algorithm
from Py_FS.wrapper.population_based._utilities import compute_fitness, sort_agents, compute_accuracy
class HS(Algorithm):
    """Harmony Search (HS) population-based feature selection.

    Parameters
    ----------
    num_agents : int
        Number of harmonies kept in the harmony memory.
    max_iter : int
        Maximum number of generations.
    train_data, train_label :
        Training samples and their class labels.
    save_conv_graph : bool
        Whether to save the convergence graph.
    seed : int
        Random seed.

    Algorithm outline
    -----------------
    1. Initialise a Harmony Memory (HM).
    2. Improvise a new harmony from HM.
    3. If the new harmony is better than the worst harmony in HM, replace
       the worst harmony with it.
    4. If the stopping criterion is not met, go to step 2.
    """
    def __init__(self,
                 num_agents,
                 max_iter,
                 train_data,
                 train_label,
                 save_conv_graph=False,
                 seed=0):
        super().__init__(num_agents=num_agents,
                         max_iter=max_iter,
                         train_data=train_data,
                         train_label=train_label,
                         save_conv_graph=save_conv_graph,
                         seed=seed)
        self.algo_name = 'HS'
        self.agent_name = 'Harmony'
        self.algo_params = {}
    def user_input(self):
        """Prompt for the HS hyper-parameters (defaults: HMCR=0.9, PAR=0.3)."""
        # HMCR: Harmony Memory Consideration Rate.
        self.algo_params['HMCR'] = float(
            input('HMCR [0-1]: ') or 0.9)
        # PAR: Pitch Adjustment Rate.
        self.algo_params['PAR'] = float(
            input('PAR [0-1]: ') or 0.3)
    def improvise(self):
        """Improvise one new harmony; replace the worst harmony if it improves it."""
        HMCR_randValue = np.random.rand()
        num_features = self.population[0, :].shape[0]
        newHarmony = np.zeros([1, num_features])
        # Harmony Memory consideration: with probability HMCR, copy each
        # feature bit from a randomly chosen harmony in memory; otherwise
        # randomise the whole new harmony.
        if HMCR_randValue <= self.algo_params['HMCR']:
            for featureNum in range(num_features):
                selectedAgent = random.randint(0, self.num_agents - 1)
                newHarmony[0, featureNum] = self.population[selectedAgent, featureNum]
        else:
            for featureNum in range(num_features):
                newHarmony[0, featureNum] = random.randint(0, 1)
        for featureNum in range(num_features):
            # Pitch adjustment: flip the feature bit when rand > PAR.
            # NOTE(review): this flips with probability (1 - PAR); classical
            # HS pitch-adjusts with probability PAR -- confirm intended.
            PAR_randValue = np.random.rand()
            if PAR_randValue > self.algo_params['PAR']:
                newHarmony[0, featureNum] = 1 - newHarmony[0, featureNum]
        fitnessHarmony = self.obj_function(
            newHarmony, self.training_data)
        # Replace the worst harmony (last slot) if the new one is fitter.
        if self.fitness[self.num_agents-1] < fitnessHarmony:
            self.population[self.num_agents-1, :] = newHarmony
            self.fitness[self.num_agents-1] = fitnessHarmony
        # Keep memory sorted by fitness and track the best agent found so far.
        self.population, self.fitness = sort_agents(
            self.population, self.fitness)
        if self.fitness[0] > self.Leader_fitness:
            self.Leader_agent = self.population[0].copy()
            self.Leader_fitness = self.fitness[0].copy()
    def next(self):
        """Advance the search by one generation."""
        print('\n================================================================================')
        print('                          Iteration - {}'.format(self.cur_iter+1))
        print('================================================================================\n')
        # perform improvisation, replacement
        self.improvise()
        self.cur_iter += 1
if __name__ == '__main__':
    # Demo run on the sklearn digits data set.
    digits = datasets.load_digits()
    algo = HS(
        num_agents=20,
        max_iter=5,
        train_data=digits.data,
        train_label=digits.target,
    )
    solution = algo.run()
| [
"numpy.random.rand",
"sklearn.datasets.load_digits",
"numpy.zeros",
"Py_FS.wrapper.population_based._utilities.sort_agents",
"random.randint"
] | [((4335, 4357), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {}), '()\n', (4355, 4357), False, 'from sklearn import datasets\n'), ((2462, 2478), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2476, 2478), True, 'import numpy as np\n'), ((2558, 2585), 'numpy.zeros', 'np.zeros', (['[1, num_features]'], {}), '([1, num_features])\n', (2566, 2585), True, 'import numpy as np\n'), ((3652, 3694), 'Py_FS.wrapper.population_based._utilities.sort_agents', 'sort_agents', (['self.population', 'self.fitness'], {}), '(self.population, self.fitness)\n', (3663, 3694), False, 'from Py_FS.wrapper.population_based._utilities import compute_fitness, sort_agents, compute_accuracy\n'), ((3147, 3163), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3161, 3163), True, 'import numpy as np\n'), ((2774, 2812), 'random.randint', 'random.randint', (['(0)', '(self.num_agents - 1)'], {}), '(0, self.num_agents - 1)\n', (2788, 2812), False, 'import random\n'), ((3015, 3035), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (3029, 3035), False, 'import random\n')] |
import torch
import torch.nn.functional as F
# import cv2
import numpy as np
def grad_to_inp(input, model, target_label_idx, device=None):
    """Return (gradient of the target-class probability w.r.t. `input`,
    target class index).

    If `target_label_idx` is None, the model's argmax prediction is used.
    """
    input = input.to(device) if device else input.cpu().detach()
    input.requires_grad = True
    probs = F.softmax(model(input), dim=1)
    if target_label_idx is None:
        target_label_idx = torch.argmax(probs, 1).item()
    # One column index per batch row, all pointing at the target class.
    col = np.ones((probs.size()[0], 1)) * target_label_idx
    col = torch.tensor(col, dtype=torch.int64)
    if device:
        col = col.to(device)
    selected = probs.gather(1, col)
    # Drop stale parameter gradients before the backward pass.
    model.zero_grad()
    selected.backward()
    return input.grad.detach().cpu().numpy()[0], target_label_idx
def calculate_outputs_and_gradients(inputs, model, target_label_idx, device=None):
    """Run `grad_to_inp` over a batch of inputs.

    Returns ``(gradients, preds)`` where ``gradients`` stacks the per-input
    gradients into one ndarray (all inputs must share a shape) and ``preds``
    holds the predicted (or supplied) label index for each input.
    """
    # FIX: removed the unused local `predict_idx` left over from an earlier
    # revision.
    gradients = []
    preds = []
    for input in inputs:
        gradient, pred = grad_to_inp(input, model, target_label_idx, device=device)
        gradients.append(gradient)
        preds.append(pred)
    gradients = np.array(gradients)
    return gradients, preds
def pre_processing(obs, device):
    """Normalise an HxWx3 image with ImageNet statistics and return a
    1x3xHxW float32 tensor (requires_grad=True) on `device`."""
    imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3])
    imagenet_std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3])
    normalised = ((obs / 255) - imagenet_mean) / imagenet_std
    # HWC -> CHW, then prepend the batch axis.
    chw = np.transpose(normalised, (2, 0, 1))
    batched = np.array(np.expand_dims(chw, 0))
    return torch.tensor(batched, dtype=torch.float32, device=device, requires_grad=True)
# generate the entire images
def generate_entrie_images(img_origin, img_grad, img_grad_overlay, img_integrad, img_integrad_overlay):
    """Tile the original image and its gradient/integrated-gradient
    visualisations into a single 2x3 panel separated by white gutters.

    NOTE(review): gutter sizes are derived from img_grad.shape[0], which
    assumes all images are square and equally sized -- confirm callers
    guarantee this.
    """
    side = img_grad.shape[0]
    white_col = 255 * np.ones((side, 10, 3), dtype=np.uint8)
    white_row = 255 * np.ones((10, 20 + side * 3, 3), dtype=np.uint8)
    origin_rgb = img_origin[:, :, (2, 1, 0)]  # BGR -> RGB channel swap
    top = np.concatenate([origin_rgb, white_col, img_grad_overlay, white_col, img_grad], 1)
    bottom = np.concatenate([origin_rgb, white_col, img_integrad_overlay, white_col, img_integrad], 1)
    return np.concatenate([top, white_row, bottom], 0)
| [
"numpy.ones",
"torch.tensor",
"numpy.array",
"numpy.expand_dims",
"numpy.concatenate",
"numpy.transpose",
"torch.nn.functional.softmax",
"torch.argmax"
] | [((306, 330), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (315, 330), True, 'import torch.nn.functional as F\n'), ((496, 534), 'torch.tensor', 'torch.tensor', (['index'], {'dtype': 'torch.int64'}), '(index, dtype=torch.int64)\n', (508, 534), False, 'import torch\n'), ((1128, 1147), 'numpy.array', 'np.array', (['gradients'], {}), '(gradients)\n', (1136, 1147), True, 'import numpy as np\n'), ((1434, 1462), 'numpy.transpose', 'np.transpose', (['obs', '(2, 0, 1)'], {}), '(obs, (2, 0, 1))\n', (1446, 1462), True, 'import numpy as np\n'), ((1473, 1495), 'numpy.expand_dims', 'np.expand_dims', (['obs', '(0)'], {}), '(obs, 0)\n', (1487, 1495), True, 'import numpy as np\n'), ((1506, 1519), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1514, 1519), True, 'import numpy as np\n'), ((1537, 1610), 'torch.tensor', 'torch.tensor', (['obs'], {'dtype': 'torch.float32', 'device': 'device', 'requires_grad': '(True)'}), '(obs, dtype=torch.float32, device=device, requires_grad=True)\n', (1549, 1610), False, 'import torch\n'), ((1990, 2084), 'numpy.concatenate', 'np.concatenate', (['[img_origin[:, :, (2, 1, 0)], blank, img_grad_overlay, blank, img_grad]', '(1)'], {}), '([img_origin[:, :, (2, 1, 0)], blank, img_grad_overlay, blank,\n img_grad], 1)\n', (2004, 2084), True, 'import numpy as np\n'), ((2092, 2194), 'numpy.concatenate', 'np.concatenate', (['[img_origin[:, :, (2, 1, 0)], blank, img_integrad_overlay, blank, img_integrad]', '(1)'], {}), '([img_origin[:, :, (2, 1, 0)], blank, img_integrad_overlay,\n blank, img_integrad], 1)\n', (2106, 2194), True, 'import numpy as np\n'), ((2203, 2246), 'numpy.concatenate', 'np.concatenate', (['[upper, blank_hor, down]', '(0)'], {}), '([upper, blank_hor, down], 0)\n', (2217, 2246), True, 'import numpy as np\n'), ((1837, 1888), 'numpy.ones', 'np.ones', (['(img_grad.shape[0], 10, 3)'], {'dtype': 'np.uint8'}), '((img_grad.shape[0], 10, 3), dtype=np.uint8)\n', (1844, 1888), True, 'import 
numpy as np\n'), ((1911, 1971), 'numpy.ones', 'np.ones', (['(10, 20 + img_grad.shape[0] * 3, 3)'], {'dtype': 'np.uint8'}), '((10, 20 + img_grad.shape[0] * 3, 3), dtype=np.uint8)\n', (1918, 1971), True, 'import numpy as np\n'), ((1263, 1294), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1271, 1294), True, 'import numpy as np\n'), ((1324, 1355), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1332, 1355), True, 'import numpy as np\n'), ((391, 414), 'torch.argmax', 'torch.argmax', (['output', '(1)'], {}), '(output, 1)\n', (403, 414), False, 'import torch\n')] |
import tensorflow as tf
import h5py
import numpy as np
# Load the full data set from the HDF5 file.
full_data = None
full_label = None
with h5py.File('/mnt/disk1/bole.h5', 'r') as hf:
    full_data = np.array(hf.get('data'))
    full_label = np.array(hf.get('label'))
# Split: first 40 samples for training, the remainder for testing.
data = full_data[:40]
label = full_label[:40]
test_data = full_data[40:]
test_label = full_label[40:]
# Hyperparameters
learning_rate = 0.0001  # if the loss becomes NaN, try lowering this
step_size = 20000
batch_size = 10  # NOTE(review): not referenced below -- training calls next_batch with 40
display_progress_step = 1
# Network layer sizes: 4 inputs -> 12 -> 6 -> 3 output classes
input_layer = 4
hidden_layer_1 = 12
hidden_layer_2 = 6
output_layer = 3
def next_batch(num, data, labels):
    """Return a random batch of `num` samples and their labels.

    Samples are drawn without replacement; `data` and `labels` must be
    index-aligned sequences of equal length.
    """
    # FIX: repaired the garbled docstring; a truncated random permutation
    # replaces the arange+shuffle+slice sequence (same contract).
    idx = np.random.permutation(len(data))[:num]
    data_shuffle = [data[i] for i in idx]
    labels_shuffle = [labels[i] for i in idx]
    return np.asarray(data_shuffle), np.asarray(labels_shuffle)
# A TensorFlow graph consists of:
#   - placeholder variables that feed input into the graph
#   - model variables that are optimised during training
#   - the model itself: a function of the placeholders and variables
#   - a cost measure used to guide the optimisation
#   - an optimisation method that updates the variables
# Input/output placeholders with shape [num_samples, layer_dimension];
# `None` lets the tensor hold any number of samples.
X = tf.placeholder("float", [None, input_layer])
Y = tf.placeholder("float", [None, output_layer])
# Weights and biases as TF variables with random-normal initial values.
# Weight shapes are matrices [fan_in, fan_out].
weights = {
    1: tf.Variable(tf.random_normal([input_layer, hidden_layer_1])),
    2: tf.Variable(tf.random_normal([hidden_layer_1, hidden_layer_2])),
    3: tf.Variable(tf.random_normal([hidden_layer_2, output_layer]))
}
# Bias shapes are vectors [fan_out].
biases = {
    1 : tf.Variable(tf.random_normal([hidden_layer_1])),
    2 : tf.Variable(tf.random_normal([hidden_layer_2])),
    3 : tf.Variable(tf.random_normal([output_layer]))
}
#model blueprint in forward propagation way (x.w + b)
def neural_net(x):
    """Forward pass of the 3-layer perceptron (x @ W + b per layer); returns logits."""
    hidden_1 = tf.matmul(x, weights[1]) + biases[1]
    hidden_2 = tf.matmul(hidden_1, weights[2]) + biases[2]
    return tf.add(tf.matmul(hidden_2, weights[3]), biases[3])
# Build the model: raw logits plus a softmax summary averaged over the batch.
logits = neural_net(X)
prediction = tf.reduce_mean(tf.nn.softmax(logits), axis=0)
# Define the loss and training optimizer.
# Cross-entropy loss is used for classification: the loss approaches 0 when
# the predicted output matches the true output.  The op produces one value
# per sample, so they are averaged into a scalar.
loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
training_optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss=loss_function)
# Evaluate the model (correct prediction and accuracy).
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1)) # argmax over axis 1 because the labels are one-hot encoded rows
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # tf.cast converts booleans [True, False, ...] to floats [1., 0., ...] element-wise
# Initialiser op for all tf.Variable values.
init = tf.global_variables_initializer()
# Training procedure:
# 1. run a session of the initializer to give initial values to variables
# 2. for each step run a session of the training optimizer (backpropagation)
# 3. also run a session to display the loss and accuracy each display step
# 4. calculate the test accuracy by running the model on the test dataset
"""
step is the number of times we update the gradient which mean
the number of weight and bias update
"""
with tf.Session() as session:
    session.run(init) # execute the initializer
    # training loop
    for step in range(1, step_size+1):
        x_batch, y_batch = next_batch(40, data, label) # draw inputs and true outputs from the training set
        train_feed_dict = {X: x_batch, Y:y_batch} # map the placeholders to the batch values
        session.run(training_optimizer, feed_dict=train_feed_dict) # backpropagation
        if step % display_progress_step == 0 or step == 1: # report on the first step and every display step
            train_loss, train_acc = session.run([loss_function, accuracy], feed_dict=train_feed_dict) # calculate training loss and accuracy
            print("Step " + str(step) + ", Minibatch Loss = {:.4f}, ".format(train_loss) + "Training accuracy = {:.3f}".format(train_acc)) # print result
    # testing: average predicted class probabilities over the held-out samples
    test_feed_dict = {X: test_data, Y: test_label}
    result = session.run(prediction, feed_dict=test_feed_dict) * 100
    print("P(liverpool): {}, P(Man United): {}, P(Draw): {}".format(result[0], result[1], result[2]) )
| [
"tensorflow.random_normal",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.asarray",
"h5py.File",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.argmax",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.nn.softmax",
"tens... | [((1625, 1669), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, input_layer]'], {}), "('float', [None, input_layer])\n", (1639, 1669), True, 'import tensorflow as tf\n'), ((1675, 1720), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, output_layer]'], {}), "('float', [None, output_layer])\n", (1689, 1720), True, 'import tensorflow as tf\n'), ((3554, 3587), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3585, 3587), True, 'import tensorflow as tf\n'), ((109, 145), 'h5py.File', 'h5py.File', (['"""/mnt/disk1/bole.h5"""', '"""r"""'], {}), "('/mnt/disk1/bole.h5', 'r')\n", (118, 145), False, 'import h5py\n'), ((742, 764), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (759, 764), True, 'import numpy as np\n'), ((2614, 2635), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (2627, 2635), True, 'import tensorflow as tf\n'), ((2955, 3019), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'Y'}), '(logits=logits, labels=Y)\n', (2994, 3019), True, 'import tensorflow as tf\n'), ((3205, 3225), 'tensorflow.argmax', 'tf.argmax', (['logits', '(1)'], {}), '(logits, 1)\n', (3214, 3225), True, 'import tensorflow as tf\n'), ((3227, 3242), 'tensorflow.argmax', 'tf.argmax', (['Y', '(1)'], {}), '(Y, 1)\n', (3236, 3242), True, 'import tensorflow as tf\n'), ((3361, 3400), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (3368, 3400), True, 'import tensorflow as tf\n'), ((4045, 4057), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4055, 4057), True, 'import tensorflow as tf\n'), ((887, 911), 'numpy.asarray', 'np.asarray', (['data_shuffle'], {}), '(data_shuffle)\n', (897, 911), True, 'import numpy as np\n'), ((913, 939), 'numpy.asarray', 'np.asarray', (['labels_shuffle'], {}), 
'(labels_shuffle)\n', (923, 939), True, 'import numpy as np\n'), ((1873, 1920), 'tensorflow.random_normal', 'tf.random_normal', (['[input_layer, hidden_layer_1]'], {}), '([input_layer, hidden_layer_1])\n', (1889, 1920), True, 'import tensorflow as tf\n'), ((1942, 1992), 'tensorflow.random_normal', 'tf.random_normal', (['[hidden_layer_1, hidden_layer_2]'], {}), '([hidden_layer_1, hidden_layer_2])\n', (1958, 1992), True, 'import tensorflow as tf\n'), ((2014, 2062), 'tensorflow.random_normal', 'tf.random_normal', (['[hidden_layer_2, output_layer]'], {}), '([hidden_layer_2, output_layer])\n', (2030, 2062), True, 'import tensorflow as tf\n'), ((2148, 2182), 'tensorflow.random_normal', 'tf.random_normal', (['[hidden_layer_1]'], {}), '([hidden_layer_1])\n', (2164, 2182), True, 'import tensorflow as tf\n'), ((2205, 2239), 'tensorflow.random_normal', 'tf.random_normal', (['[hidden_layer_2]'], {}), '([hidden_layer_2])\n', (2221, 2239), True, 'import tensorflow as tf\n'), ((2262, 2294), 'tensorflow.random_normal', 'tf.random_normal', (['[output_layer]'], {}), '([output_layer])\n', (2278, 2294), True, 'import tensorflow as tf\n'), ((2393, 2417), 'tensorflow.matmul', 'tf.matmul', (['x', 'weights[1]'], {}), '(x, weights[1])\n', (2402, 2417), True, 'import tensorflow as tf\n'), ((2451, 2481), 'tensorflow.matmul', 'tf.matmul', (['layer_1', 'weights[2]'], {}), '(layer_1, weights[2])\n', (2460, 2481), True, 'import tensorflow as tf\n'), ((2505, 2535), 'tensorflow.matmul', 'tf.matmul', (['layer_2', 'weights[3]'], {}), '(layer_2, weights[3])\n', (2514, 2535), True, 'import tensorflow as tf\n'), ((3042, 3090), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (3075, 3090), True, 'import tensorflow as tf\n')] |
from __init__ import OUTPUT
from tqdm import tqdm
import pandas as pd
from rdkit import Chem
import os, sys
import numpy as np
import joblib
print("CLASSIFIER HIGH ACTIVE")
# Input molecules produced by the previous pipeline stage.
df=pd.read_csv(os.path.join(OUTPUT, "data_7.csv"))
smiles = list(df["Smiles"])
# Folder holding the trained high-activity model artefacts.
ml_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "highpredictor", "results")
from mordred import Calculator, descriptors
# Make the shared descriptor utilities importable.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "predictor"))
from descriptors.utils import impute, normalize
# Passed to the mordred Calculator as its ignore_3D flag.
IGNORE_3D = False
class Mordred(object):
    """Thin wrapper around the mordred descriptor calculator."""
    def __init__(self):
        pass
    def calc(self, mols):
        """Compute all mordred descriptors for `mols` and return them as a
        float ndarray of shape (n_mols, n_descriptors)."""
        calc = Calculator(descriptors, ignore_3D=IGNORE_3D)
        df = calc.pandas(mols)
        # BUG FIX: the `np.float` alias was removed in NumPy 1.24; the builtin
        # `float` (equivalent to np.float64) is the drop-in replacement.
        return np.array(df, dtype=float)
desc = Mordred()
print("Loading transformation data")
# Preprocessing statistics and feature indexes fitted during training.
with open(os.path.join(ml_folder, "medians.npy"), "rb") as f:
    medians = np.load(f)
with open(os.path.join(ml_folder, "mus.npy"), "rb") as f:
    mus = np.load(f)
with open(os.path.join(ml_folder, "sigmas.npy"), "rb") as f:
    sigmas = np.load(f)
with open(os.path.join(ml_folder, "sel1.npy"), "rb") as f:
    sel_idxs = np.load(f)
def chunker(lst, n):
    """Yield successive chunks of `lst`, each `n` items long (the final chunk
    may be shorter)."""
    for start in range(0, len(lst), n):
        yield lst[start:start + n]
print("Loading classifier model")
# Feature selector and classifier fitted during training.
sel0 = joblib.load(os.path.join(ml_folder, "sel0.pkl"))
mdl = joblib.load(os.path.join(ml_folder, "classifier.pkl"))
yp = []  # predicted probabilities, accumulated chunk by chunk
done = 0
print("Total", len(smiles))
# Score the molecules in chunks of 1000 to bound memory usage.
for chunk in chunker(smiles, 1000):
    print("Calculating descriptors")
    mols = [Chem.MolFromSmiles(smi) for smi in tqdm(chunk)]
    X = desc.calc(mols)
    print(X.shape)
    # Apply the stored imputation/scaling statistics, then feature selection.
    X = impute(X, medians)
    X = normalize(X, mus, sigmas)
    print("Predicting")
    X = sel0.transform(X)
    print(X.shape)
    X = X[:,sel_idxs]
    print(X.shape)
    yp += list(mdl.predict_proba(X)[:,1])  # probability of class 1
    done += len(chunk)
    print("Done", done)
yp = np.array(yp)
print(df.shape)
df["HighClassifier"] = yp
print("Not very trustable predictor, only removing lowest quartile")
# Keep only molecules scoring above the lowest quartile of predictions.
p25 = np.percentile(yp, 25)
print(p25)
df = df[df["HighClassifier"] > p25]
df.to_csv(os.path.join(OUTPUT, "data_8.csv"), index=False)
| [
"mordred.Calculator",
"tqdm.tqdm",
"os.path.join",
"rdkit.Chem.MolFromSmiles",
"numpy.array",
"descriptors.utils.normalize",
"descriptors.utils.impute",
"os.path.abspath",
"numpy.percentile",
"numpy.load"
] | [((1913, 1925), 'numpy.array', 'np.array', (['yp'], {}), '(yp)\n', (1921, 1925), True, 'import numpy as np\n'), ((2045, 2066), 'numpy.percentile', 'np.percentile', (['yp', '(25)'], {}), '(yp, 25)\n', (2058, 2066), True, 'import numpy as np\n'), ((191, 225), 'os.path.join', 'os.path.join', (['OUTPUT', '"""data_7.csv"""'], {}), "(OUTPUT, 'data_7.csv')\n", (203, 225), False, 'import os, sys\n'), ((925, 935), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (932, 935), True, 'import numpy as np\n'), ((1005, 1015), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1012, 1015), True, 'import numpy as np\n'), ((1091, 1101), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1098, 1101), True, 'import numpy as np\n'), ((1177, 1187), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1184, 1187), True, 'import numpy as np\n'), ((1327, 1362), 'os.path.join', 'os.path.join', (['ml_folder', '"""sel0.pkl"""'], {}), "(ml_folder, 'sel0.pkl')\n", (1339, 1362), False, 'import os, sys\n'), ((1382, 1423), 'os.path.join', 'os.path.join', (['ml_folder', '"""classifier.pkl"""'], {}), "(ml_folder, 'classifier.pkl')\n", (1394, 1423), False, 'import os, sys\n'), ((1655, 1673), 'descriptors.utils.impute', 'impute', (['X', 'medians'], {}), '(X, medians)\n', (1661, 1673), False, 'from descriptors.utils import impute, normalize\n'), ((1682, 1707), 'descriptors.utils.normalize', 'normalize', (['X', 'mus', 'sigmas'], {}), '(X, mus, sigmas)\n', (1691, 1707), False, 'from descriptors.utils import impute, normalize\n'), ((2125, 2159), 'os.path.join', 'os.path.join', (['OUTPUT', '"""data_8.csv"""'], {}), "(OUTPUT, 'data_8.csv')\n", (2137, 2159), False, 'import os, sys\n'), ((298, 323), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (313, 323), False, 'import os, sys\n'), ((672, 716), 'mordred.Calculator', 'Calculator', (['descriptors'], {'ignore_3D': 'IGNORE_3D'}), '(descriptors, ignore_3D=IGNORE_3D)\n', (682, 716), False, 'from mordred import Calculator, descriptors\n'), ((763, 
791), 'numpy.array', 'np.array', (['df'], {'dtype': 'np.float'}), '(df, dtype=np.float)\n', (771, 791), True, 'import numpy as np\n'), ((859, 897), 'os.path.join', 'os.path.join', (['ml_folder', '"""medians.npy"""'], {}), "(ml_folder, 'medians.npy')\n", (871, 897), False, 'import os, sys\n'), ((947, 981), 'os.path.join', 'os.path.join', (['ml_folder', '"""mus.npy"""'], {}), "(ml_folder, 'mus.npy')\n", (959, 981), False, 'import os, sys\n'), ((1027, 1064), 'os.path.join', 'os.path.join', (['ml_folder', '"""sigmas.npy"""'], {}), "(ml_folder, 'sigmas.npy')\n", (1039, 1064), False, 'import os, sys\n'), ((1113, 1148), 'os.path.join', 'os.path.join', (['ml_folder', '"""sel1.npy"""'], {}), "(ml_folder, 'sel1.npy')\n", (1125, 1148), False, 'import os, sys\n'), ((1556, 1579), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (1574, 1579), False, 'from rdkit import Chem\n'), ((451, 476), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'import os, sys\n'), ((1591, 1602), 'tqdm.tqdm', 'tqdm', (['chunk'], {}), '(chunk)\n', (1595, 1602), False, 'from tqdm import tqdm\n')] |
from imutils.video import VideoStream
import time
import imutils
import cv2
import tensorflow.keras as k
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import img_to_array
from numpy import expand_dims
import math
import numpy as np
# --- Configuration -------------------------------------------------------
# Seconds of countdown before the background reference frame is captured.
TIME_PER_CAPTURE = 6
start = (20, 30) # start point for text used in putText
font = cv2.FONT_HERSHEY_DUPLEX
fontScale = 0.6
color = (0, 0, 0)
thickness = 1
# Input size expected by the feature-extraction model below.
image_size = (224, 224)
classes = 2
# Value written into thresholded difference maps / added to highlighted pixels.
shift = 100
# Thresholds applied to the background-vs-frame feature difference:
# kappa drives the bounding box, kappa_s drives the saliency overlay.
kappa, kappa_s = 7, 0
tic = time.time()
vs = VideoStream(src=0).start()
# --- Phase 1: countdown, then keep the last frame as the background ------
while True:
    toc = time.time()
    frame = vs.read()
    frame = imutils.resize(frame, width=650, height=650)
    frame = cv2.flip(frame, 1)
    time_elapsed = round(toc - tic)
    if time_elapsed == TIME_PER_CAPTURE:
        break
    else:
        cv2.putText(frame, 'Background picture taken in: ' + str(TIME_PER_CAPTURE - time_elapsed), start, font,
                    fontScale, color, thickness)
        cv2.imshow('Take Background Picture', frame)
        cv2.waitKey(1)
cv2.destroyAllWindows()
vs.stop()
background = cv2.resize(frame, image_size)
# Show the captured background until the user presses 'q'.
while True:
    cv2.putText(frame, 'Press q to quit', start, font, fontScale, color, thickness)
    cv2.imshow('Background', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
cv2.destroyAllWindows()
# Single (untrained, randomly initialised) separable convolution used as a
# fixed feature extractor; background and live frames are compared in its
# feature-map space rather than in raw pixels.
model = k.models.Sequential([
    k.layers.SeparableConv2D(64, (1, 1), activation='relu', input_shape=(224, 224, 3), depth_multiplier=3),
])
output_layer = 0
outputs = [model.layers[output_layer].output]
box_model = Model(inputs=model.inputs, outputs=outputs)
background_img = img_to_array(background)
background_img = expand_dims(background_img, axis=0)
feature_maps = box_model.predict(background_img)
# Average all 64 feature-map channels of the background into one 2-D map.
fmap_back_avg = np.zeros(shape=(feature_maps.shape[1], feature_maps.shape[2]))
span = int(math.sqrt(feature_maps.shape[-1]))
for fmap in feature_maps:
    i = 1
    for _ in range(span):
        for _ in range(span):
            fmap_back_avg += fmap[:, :, i - 1].squeeze()
            i += 1
fmap_back_avg /= (span ** 2)
# --- Phase 2: live loop — highlight what differs from the background -----
vs = VideoStream(src=0).start()
sal_flag = False
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=650, height=650)
    frame = cv2.flip(frame, 1)
    input_image = cv2.resize(frame, image_size)
    input_image = img_to_array(input_image)
    input_image = expand_dims(input_image, axis=0)
    feature_maps = box_model.predict(input_image)
    # Channel-averaged feature map of the current frame (same scheme as the
    # background map above).
    fmap_avg = np.zeros(shape=(feature_maps.shape[1], feature_maps.shape[2]))
    span = int(math.sqrt(feature_maps.shape[-1]))
    for fmap in feature_maps:
        i = 1
        for _ in range(span):
            for _ in range(span):
                fmap_avg += fmap[:, :, i - 1].squeeze()
                i += 1
    fmap_avg /= (span ** 2)
    # Threshold the difference twice: `diff` (threshold kappa) feeds the
    # bounding box, `sal_diff` (threshold kappa_s) feeds the blue overlay.
    diff = np.round(fmap_back_avg - fmap_avg, 2)
    sal_diff = np.round(fmap_back_avg - fmap_avg, 2)
    sal_diff[sal_diff <= kappa_s] = 0
    sal_diff[sal_diff > kappa_s] = shift
    diff[diff <= kappa] = 0
    diff[diff > kappa] = shift
    # For every row containing a "changed" cell, record the row index and
    # the first/last changed column.
    startx, endx, y = [], [], []
    count = 0
    for i in diff:
        if max(i) != 0:
            y.append(count)
            lis = list(i)
            startx.append(lis.index(shift))
            endx.append(len(lis) - list(reversed(lis)).index(shift) - 1)
        count += 1
    # Rescale feature-map coordinates (224x224, max index 223) to display
    # coordinates; 487 is presumably the resized frame height — TODO confirm.
    startx = np.array(startx)
    startx = (startx / 223 * 650).astype('int')
    endx = np.array(endx)
    endx = (endx / 223 * 650).astype('int')
    y = np.array(y)
    y = (y / 223 * 487).astype('int')
    start, end = (0, 0), (0, 0)
    if not (len(startx) == 0 or len(endx) == 0 or len(y) == 0):
        start = (min(startx), max(min(y), 0))
        end = (max(endx), max(y))
        cv2.rectangle(frame, start, end, color, thickness + 2)
    sal_diff = cv2.resize(sal_diff, (frame.shape[1], frame.shape[0]))
    key = cv2.waitKey(1) & 0xFF
    if key == ord('c'):
        break
    elif key == ord('s'):
        sal_flag = not sal_flag
    if sal_flag:
        # Boost the blue channel where the saliency map fired.
        frame[:, :, 0] = frame[:, :, 0] + sal_diff
    cv2.imshow('Press c to capture image, press s to toggle saliency', frame)
cv2.destroyAllWindows()
vs.stop()
# Persist the captured frame and its bounding-box annotation.
cv2.imwrite('Image.jpg', frame)
f = open('annot.txt', 'w+')
f.write(str(start)+str(end))
f.close()
| [
"cv2.rectangle",
"math.sqrt",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"imutils.video.VideoStream",
"tensorflow.keras.models.Model",
"tensorflow.keras.preprocessing.image.img_to_array",
"cv2.waitKey",
"numpy.round",
"cv2.putText",
"cv2.resize",
"time.time",
"cv2.imwrite",
"c... | [((523, 534), 'time.time', 'time.time', ([], {}), '()\n', (532, 534), False, 'import time\n'), ((1060, 1083), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1081, 1083), False, 'import cv2\n'), ((1109, 1138), 'cv2.resize', 'cv2.resize', (['frame', 'image_size'], {}), '(frame, image_size)\n', (1119, 1138), False, 'import cv2\n'), ((1348, 1371), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1369, 1371), False, 'import cv2\n'), ((1594, 1637), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.inputs', 'outputs': 'outputs'}), '(inputs=model.inputs, outputs=outputs)\n', (1599, 1637), False, 'from tensorflow.keras.models import Model\n'), ((1656, 1680), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['background'], {}), '(background)\n', (1668, 1680), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), ((1699, 1734), 'numpy.expand_dims', 'expand_dims', (['background_img'], {'axis': '(0)'}), '(background_img, axis=0)\n', (1710, 1734), False, 'from numpy import expand_dims\n'), ((1802, 1864), 'numpy.zeros', 'np.zeros', ([], {'shape': '(feature_maps.shape[1], feature_maps.shape[2])'}), '(shape=(feature_maps.shape[1], feature_maps.shape[2]))\n', (1810, 1864), True, 'import numpy as np\n'), ((4187, 4210), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4208, 4210), False, 'import cv2\n'), ((4223, 4254), 'cv2.imwrite', 'cv2.imwrite', (['"""Image.jpg"""', 'frame'], {}), "('Image.jpg', frame)\n", (4234, 4254), False, 'import cv2\n'), ((592, 603), 'time.time', 'time.time', ([], {}), '()\n', (601, 603), False, 'import time\n'), ((640, 684), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(650)', 'height': '(650)'}), '(frame, width=650, height=650)\n', (654, 684), False, 'import imutils\n'), ((698, 716), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (706, 716), False, 'import cv2\n'), ((1044, 1058), 'cv2.waitKey', 
'cv2.waitKey', (['(1)'], {}), '(1)\n', (1055, 1058), False, 'import cv2\n'), ((1157, 1236), 'cv2.putText', 'cv2.putText', (['frame', '"""Press q to quit"""', 'start', 'font', 'fontScale', 'color', 'thickness'], {}), "(frame, 'Press q to quit', start, font, fontScale, color, thickness)\n", (1168, 1236), False, 'import cv2\n'), ((1242, 1273), 'cv2.imshow', 'cv2.imshow', (['"""Background"""', 'frame'], {}), "('Background', frame)\n", (1252, 1273), False, 'import cv2\n'), ((1877, 1910), 'math.sqrt', 'math.sqrt', (['feature_maps.shape[-1]'], {}), '(feature_maps.shape[-1])\n', (1886, 1910), False, 'import math\n'), ((2216, 2260), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(650)', 'height': '(650)'}), '(frame, width=650, height=650)\n', (2230, 2260), False, 'import imutils\n'), ((2274, 2292), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (2282, 2292), False, 'import cv2\n'), ((2312, 2341), 'cv2.resize', 'cv2.resize', (['frame', 'image_size'], {}), '(frame, image_size)\n', (2322, 2341), False, 'import cv2\n'), ((2361, 2386), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['input_image'], {}), '(input_image)\n', (2373, 2386), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), ((2406, 2438), 'numpy.expand_dims', 'expand_dims', (['input_image'], {'axis': '(0)'}), '(input_image, axis=0)\n', (2417, 2438), False, 'from numpy import expand_dims\n'), ((2506, 2568), 'numpy.zeros', 'np.zeros', ([], {'shape': '(feature_maps.shape[1], feature_maps.shape[2])'}), '(shape=(feature_maps.shape[1], feature_maps.shape[2]))\n', (2514, 2568), True, 'import numpy as np\n'), ((2854, 2891), 'numpy.round', 'np.round', (['(fmap_back_avg - fmap_avg)', '(2)'], {}), '(fmap_back_avg - fmap_avg, 2)\n', (2862, 2891), True, 'import numpy as np\n'), ((2908, 2945), 'numpy.round', 'np.round', (['(fmap_back_avg - fmap_avg)', '(2)'], {}), '(fmap_back_avg - fmap_avg, 2)\n', (2916, 2945), True, 'import numpy as np\n'), ((3391, 
3407), 'numpy.array', 'np.array', (['startx'], {}), '(startx)\n', (3399, 3407), True, 'import numpy as np\n'), ((3469, 3483), 'numpy.array', 'np.array', (['endx'], {}), '(endx)\n', (3477, 3483), True, 'import numpy as np\n'), ((3538, 3549), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3546, 3549), True, 'import numpy as np\n'), ((3849, 3903), 'cv2.resize', 'cv2.resize', (['sal_diff', '(frame.shape[1], frame.shape[0])'], {}), '(sal_diff, (frame.shape[1], frame.shape[0]))\n', (3859, 3903), False, 'import cv2\n'), ((4112, 4185), 'cv2.imshow', 'cv2.imshow', (['"""Press c to capture image, press s to toggle saliency"""', 'frame'], {}), "('Press c to capture image, press s to toggle saliency', frame)\n", (4122, 4185), False, 'import cv2\n'), ((541, 559), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (552, 559), False, 'from imutils.video import VideoStream\n'), ((994, 1038), 'cv2.imshow', 'cv2.imshow', (['"""Take Background Picture"""', 'frame'], {}), "('Take Background Picture', frame)\n", (1004, 1038), False, 'import cv2\n'), ((1285, 1299), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1296, 1299), False, 'import cv2\n'), ((1408, 1515), 'tensorflow.keras.layers.SeparableConv2D', 'k.layers.SeparableConv2D', (['(64)', '(1, 1)'], {'activation': '"""relu"""', 'input_shape': '(224, 224, 3)', 'depth_multiplier': '(3)'}), "(64, (1, 1), activation='relu', input_shape=(224, \n 224, 3), depth_multiplier=3)\n", (1432, 1515), True, 'import tensorflow.keras as k\n'), ((2122, 2140), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (2133, 2140), False, 'from imutils.video import VideoStream\n'), ((2585, 2618), 'math.sqrt', 'math.sqrt', (['feature_maps.shape[-1]'], {}), '(feature_maps.shape[-1])\n', (2594, 2618), False, 'import math\n'), ((3778, 3832), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'start', 'end', 'color', '(thickness + 2)'], {}), '(frame, start, end, color, thickness + 2)\n', (3791, 3832), 
False, 'import cv2\n'), ((3915, 3929), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3926, 3929), False, 'import cv2\n')] |
import onnx
import numpy as np
from builder import *
# Maps each supported numpy integer dtype to its CamelCase spelling,
# used when composing generated test-file names (e.g. np.uint8 -> 'UInt8').
_camelName = {
    np.int8: 'Int8',
    np.uint8: 'UInt8',
    np.int16: 'Int16',
    np.uint16: 'UInt16',
    np.int32: 'Int32',
    np.int64: 'Int64'
}
def camel_name(typ):
    """Return the CamelCase spelling for numpy integer dtype *typ*.

    Raises KeyError for dtypes not listed in _camelName.
    """
    return _camelName[typ]
def convert_tests():
    """Emit cast-to-float test models for several integer input dtypes.

    For each dtype, writes a text proto (<name>.onnxtxt) and a binary
    model (<name>.onnx) into the working directory.
    """
    data_in = Placeholder()
    int_types = (np.int8, np.uint8, np.int16, np.uint16)
    for typ in int_types:
        builder = Exporter()
        builder.add_graph_input('data', data_in, shape=[1, 2, 2, 2],
                                elt_type=typ)
        builder.add_graph_output('output', Cast(data_in, np.float32))
        md = builder.export(f'{typ.__name__}_to_float')
        fname = f'cast{camel_name(typ)}ToFloat'
        with open(f'{fname}.onnxtxt', 'w') as text_file:
            text_file.write(str(md))
        onnx.save(md, f'{fname}.onnx')
def onehot_tests():
    """Build a model exercising OneHot with the default and explicit axes."""
    idx = Placeholder(shape=[2, 3, 5])
    on_off = Constant(np.asarray([0, 1], dtype=np.float32))
    depth = Constant(np.asarray(4, dtype=np.int64))
    builder = Exporter()
    builder.add_graph_input('indices', idx)
    # One default-axis output plus one output per explicit axis 0..2.
    builder.add_graph_output('a', OneHot(idx, depth, on_off))
    for axis in range(3):
        builder.add_graph_output(f'a{axis}', OneHot(idx, depth, on_off, axis))
    md = builder.export('OneHot Test')
    fname = 'oneHot'
    with open(f'{fname}.onnxtxt', 'w') as text_file:
        text_file.write(str(md))
    onnx.save(md, f'{fname}.onnx')
def resize_test():
    """Build a 2x nearest-neighbour Resize model (half_pixel coordinates,
    round_prefer_ceil rounding)."""
    src = Placeholder()
    scale_factors = Constant(np.asarray([1, 1, 2, 2], dtype=np.float32))
    resized = Resize(src, scales=scale_factors,
                     coordinate_transformation_mode="half_pixel",
                     mode="nearest", nearest_mode="round_prefer_ceil")
    builder = Exporter()
    builder.add_graph_input('in', src, [1, 1, 2, 2])
    builder.add_graph_output('Y', resized)
    md = builder.export('Resize test')
    fname = 'resizeHalfPixelNearestCeil'
    with open(f'{fname}.onnxtxt', 'w') as text_file:
        text_file.write(str(md))
    onnx.save(md, f'{fname}.onnx')
def run():
    """Entry point: generate the currently-enabled test models.

    Only the resize test is active; uncomment the others to regenerate
    their models as well.
    """
    resize_test()
    # onehot_tests()
    # convert_tests()
if __name__ == "__main__":
    run()
| [
"onnx.save",
"numpy.asarray"
] | [((1531, 1561), 'onnx.save', 'onnx.save', (['md', 'f"""{fname}.onnx"""'], {}), "(md, f'{fname}.onnx')\n", (1540, 1561), False, 'import onnx\n'), ((2052, 2082), 'onnx.save', 'onnx.save', (['md', 'f"""{fname}.onnx"""'], {}), "(md, f'{fname}.onnx')\n", (2061, 2082), False, 'import onnx\n'), ((835, 865), 'onnx.save', 'onnx.save', (['md', 'f"""{fname}.onnx"""'], {}), "(md, f'{fname}.onnx')\n", (844, 865), False, 'import onnx\n'), ((950, 986), 'numpy.asarray', 'np.asarray', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (960, 986), True, 'import numpy as np\n'), ((1009, 1038), 'numpy.asarray', 'np.asarray', (['(4)'], {'dtype': 'np.int64'}), '(4, dtype=np.int64)\n', (1019, 1038), True, 'import numpy as np\n'), ((1654, 1696), 'numpy.asarray', 'np.asarray', (['[1, 1, 2, 2]'], {'dtype': 'np.float32'}), '([1, 1, 2, 2], dtype=np.float32)\n', (1664, 1696), True, 'import numpy as np\n')] |
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import requests
import json
import numpy as np
import pandas as pd
import bs4 as bs
import pickle
import requests
from sklearn.metrics import f1_score
from datetime import datetime
from datetime import timedelta
from api import TIMEDELTA_QUARTER
from api import TIMEDELTA_MONTH
from api import TIMEDELTA_YEAR
from api.tickers import get_sp500_tickers
from model.benchmark_helper import get_stock_label_func
from model.benchmark_helper import get_sp500_gain_for_interval
def confusion_matrix(y_predictions, y_labels, output=False):
    """ Calculates a confusion matrix given predictions and labels

    Args:
        y_predictions: Binary (0/1) predictions from a model (iterable)
        y_labels: Binary (0/1) labels (iterable)
        output: True to print the confusion matrix

    Returns:
        The confusion matrix (2D numpy array) associated with predictions
        and labels. Format of the matrix is [ [TP FP], [TN, FN] ].
    """
    y_predictions = np.array(y_predictions)
    y_labels = np.array(y_labels)

    true_pos = np.logical_and(y_predictions, y_labels).sum()
    false_pos = np.logical_and(y_predictions, 1 - y_labels).sum()
    true_neg = np.logical_and(1 - y_predictions, 1 - y_labels).sum()
    false_neg = np.logical_and(1 - y_predictions, y_labels).sum()

    if output:
        # Pad the header to the width of the row labels (11 chars) so that
        # 'True'/'False' line up over the count columns. The previous code
        # shifted the header one 10-char field left of the counts.
        print('%-11s' % '' + '%10s' % 'True' + '%10s' % 'False')
        print('%-11s' % 'Positives:' + '%10s' % true_pos + '%10s' % false_pos)
        print('%-11s' % 'Negatives:' + '%10s' % true_neg + '%10s' % false_neg)
        print('')

    return np.array([[true_pos, false_pos],
                     [true_neg, false_neg]])
def binary_measures(confusion_matrix, output=False):
    """ Calculates statistical measures from a confusion matrix

    Args:
        confusion_matrix: A 2D array of the form [ [TP FP], [TN, FN] ]
        output: True to print statistical measures

    Returns:
        A tuple (f1, precision, recall). Precision and recall are rounded
        to two decimals; the F1 score is derived from the rounded values.
    """
    (true_pos, false_pos), (true_neg, false_neg) = confusion_matrix

    # Guard every ratio against an empty denominator (counts are
    # non-negative, so a zero sum means both terms are zero).
    predicted_pos = true_pos + false_pos
    actual_pos = true_pos + false_neg
    precision = round(true_pos / predicted_pos, 2) if predicted_pos else 0
    recall = round(true_pos / actual_pos, 2) if actual_pos else 0

    if precision or recall:
        f1 = 2 * precision * recall / (precision + recall)
    else:
        f1 = 0

    if output:
        print(f'F1 score: {round(f1,2)}')
        print(f'Precision: {round(precision,2)}')
        print(f'Recall: {round(recall,2)}')

    return f1, precision, recall
def get_analyst_confusion_matrix_for_quarter(quarter_offset=1, output=False):
    """Builds a confusion matrix for historical analyst predictions

    Compares analyst ratings from `quarter_offset` quarters ago against
    each stock's actual performance over the following quarter.

    Args:
        quarter_offset: The offset relative to today
            (must be historical, therefore >= 1)
        output: True to print progress and the resulting matrix

    Returns:
        2x2 confusion matrix [[TP, FP], [TN, FN]] (see confusion_matrix)
    """
    if output:
        print("Collecting analyst ratings...")
    get_stock_label = get_stock_label_func(TIMEDELTA_QUARTER, timedelta())
    prediction_dict = get_analyst_rating_for_quarter(quarter_offset)
    predictions = list()
    labels = list()
    if output:
        print(f"Calculating actual performance for {len(prediction_dict)} tickers: ")
    for ticker in prediction_dict.keys():
        if output:
            print(ticker, end=' ')
        try:
            labels.append(get_stock_label(ticker))
            predictions.append(prediction_dict[ticker])
        except Exception:
            # Skip tickers whose performance data cannot be retrieved.
            # (Narrowed from a bare `except:` so Ctrl-C still works.)
            continue
    return confusion_matrix(predictions, labels, output)
def get_analyst_rating_for_quarter(quarter_offset=0):
    """Returns analyst predictions for a given quarter

    Scrapes MarketBeat consensus ratings for every S&P 500 ticker and
    converts each to a binary over/under-perform prediction.

    Args:
        quarter_offset: The offset relative to today in quarters
            (0 corresponds to the latest rating; only 0, 1 and 2 supported)

    Returns:
        Dict mapping ticker -> prediction
        (1 for overperform, 0 for underperform)

    Raises:
        LookupError: If quarter_offset is outside the supported range.
    """
    def scrape_historical_ratings_for_ticker(symbol):
        # Fetches the 5x5 ratings table for `symbol`; raises LookupError if
        # the page cannot be retrieved or lacks the expected table shape.
        try:
            response = requests.get(f'https://www.marketbeat.com/stocks/NASDAQ/{symbol}/price-target/')
            soup = bs.BeautifulSoup(response.text, 'html.parser')
            table = soup.find('table', {'class': 'scroll-table'})
            ratings_df = pd.read_html(str(table))[0]
            if ratings_df.shape != (5, 5):
                raise LookupError
        except Exception:
            # Narrowed from a bare `except:`; network/parse failures become
            # a uniform LookupError for the caller.
            raise LookupError
        return ratings_df

    sp500_tickers = get_sp500_tickers()

    if quarter_offset == 0:
        column = 'Today'
    elif quarter_offset == 1:
        column = '90 Days Ago'
    elif quarter_offset == 2:
        column = '180 Days Ago'
    else:
        raise LookupError("Data unavailable for the requested quarter")

    predictions = dict()
    for ticker in sp500_tickers:
        try:
            analyst_prediction_df = scrape_historical_ratings_for_ticker(ticker)
            # Scoring system we are using (consistent with Yahoo Finance) is 1-5 with:
            # 1 = strong outperform (strong buy) and 5 = strong underperform (strong sell)
            # MarketBeat's scoring is shifted by one and inverted (e.g., 0-4 with 0 = strong sell)
            # For this reason, we will subtract the score from 5 to convert to 1-5 scale
            analyst_score = 5.0 - float(analyst_prediction_df[column][1])
            predictions[ticker] = 1 if analyst_score < 3 else 0
        except Exception:
            # Skip tickers that fail to scrape or parse.
            continue
    return predictions
"api.tickers.get_sp500_tickers",
"numpy.logical_and",
"requests.get",
"bs4.BeautifulSoup",
"numpy.array",
"datetime.timedelta"
] | [((2058, 2081), 'numpy.array', 'np.array', (['y_predictions'], {}), '(y_predictions)\n', (2066, 2081), True, 'import numpy as np\n'), ((2097, 2115), 'numpy.array', 'np.array', (['y_labels'], {}), '(y_labels)\n', (2105, 2115), True, 'import numpy as np\n'), ((2638, 2694), 'numpy.array', 'np.array', (['[[true_pos, false_pos], [true_neg, false_neg]]'], {}), '([[true_pos, false_pos], [true_neg, false_neg]])\n', (2646, 2694), True, 'import numpy as np\n'), ((5671, 5690), 'api.tickers.get_sp500_tickers', 'get_sp500_tickers', ([], {}), '()\n', (5688, 5690), False, 'from api.tickers import get_sp500_tickers\n'), ((4276, 4287), 'datetime.timedelta', 'timedelta', ([], {}), '()\n', (4285, 4287), False, 'from datetime import timedelta\n'), ((2133, 2172), 'numpy.logical_and', 'np.logical_and', (['y_predictions', 'y_labels'], {}), '(y_predictions, y_labels)\n', (2147, 2172), True, 'import numpy as np\n'), ((2195, 2238), 'numpy.logical_and', 'np.logical_and', (['y_predictions', '(1 - y_labels)'], {}), '(y_predictions, 1 - y_labels)\n', (2209, 2238), True, 'import numpy as np\n'), ((2259, 2306), 'numpy.logical_and', 'np.logical_and', (['(1 - y_predictions)', '(1 - y_labels)'], {}), '(1 - y_predictions, 1 - y_labels)\n', (2273, 2306), True, 'import numpy as np\n'), ((2325, 2368), 'numpy.logical_and', 'np.logical_and', (['(1 - y_predictions)', 'y_labels'], {}), '(1 - y_predictions, y_labels)\n', (2339, 2368), True, 'import numpy as np\n'), ((5205, 5290), 'requests.get', 'requests.get', (['f"""https://www.marketbeat.com/stocks/NASDAQ/{symbol}/price-target/"""'], {}), "(f'https://www.marketbeat.com/stocks/NASDAQ/{symbol}/price-target/'\n )\n", (5217, 5290), False, 'import requests\n'), ((5305, 5351), 'bs4.BeautifulSoup', 'bs.BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (5321, 5351), True, 'import bs4 as bs\n')] |
import numpy as np
import unittest
from locality.helpers.rank import rank
class Rank_Tests(unittest.TestCase):
    """Checks that rank() returns, for each point, the indices of its q
    nearest candidates ordered by distance.

    Fixes over the previous version: `uniittest` typo, undefined
    `num_points` (now `n`), removed `np.int` alias, test method renamed so
    unittest discovery actually runs it, and numpy-array comparison done
    with array_equal instead of assertEqual (which is ambiguous for arrays).
    """

    def test_main(self):
        q = 5
        n = 10
        np.random.seed(0)  # deterministic test data
        # Each point needs at least q candidates for a q-ranking to exist;
        # the previous lower bound of 2 could break the reference slicing.
        each = np.random.randint(q, 10, n)
        indptr = np.empty(n + 1, dtype=np.int64)
        indptr[0] = 0
        indptr[1:] = np.cumsum(each)
        dist = np.random.random(indptr[-1])
        indices = np.random.randint(0, 100, indptr[-1])
        # Reference result computed with a plain argsort per point.
        confirm = np.empty(q * n, dtype=np.int64)
        j = 0
        for i in range(n):
            a, b = indptr[i], indptr[i + 1]
            sort = np.argsort(dist[a:b])
            confirm[j:j + q] = indices[a:b][sort[:q]]
            j += q
        test = rank(q, n, dist, indices, indptr)
        self.assertTrue(np.array_equal(confirm, test))
"numpy.random.random",
"numpy.argsort",
"numpy.random.randint",
"numpy.empty",
"numpy.cumsum",
"locality.helpers.rank.rank"
] | [((184, 220), 'numpy.random.randint', 'np.random.randint', (['(2)', '(10)', 'num_points'], {}), '(2, 10, num_points)\n', (201, 220), True, 'import numpy as np\n'), ((238, 276), 'numpy.empty', 'np.empty', (['(num_points + 1)'], {'dtype': 'np.int'}), '(num_points + 1, dtype=np.int)\n', (246, 276), True, 'import numpy as np\n'), ((320, 335), 'numpy.cumsum', 'np.cumsum', (['each'], {}), '(each)\n', (329, 335), True, 'import numpy as np\n'), ((351, 379), 'numpy.random.random', 'np.random.random', (['indptr[-1]'], {}), '(indptr[-1])\n', (367, 379), True, 'import numpy as np\n'), ((398, 435), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', 'indptr[-1]'], {}), '(0, 100, indptr[-1])\n', (415, 435), True, 'import numpy as np\n'), ((454, 483), 'numpy.empty', 'np.empty', (['(q * n)'], {'dtype': 'np.int'}), '(q * n, dtype=np.int)\n', (462, 483), True, 'import numpy as np\n'), ((698, 731), 'locality.helpers.rank.rank', 'rank', (['q', 'n', 'dist', 'indices', 'indptr'], {}), '(q, n, dist, indices, indptr)\n', (702, 731), False, 'from locality.helpers.rank import rank\n'), ((588, 609), 'numpy.argsort', 'np.argsort', (['dist[a:b]'], {}), '(dist[a:b])\n', (598, 609), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 5 23:37:07 2019
@author: manuel
"""
# Exercise 12
# Given some 2-dimensional dataset like data1.csv, data2.csv or data3.csv,
# implement and apply k-means hard clustering with k = 2 and k = 3.
# Use the Euclidean distance as dissimilarity metric.
# At each training iteration of the algorithm, compute the quantization error
# and plot data points and centroids with a different color for each cluster.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
def euclidean_distance(s1, s2):
    """
    Compute the Euclidean distance between two n-dimensional objects.

    Args:
        s1, s2: Equal-length sequences of numeric coordinates.

    Returns:
        The Euclidean (L2) distance between s1 and s2.
    """
    # Pair coordinates directly with zip instead of indexing by position
    # (the old loop also bound an unused `value` from enumerate).
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(s1, s2)))
# read data
df = pd.read_csv("data3.csv")
# plot data
plt.plot(df["x"], df["y"], 'o')
plt.title("Data")
plt.grid(True)
plt.show()
# set k to be used for k-means clustering
k = 3
training_budget = 100
# init variables
centroids = [] # centroids coordinates
previous_centroids = [] # centroids of previous training iteration
iteration = 1 # training iteration
# get random indexes between 1 and # of data points to pick initial centroids
np.random.seed(2)
centroids_init_index = np.random.randint(1, len(df), k)
for i in range(0,k):
# define initial centroids from training data using the random indexes
centroids.append(df.iloc[centroids_init_index[i],:])
# initialize variables to consider centroids of the previous training iteration
previous_centroids.append(pd.Series([0,0]))
# training loop
while iteration < training_budget:
# loop until the positions of the centroids do not change anymore (2 dimensions)
sum_distances = 0
for i in range(0,k):
sum_distances = sum_distances + euclidean_distance(previous_centroids[i],centroids[i])
if(sum_distances == 0):
break
# init varibles
centroids_lists = [] # list of dataframes of points assigned to each centroid
centroids_lists_distances = [] # list of dataframes of distances wrt to each centroid
for i in range(0,k):
# define an empty dataframe for the points assigned to each centroid
centroids_lists.append(pd.DataFrame())
# define an empty dataframe for the distances between each point
# of a cluster and the respective centroid
centroids_lists_distances.append(pd.DataFrame())
# iterate over all rows in the dataframe (data point), to find nearest centroid
for index in range(df.shape[0]):
# get all columns of a row
point = df.iloc[index,:]
# init variables
minimum_distance = 100 # min distance found between current point and nearest centroid
nearest_centroid_index = -1 # index of the nearest centroid wrt current point
# given a data point, compute the distance wrt each centroid and find the nearest centroid
for centroid_index,centroid in enumerate(centroids):
distance = euclidean_distance(point, centroid)
if(distance < minimum_distance):
minimum_distance = distance
nearest_centroid_index = centroid_index
# append data point according to the index of its nearest centroid
centroids_lists[nearest_centroid_index] = centroids_lists[nearest_centroid_index].append(point)
# append minimum distance according to the index of nearest centroid of current point
# (to compute quantization error)
centroids_lists_distances[nearest_centroid_index] = centroids_lists_distances[nearest_centroid_index].append(pd.Series([minimum_distance]), ignore_index=True)
# compute quantization error
tmp_sum = 0
for i in range(0,k):
# square all minimum distances wrt the centroid for one cluster
tmp = centroids_lists_distances[i].iloc[:,0]**2
tmp_sum = tmp_sum + tmp.sum()
qe = tmp_sum / k
# print current clusters and centroids
for i in range(0,k):
# print cluster
plt.plot(centroids_lists[i].iloc[:,0], centroids_lists[i].iloc[:,1], 'o')
# print centroid
plt.plot(centroids[i].iloc[0], centroids[i].iloc[1],'o')
plt.title("Iteration " + str(iteration) + ": QE = " + "{0:.2f}".format(qe))
plt.grid(True)
plt.show()
# compute new centroid coordinates as mean of the points in each cluster
previous_centroids = centroids
centroids = []
for i in range(0,k):
centroids.append(centroids_lists[i].mean())
iteration = iteration + 1 | [
"pandas.Series",
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"math.sqrt",
"numpy.random.seed",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((838, 862), 'pandas.read_csv', 'pd.read_csv', (['"""data3.csv"""'], {}), "('data3.csv')\n", (849, 862), True, 'import pandas as pd\n'), ((878, 909), 'matplotlib.pyplot.plot', 'plt.plot', (["df['x']", "df['y']", '"""o"""'], {}), "(df['x'], df['y'], 'o')\n", (886, 909), True, 'import matplotlib.pyplot as plt\n'), ((910, 927), 'matplotlib.pyplot.title', 'plt.title', (['"""Data"""'], {}), "('Data')\n", (919, 927), True, 'import matplotlib.pyplot as plt\n'), ((928, 942), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (936, 942), True, 'import matplotlib.pyplot as plt\n'), ((943, 953), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (951, 953), True, 'import matplotlib.pyplot as plt\n'), ((1282, 1299), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (1296, 1299), True, 'import numpy as np\n'), ((801, 818), 'math.sqrt', 'math.sqrt', (['tmpsum'], {}), '(tmpsum)\n', (810, 818), False, 'import math\n'), ((4539, 4553), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4547, 4553), True, 'import matplotlib.pyplot as plt\n'), ((4558, 4568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4566, 4568), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1657), 'pandas.Series', 'pd.Series', (['[0, 0]'], {}), '([0, 0])\n', (1649, 1657), True, 'import pandas as pd\n'), ((4275, 4350), 'matplotlib.pyplot.plot', 'plt.plot', (['centroids_lists[i].iloc[:, 0]', 'centroids_lists[i].iloc[:, 1]', '"""o"""'], {}), "(centroids_lists[i].iloc[:, 0], centroids_lists[i].iloc[:, 1], 'o')\n", (4283, 4350), True, 'import matplotlib.pyplot as plt\n'), ((4390, 4447), 'matplotlib.pyplot.plot', 'plt.plot', (['centroids[i].iloc[0]', 'centroids[i].iloc[1]', '"""o"""'], {}), "(centroids[i].iloc[0], centroids[i].iloc[1], 'o')\n", (4398, 4447), True, 'import matplotlib.pyplot as plt\n'), ((2348, 2362), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2360, 2362), True, 'import pandas as pd\n'), ((2537, 2551), 
'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2549, 2551), True, 'import pandas as pd\n'), ((3833, 3862), 'pandas.Series', 'pd.Series', (['[minimum_distance]'], {}), '([minimum_distance])\n', (3842, 3862), True, 'import pandas as pd\n')] |
import protodata.data_ops as do
from protodata.utils import save_pickle, create_dir, download_file
from protodata.image_ops import ImageCoder, process_image
import threading
from datetime import datetime
import scipy.misc
import tempfile
import os
from abc import ABCMeta
import abc
import numpy as np
import tensorflow as tf
import logging
logger = logging.getLogger(__name__)
""" File containing code to generate TFRecords from datasets.
This records are protobuf serialization versions of the data
and are the recommended standard for Tensorflow
Inspired from goo.gl/fZyRit """
class DataSerializer(object):
""" Serializes a dataset into TFRecords """
def __init__(self, serialize_settings):
if not isinstance(serialize_settings, SerializeSettings):
raise TypeError('Attribute must be subclass of SerializeSettings')
self.settings = serialize_settings
    def serialize(self,
                  output_folder,
                  train_ratio,
                  val_ratio,
                  num_threads,
                  train_shards,
                  val_shards,
                  test_shards):
        """ Serializes the data into a Tensorflow recommended
        Example proto format
        Args:
            output_folder: Output folder for the record files.
            train_ratio: Ratio of instances in the training data.
                If original dataset already split, this is not used.
            val_ratio: Ratio of instances in the validation data.
            num_threads: Threads to use.
            train_shards: Number of files the training set will be split in.
                Must be divisible by the number of threads.
            val_shards: Number of slices the validation set will be split in.
                Must be divisible by the number of threads.
            test_shards: Number of slices the testing set will be split in.
                Must be divisible by the number of threads.
        Raises:
            ValueError: If either ratio is outside [0, 1], if the ratios
                sum to 1 or more (leaving no test data), or if the output
                folder already exists.
        """
        logger.info("Trying to create dataset into %s" % output_folder)
        # Validate ratios before touching the filesystem.
        if train_ratio > 1.0 or train_ratio < 0.0:
            raise ValueError('Training ratio must be in interval [0, 1]')
        if val_ratio > 1.0 or val_ratio < 0.0:
            raise ValueError('Validation ratio must be in interval [0, 1]')
        if train_ratio + val_ratio >= 1.0:
            raise ValueError('Training and validation ratio exceed 1')
        # Refuse to overwrite a previously serialized dataset.
        if os.path.exists(output_folder):
            raise ValueError('Dataset already exists!')
        create_dir(output_folder)
        # Read dataset
        self.settings.initialize()
        # Split according to validation preferences
        logger.info('Splitting into training and validation')
        train, val, test = \
            self.settings.get_validation_indices(train_ratio, val_ratio)
        # Create training files
        self._store_dataset(train,
                            output_folder,
                            train_shards,
                            num_threads,
                            do.DataMode.TRAINING)
        # Create validation files
        self._store_dataset(val,
                            output_folder,
                            val_shards,
                            num_threads,
                            do.DataMode.VALIDATION)
        # Create test files
        self._store_dataset(test,
                            output_folder,
                            test_shards,
                            num_threads,
                            do.DataMode.TEST)
        # Store settings
        self._store_options(
            output_folder,
            n_training_instances=len(train),
            train_ratio=train_ratio,
            val_ratio=val_ratio
        )
        # Free resources, if any
        self.settings.finalize()
def serialize_folds(self,
output_folder,
train_ratio,
n_folds,
num_threads,
test_shards,
files_per_fold=1):
""" Serializes the data into a Tensorflow recommended
Example proto format using N folds. Each fold has its own
folder with a certain amount of files.
Args:
output_folder: Output folder for the record files.
train_ratio: Ratio of instances in the training set. The rest
will be included in the test set.
n_folds: Ratio of instances in the training data.
If original dataset already split, this is not used.
num_threads: Number of threads to use.
test_shards: Number of files to use for testing.
files_per_fold: Number of files for each training fold.
"""
logger.info("Trying to create folded dataset into %s" % output_folder)
if os.path.exists(output_folder):
raise ValueError('Dataset already exists!')
create_dir(output_folder)
# Read dataset
self.settings.initialize()
# Split between training and test
train, _, test = \
self.settings.get_validation_indices(train_ratio, 0.0)
# Split training into folds
idx_per_fold = np.array_split(train, n_folds)
for fold in range(n_folds):
self._store_dataset(idx_per_fold[fold],
output_folder,
files_per_fold,
num_threads,
'training_fold_%d' % fold)
# Save test dataset
self._store_dataset(test,
output_folder,
test_shards,
num_threads,
do.DataMode.TEST)
# Store settings
fold_size = int(len(train)/n_folds)
self._store_options(
output_folder,
train_ratio=train_ratio,
fold_size=fold_size,
n_training_instances=fold_size*(n_folds-1),
n_folds=n_folds
)
# Free resources, if any
self.settings.finalize()
def _store_options(self, output, **params):
""" Stores serialization options in a 'metadata.dat' file
in the output directory """
# Basic info for all datasets
options = {'columns': self.settings.define_columns()}
options.update(params)
# We can add dataset-specific data
extra_options = self.settings.get_options()
if extra_options is not None:
options.update(extra_options)
# Save into pickle file
save_pickle(os.path.join(output, 'metadata.dat'), options)
def _store_dataset(self, indices_set, folder, num_shards,
num_threads, tag):
""" Stores the subset selected by the row identifiers
indices from the dataset using several threads.
Args:
indices_set: Set of row instances to store.
folder: Output folder.
num_shards: Number of slices dataset will be split.
num_threads: Threads to use.
tag: Dataset tag name.
"""
# Break indices into groups such as [ranges[i][0], ranges[i][1]]
spacing = np.linspace(0,
len(indices_set),
num_threads + 1).astype(np.int)
# Each interval is performed by an independent thread
ranges = [[spacing[i], spacing[i + 1]]
for i in range(len(spacing) - 1)]
# Number of data slices must be multiple of the number
# of threads for easiness
assert not num_shards % num_threads
logger.info('Launching %d threads for spacings: %s'
% (num_threads, ranges))
# Coordinator monitors threads
coord = tf.train.Coordinator()
# Create threads
threads = []
for thread_index in range(len(ranges)):
args = (coord, folder, tag, indices_set, thread_index,
ranges, num_shards, num_threads)
threads.append(threading.Thread(target=self._process_batch,
args=args))
# Start threads
for t in threads:
t.start()
# Wait for threads to end
coord.join(threads)
logger.info('Finished writing dataset %s (%d)'
% (tag, len(indices_set)))
    def _process_batch(self, coord, output_folder, name_tag, indices,
                       thread_index, ranges, num_shards, num_threads):
        """ The given thread saves batches of instances that correspond to its
        assigned range of rows in the dataset.
        Args:
            coord: Thread coordinator
            output_folder: Folder where to output the current dataset.
            name_tag: Tag of the dataset for visualization.
            indices: Total set of dataset indices to store
                (shared by all threads).
            thread_index: Thread identifier for this batch.
            ranges: List of pairs of integers specifying ranges
                instances to be processed in parallel (shared by all threads).
            num_shards: Number of slices the dataset will be split among all
                the threads.
            num_threads: Total number of threads.
        """
        def add_example(writer, example, shard_counter, counter):
            """ Dumps example into the TFRecord file """
            writer.write(example.SerializeToString())
            # increase counter (ints are immutable, so the updated values
            # are returned to the caller rather than mutated in place)
            shard_counter += 1
            counter += 1
            return shard_counter, counter
        # Each thread produces N shards, N = int(num_shards / num_threads).
        # For instance, if num_shards = 128, and the num_threads = 2,
        # then the first thread would produce shards [0, 64).
        num_shards_per_thread = int(num_shards / num_threads)
        # Instances involving each shard: boundaries of this thread's
        # index range, split evenly across its shards
        shard_ranges = np.linspace(ranges[thread_index][0],
                                    ranges[thread_index][1],
                                    num_shards_per_thread + 1).astype(int)
        # Iterate through shards of current thread; stop early if the
        # coordinator requests a shutdown
        counter, s = 0, 0
        while s < num_shards_per_thread and not coord.should_stop():
            # Generate a sharded version of the file name,
            # e.g. 'train-00002-of-00010'
            shard = thread_index * num_shards_per_thread + s
            output_filename = do.get_filename(name_tag, shard + 1, num_shards)
            output_file = os.path.join(output_folder, output_filename)
            # Create writer file
            writer = tf.python_io.TFRecordWriter(output_file)
            logger.debug('%s [thread %d]: Creating file %s \n' %
                         (datetime.now(), thread_index, output_file))
            # Select num of instances to be in the current shard file
            shard_counter = 0
            insts_per_shard = np.arange(shard_ranges[s],
                                        shard_ranges[s + 1],
                                        dtype=int)
            # Each shard iterates over a subset of the instances
            for i in insts_per_shard:
                # Get set of examples for current index and store them
                # (one dataset row may expand into several examples)
                index = indices[i]
                examples = self.settings.build_examples(index)
                for ex in examples:
                    shard_counter, counter = \
                        add_example(writer, ex, shard_counter, counter)
            # Close file writer for current shard
            writer.close()
            # Increase shard counter
            s += 1
            logger.debug(
                '%s [thread %d]: Wrote %d instances to %s' %
                (datetime.now(), thread_index, shard_counter, output_file))
        # Summarize batched output
        logger.debug(
            '%s [thread %d]: Wrote %d instances to %d shards.' %
            (datetime.now(), thread_index, counter, num_shards_per_thread))
class SerializeSettings(object):
    # NOTE(review): assigning __metaclass__ is the Python 2 mechanism for
    # setting a metaclass; under Python 3 this attribute has no effect, so
    # the @abc.abstractmethod decorators below are not enforced at
    # instantiation time. Python 3 syntax would be
    # ``class SerializeSettings(metaclass=ABCMeta)`` -- confirm target version.
    __metaclass__ = ABCMeta
    """ Class that provides dataset-specific helpers for serialization """
    def __init__(self, data_path):
        """ Args:
            data_path: Path where to read data from
        """
        self.data_path = data_path
        # Image coder; created lazily in initialize()
        self.coder = None
    def initialize(self):
        """ Initializes the settings. Resources opened here must be
        closed in finalize """
        self.coder = ImageCoder()
        self.read()
    def process_image(self, img):
        """
        Args:
            img: Image path
        Returns:
            decoded_image: Decoded image content
            height: height of the image
            width: Width of the image
        """
        # Delegates to the module-level process_image helper (same name as
        # this method; inside the body the global function is resolved).
        return process_image(img, self.coder)
    def process_image_bytes(self, img):
        """
        Args:
            img: Ndarray of the image content
        Returns:
            Encoded image
        """
        # Rough but working solution: map matrix into temporary file
        fd, file = tempfile.mkstemp(suffix='.jpeg')
        # NOTE(review): scipy.misc.imsave was deprecated in SciPy 1.0 and
        # removed in 1.2; this call requires an old, pinned SciPy
        # (imageio.imwrite is the modern replacement) -- confirm.
        scipy.misc.imsave(file, img)
        decoded, _, _ = process_image(file, self.coder)
        # Clean temporary file
        os.close(fd)
        os.remove(file)
        return decoded
    def image_from_url(self, url, jpeg=True):
        """ Loads an image into Tensorflow given a valid url
        Args:
            url: Link to the image (must be valid or will raise an error)
            jpeg: Whether to download image as JPEG (True) or PNG (False)
        Returns:
            decoded_image: Decoded content of the image in Tensorflow
            height: Vertical size
            width: Horizontal size
        """
        # Download image into temporary file
        suffix = '.jpeg' if jpeg else '.png'
        fd, tmp_path = tempfile.mkstemp(suffix=suffix)
        download_file(url, tmp_path)
        # Add image plus additional image information
        decoded_image, height, width = self.process_image(tmp_path)
        # Free temporary resources
        os.close(fd)
        os.remove(tmp_path)
        return decoded_image, height, width
    @abc.abstractmethod
    def read(self):
        """ Reads the dataset so it is ready for being serialized """
    @abc.abstractmethod
    def get_validation_indices(self, train_ratio, val_ratio):
        """ Returns the data indices corresponding to training,
        validation and testing
        Returns
            train, val, test: training and validation index sets
        """
    @abc.abstractmethod
    def define_columns(self):
        """ Returns a list of mapped columns to be stored/read into
        ExampleColumn subclasses """
    @abc.abstractmethod
    def get_options(self):
        """ Returns a dictionary of additional serialization options
        to be stored. Set to None for no extra settings """
    @abc.abstractmethod
    def build_examples(self, index):
        """ Builds TFExamples from the instance with the given index
        in the dataset.
        Returns
            examples: List of examples built from the given row index
        """
    def finalize(self):
        # Releases the image coder opened in initialize()
        self.coder.finalize()
| [
"logging.getLogger",
"os.path.exists",
"protodata.utils.download_file",
"protodata.image_ops.process_image",
"tensorflow.train.Coordinator",
"os.close",
"protodata.image_ops.ImageCoder",
"os.path.join",
"protodata.data_ops.get_filename",
"numpy.array_split",
"protodata.utils.create_dir",
"nump... | [((352, 379), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (369, 379), False, 'import logging\n'), ((2434, 2463), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (2448, 2463), False, 'import os\n'), ((2530, 2555), 'protodata.utils.create_dir', 'create_dir', (['output_folder'], {}), '(output_folder)\n', (2540, 2555), False, 'from protodata.utils import save_pickle, create_dir, download_file\n'), ((4874, 4903), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (4888, 4903), False, 'import os\n'), ((4970, 4995), 'protodata.utils.create_dir', 'create_dir', (['output_folder'], {}), '(output_folder)\n', (4980, 4995), False, 'from protodata.utils import save_pickle, create_dir, download_file\n'), ((5252, 5282), 'numpy.array_split', 'np.array_split', (['train', 'n_folds'], {}), '(train, n_folds)\n', (5266, 5282), True, 'import numpy as np\n'), ((7880, 7902), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (7900, 7902), True, 'import tensorflow as tf\n'), ((12580, 12592), 'protodata.image_ops.ImageCoder', 'ImageCoder', ([], {}), '()\n', (12590, 12592), False, 'from protodata.image_ops import ImageCoder, process_image\n'), ((12877, 12907), 'protodata.image_ops.process_image', 'process_image', (['img', 'self.coder'], {}), '(img, self.coder)\n', (12890, 12907), False, 'from protodata.image_ops import ImageCoder, process_image\n'), ((13168, 13200), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".jpeg"""'}), "(suffix='.jpeg')\n", (13184, 13200), False, 'import tempfile\n'), ((13262, 13293), 'protodata.image_ops.process_image', 'process_image', (['file', 'self.coder'], {}), '(file, self.coder)\n', (13275, 13293), False, 'from protodata.image_ops import ImageCoder, process_image\n'), ((13333, 13345), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (13341, 13345), False, 'import os\n'), ((13354, 13369), 'os.remove', 'os.remove', 
(['file'], {}), '(file)\n', (13363, 13369), False, 'import os\n'), ((13944, 13975), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': 'suffix'}), '(suffix=suffix)\n', (13960, 13975), False, 'import tempfile\n'), ((13984, 14012), 'protodata.utils.download_file', 'download_file', (['url', 'tmp_path'], {}), '(url, tmp_path)\n', (13997, 14012), False, 'from protodata.utils import save_pickle, create_dir, download_file\n'), ((14180, 14192), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (14188, 14192), False, 'import os\n'), ((14201, 14220), 'os.remove', 'os.remove', (['tmp_path'], {}), '(tmp_path)\n', (14210, 14220), False, 'import os\n'), ((6671, 6707), 'os.path.join', 'os.path.join', (['output', '"""metadata.dat"""'], {}), "(output, 'metadata.dat')\n", (6683, 6707), False, 'import os\n'), ((10561, 10609), 'protodata.data_ops.get_filename', 'do.get_filename', (['name_tag', '(shard + 1)', 'num_shards'], {}), '(name_tag, shard + 1, num_shards)\n', (10576, 10609), True, 'import protodata.data_ops as do\n'), ((10636, 10680), 'os.path.join', 'os.path.join', (['output_folder', 'output_filename'], {}), '(output_folder, output_filename)\n', (10648, 10680), False, 'import os\n'), ((10736, 10776), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['output_file'], {}), '(output_file)\n', (10763, 10776), True, 'import tensorflow as tf\n'), ((11044, 11102), 'numpy.arange', 'np.arange', (['shard_ranges[s]', 'shard_ranges[s + 1]'], {'dtype': 'int'}), '(shard_ranges[s], shard_ranges[s + 1], dtype=int)\n', (11053, 11102), True, 'import numpy as np\n'), ((8145, 8200), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._process_batch', 'args': 'args'}), '(target=self._process_batch, args=args)\n', (8161, 8200), False, 'import threading\n'), ((10050, 10143), 'numpy.linspace', 'np.linspace', (['ranges[thread_index][0]', 'ranges[thread_index][1]', '(num_shards_per_thread + 1)'], {}), '(ranges[thread_index][0], ranges[thread_index][1], \n 
num_shards_per_thread + 1)\n', (10061, 10143), True, 'import numpy as np\n'), ((12047, 12061), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12059, 12061), False, 'from datetime import datetime\n'), ((10869, 10883), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10881, 10883), False, 'from datetime import datetime\n'), ((11852, 11866), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11864, 11866), False, 'from datetime import datetime\n')] |
import numpy as np
import pandas as pd
from src.model.models import nw_inference
from src.model.predict import predict_nw
def main():
    """Run inference on the test images with the best checkpoint.

    Loads the preprocessed test arrays, restores the network weights from
    the checkpoint step with the highest validation F1 score, and writes
    the predicted probabilities (and matching sample names) to the
    submission folder.
    """
    # Load preprocessed image data and the matching sample names.
    img_array_test = np.load("../data/processed/image_array_test.npy")
    name_array_test = np.load("../data/processed/name_array_test.npy")
    # Pick the checkpoint step whose recorded F1 score is highest.
    f1 = np.load("../model/nw_f1.npy")
    status = np.load("../model/nw_status.npy")
    step = status[np.argmax(f1)][1]
    model = nw_inference()
    model.load_weights("../model/nw_weight_{}.h5".format(step))
    # Run inference and flatten to a 1-D probability vector.
    res_prob = predict_nw(model, img_array_test).reshape(-1, )
    np.save("../submission/nw_prob.npy", res_prob)
    np.save("../submission/nw_name.npy", name_array_test)


if __name__ == "__main__":
    main()
"src.model.predict.predict_nw",
"numpy.argmax",
"src.model.models.nw_inference",
"numpy.load",
"numpy.save"
] | [((308, 357), 'numpy.load', 'np.load', (['"""../data/processed/image_array_test.npy"""'], {}), "('../data/processed/image_array_test.npy')\n", (315, 357), True, 'import numpy as np\n'), ((380, 428), 'numpy.load', 'np.load', (['"""../data/processed/name_array_test.npy"""'], {}), "('../data/processed/name_array_test.npy')\n", (387, 428), True, 'import numpy as np\n'), ((507, 536), 'numpy.load', 'np.load', (['"""../model/nw_f1.npy"""'], {}), "('../model/nw_f1.npy')\n", (514, 536), True, 'import numpy as np\n'), ((550, 583), 'numpy.load', 'np.load', (['"""../model/nw_status.npy"""'], {}), "('../model/nw_status.npy')\n", (557, 583), True, 'import numpy as np\n'), ((632, 646), 'src.model.models.nw_inference', 'nw_inference', ([], {}), '()\n', (644, 646), False, 'from src.model.models import nw_inference\n'), ((785, 818), 'src.model.predict.predict_nw', 'predict_nw', (['model', 'img_array_test'], {}), '(model, img_array_test)\n', (795, 818), False, 'from src.model.predict import predict_nw\n'), ((862, 908), 'numpy.save', 'np.save', (['"""../submission/nw_prob.npy"""', 'res_prob'], {}), "('../submission/nw_prob.npy', res_prob)\n", (869, 908), True, 'import numpy as np\n'), ((913, 966), 'numpy.save', 'np.save', (['"""../submission/nw_name.npy"""', 'name_array_test'], {}), "('../submission/nw_name.npy', name_array_test)\n", (920, 966), True, 'import numpy as np\n'), ((602, 615), 'numpy.argmax', 'np.argmax', (['f1'], {}), '(f1)\n', (611, 615), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import pandas as pd
# Example input: (0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 20, 1000, 5, 0, 1, 0, 1, 500, 100, 300,
# 3, 100, 0, 300, 2000, 4, 1, 1, 500, 0, 0, 0)
def predict_p(user_input, model, scaler):
    """Return the class probabilities for one raw input row.

    The first 30 features are standardised with ``scaler``; the remaining
    features are passed through unchanged before classification.

    Args:
        user_input: Sequence of feature values (first 30 are numeric).
        model: Fitted classifier exposing ``predict_proba``.
        scaler: Fitted scaler exposing ``transform``.

    Returns:
        Probabilities of class 0 and class 1 as given by the model.
    """
    row = pd.DataFrame(np.array(user_input)).T
    row.iloc[:, 0:30] = row.iloc[:, 0:30].astype(float)
    # Standardise only the numeric block, then re-attach the rest.
    scaled_block = pd.DataFrame(scaler.transform(row.iloc[:, 0:30]))
    features = pd.concat([scaled_block, row.iloc[:, 30:]], axis=1, join="inner")
    return model.predict_proba(features)
# tested with this input:
# predict_p((0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 20, 1000, 5, 0, 1, 0, 1, 500, 100, 300,
# 3, 100, 0, 300, 2000, 4, 1, 1, 500, 0, 0, 0), model, scaler)
| [
"pandas.DataFrame",
"numpy.array",
"pandas.concat"
] | [((461, 481), 'numpy.array', 'np.array', (['user_input'], {}), '(user_input)\n', (469, 481), True, 'import numpy as np\n'), ((637, 657), 'pandas.DataFrame', 'pd.DataFrame', (['scaled'], {}), '(scaled)\n', (649, 657), True, 'import pandas as pd\n'), ((675, 736), 'pandas.concat', 'pd.concat', (['[scaled_df, df.iloc[:, 30:]]'], {'axis': '(1)', 'join': '"""inner"""'}), "([scaled_df, df.iloc[:, 30:]], axis=1, join='inner')\n", (684, 736), True, 'import pandas as pd\n'), ((491, 515), 'pandas.DataFrame', 'pd.DataFrame', (['user_input'], {}), '(user_input)\n', (503, 515), True, 'import pandas as pd\n')] |
import gpflow
import meshzoo
import numpy as np
import pytest
import tensorflow as tf
from geometric_kernels.backends.tensorflow import GPflowGeometricKernel
from geometric_kernels.kernels import MaternKarhunenLoeveKernel
from geometric_kernels.spaces import Mesh
class DefaultFloatZero(gpflow.mean_functions.Constant):
    """Zero mean function whose output dtype is gpflow's default_float.

    gpflow's stock constant mean takes its dtype from the inputs; here
    the inputs are integer vertex indices, which would otherwise cause a
    dtype mismatch. This variant always emits zeros with dtype
    ``gpflow.default_float()``.
    """
    def __init__(self, output_dim=1):
        super().__init__()
        self.output_dim = output_dim
        # Drop the parent's constant parameter: this mean is identically zero.
        del self.c
    def __call__(self, inputs):
        # Keep the batch shape of the inputs, replace the last axis with
        # the configured output dimension.
        batch_shape = tf.shape(inputs)[:-1]
        target_shape = tf.concat([batch_shape, [self.output_dim]], axis=0)
        return tf.zeros(target_shape, dtype=gpflow.default_float())
# filename = Path(__file__).parent / "../teddy.obj"
# mesh = Mesh.load_mesh(str(filename))
# return mesh
# TODO(VD) This needs fixing!
@pytest.mark.skip()
def test_gpflow_integration():
    """
    Build GPflow GPR model with a Mesh Geometric Kernel.

    Currently skipped (see the TODO above): the test builds a GPR on
    random vertex indices of an icosphere mesh and runs prediction.
    """
    # Icosphere mesh refined 5 times.
    resolution = 5
    vertices, faces = meshzoo.icosa_sphere(resolution)
    mesh = Mesh(vertices, faces)
    # Matern-1/2 kernel truncated to the first 20 eigenpairs.
    nu = 1 / 2.0
    truncation_level = 20
    base_kernel = MaternKarhunenLoeveKernel(mesh, nu, truncation_level)
    kernel = GPflowGeometricKernel(base_kernel)
    num_data = 25
    def get_data():
        # np.random.seed(1)
        # Inputs are random vertex indices; targets are drawn from the
        # kernel's GP prior (1e-6 jitter keeps the Cholesky stable).
        _X = np.random.randint(mesh.num_vertices, size=(num_data, 1))
        _K = kernel.K(_X).numpy()
        _y = np.linalg.cholesky(_K + np.eye(num_data) * 1e-6) @ np.random.randn(
            num_data, 1
        )
        return _X, _y
    X, y = get_data()
    model = gpflow.models.GPR(
        (X, y), kernel, mean_function=DefaultFloatZero(), noise_variance=1.1e-6
    )
    print(model.log_marginal_likelihood())
    # Predict at every vertex of the mesh.
    X_test = np.arange(mesh.num_vertices).reshape(-1, 1)
    # print(X_test)
    m, v = model.predict_f(X_test)
    m, v = m.numpy(), v.numpy()
    model.predict_f_samples(X_test).numpy()
    # print(sample.shape)
    # ps.init()
    # ps_cloud = ps.register_point_cloud("my points", vertices[X.flatten()])
    # ps_cloud.add_scalar_quantity("data", y.flatten())
    # my_mesh = ps.register_surface_mesh("my mesh", vertices, faces, smooth_shade=True)
    # my_mesh.add_scalar_quantity(f"sample", sample.squeeze(), enabled=True)
    # my_mesh.add_scalar_quantity(f"mean", m.squeeze(), enabled=True)
    # my_mesh.add_scalar_quantity(f"variance", v.squeeze(), enabled=True)
    # ps.show()
| [
"numpy.eye",
"geometric_kernels.kernels.MaternKarhunenLoeveKernel",
"tensorflow.shape",
"geometric_kernels.spaces.Mesh",
"meshzoo.icosa_sphere",
"gpflow.default_float",
"pytest.mark.skip",
"geometric_kernels.backends.tensorflow.GPflowGeometricKernel",
"numpy.random.randint",
"numpy.random.randn",
... | [((986, 1004), 'pytest.mark.skip', 'pytest.mark.skip', ([], {}), '()\n', (1002, 1004), False, 'import pytest\n'), ((1150, 1182), 'meshzoo.icosa_sphere', 'meshzoo.icosa_sphere', (['resolution'], {}), '(resolution)\n', (1170, 1182), False, 'import meshzoo\n'), ((1194, 1215), 'geometric_kernels.spaces.Mesh', 'Mesh', (['vertices', 'faces'], {}), '(vertices, faces)\n', (1198, 1215), False, 'from geometric_kernels.spaces import Mesh\n'), ((1278, 1331), 'geometric_kernels.kernels.MaternKarhunenLoeveKernel', 'MaternKarhunenLoeveKernel', (['mesh', 'nu', 'truncation_level'], {}), '(mesh, nu, truncation_level)\n', (1303, 1331), False, 'from geometric_kernels.kernels import MaternKarhunenLoeveKernel\n'), ((1345, 1379), 'geometric_kernels.backends.tensorflow.GPflowGeometricKernel', 'GPflowGeometricKernel', (['base_kernel'], {}), '(base_kernel)\n', (1366, 1379), False, 'from geometric_kernels.backends.tensorflow import GPflowGeometricKernel\n'), ((1460, 1516), 'numpy.random.randint', 'np.random.randint', (['mesh.num_vertices'], {'size': '(num_data, 1)'}), '(mesh.num_vertices, size=(num_data, 1))\n', (1477, 1516), True, 'import numpy as np\n'), ((1615, 1643), 'numpy.random.randn', 'np.random.randn', (['num_data', '(1)'], {}), '(num_data, 1)\n', (1630, 1643), True, 'import numpy as np\n'), ((1886, 1914), 'numpy.arange', 'np.arange', (['mesh.num_vertices'], {}), '(mesh.num_vertices)\n', (1895, 1914), True, 'import numpy as np\n'), ((822, 844), 'gpflow.default_float', 'gpflow.default_float', ([], {}), '()\n', (842, 844), False, 'import gpflow\n'), ((727, 743), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (735, 743), True, 'import tensorflow as tf\n'), ((1588, 1604), 'numpy.eye', 'np.eye', (['num_data'], {}), '(num_data)\n', (1594, 1604), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
from scipy import sparse
from scipy.sparse import linalg
def cg_cg(A, b, x0, max_iter, callbacks=None, **kwargs):
    '''
    Chronopoulos and Gear Conjugate Gradient
    (implementation from Greenbaum, Liu, Chen 2019)

    Solves the symmetric positive definite system A x = b.

    Parameters
    ----------
    A : ndarray or sparse matrix, shape (n, n)
        System matrix; must support the ``@`` operator.
    b : ndarray, shape (n,)
        Right-hand side.
    x0 : ndarray, shape (n,)
        Initial guess.
    max_iter : int
        Iteration budget; the loop performs ``max_iter - 1`` updates.
    callbacks : list of callables, optional
        Each is invoked as ``callback(**locals())`` after initialization
        and after each iteration; use this to record iterates such as
        ``x_k`` or residuals ``r_k``.

    Returns
    -------
    output : dict
        Run metadata ('name' and 'max_iter'). The iterates themselves
        are only observable through ``callbacks``.
    '''
    # Avoid a mutable default argument; empty list means "no callbacks".
    if callbacks is None:
        callbacks = []
    # get size of problem
    n = len(b)
    # initialize
    output = {}
    output['name'] = 'cg_cg'
    output['max_iter'] = max_iter
    x_k = np.copy(x0)
    r_k = np.copy(b - A @ x_k)   # initial residual
    w_k = A @ r_k
    p_k = np.copy(r_k)           # initial search direction
    nu_k = r_k @ r_k             # ||r||^2
    eta_k = w_k @ r_k
    s_k = A @ p_k
    u_k = np.copy(s_k)
    mu_k = p_k @ s_k
    a_k = nu_k / mu_k            # first step length alpha_0
    a_k1 = 0
    a_k2 = 0
    b_k = 0
    b_k1 = 0
    k = 0
    # NOTE: callbacks receive **locals(), so the local variable names are
    # part of the observable interface and must not be renamed.
    for callback in callbacks:
        callback(**locals())
    # run main optimization
    for k in range(1, max_iter):
        # shift iterates: *_k becomes *_k1 (previous-step values)
        a_k2 = a_k1
        a_k1 = a_k
        b_k1 = b_k
        nu_k1 = nu_k
        eta_k1 = eta_k
        mu_k1 = mu_k
        x_k1 = np.copy(x_k)
        r_k1 = np.copy(r_k)
        w_k1 = np.copy(w_k)
        p_k1 = np.copy(p_k)
        s_k1 = np.copy(s_k)
        u_k1 = np.copy(u_k)
        # main loop
        x_k = x_k1 + a_k1 * p_k1      # solution update
        r_k = r_k1 - a_k1 * s_k1      # residual recurrence
        w_k = A @ r_k                 # single matrix-vector product
        nu_k = r_k @ r_k
        eta_k = w_k @ r_k
        b_k = nu_k / nu_k1            # beta
        p_k = r_k + b_k * p_k1        # new search direction
        s_k = w_k + b_k * s_k1        # A @ p_k via recurrence
        mu_k = eta_k - (b_k / a_k1) * nu_k
        a_k = nu_k / mu_k             # next step length
        # call callback functions
        for callback in callbacks:
            callback(**locals())
    return output
def cg_pcg(A, b, x0, max_iter, preconditioner=lambda x: x, callbacks=None,
           **kwargs):
    '''
    Chronopoulos and Gear Preconditioned Conjugate Gradient
    (implementation from Greenbaum, Liu, Chen 2019)

    Solves the symmetric positive definite system A x = b with a
    (left-applied) preconditioner.

    Parameters
    ----------
    A : ndarray or sparse matrix, shape (n, n)
        System matrix; must support the ``@`` operator.
    b : ndarray, shape (n,)
        Right-hand side.
    x0 : ndarray, shape (n,)
        Initial guess.
    max_iter : int
        Iteration budget; the loop performs ``max_iter - 1`` updates.
    preconditioner : callable, optional
        Maps a residual vector to its preconditioned counterpart
        (default is the identity).
    callbacks : list of callables, optional
        Each is invoked as ``callback(**locals())`` after initialization
        and after each iteration.

    Returns
    -------
    output : dict
        Run metadata ('name' and 'max_iter'). The iterates themselves
        are only observable through ``callbacks``.
    '''
    # Avoid a mutable default argument; empty list means "no callbacks".
    if callbacks is None:
        callbacks = []
    # get size of problem
    n = len(b)
    # initialize
    output = {}
    output['name'] = 'cg_pcg'
    output['max_iter'] = max_iter
    x_k = np.copy(x0)
    r_k = np.copy(b - A @ x_k)     # initial residual
    rt_k = preconditioner(r_k)     # preconditioned residual
    w_k = A @ rt_k
    p_k = np.copy(rt_k)            # initial search direction
    nu_k = r_k @ rt_k
    eta_k = w_k @ rt_k
    s_k = A @ p_k
    u_k = np.copy(s_k)
    mu_k = p_k @ s_k
    a_k = nu_k / mu_k              # first step length alpha_0
    a_k1 = 0
    a_k2 = 0
    b_k = 0
    b_k1 = 0
    k = 0
    # NOTE: callbacks receive **locals(), so the local variable names are
    # part of the observable interface and must not be renamed.
    for callback in callbacks:
        callback(**locals())
    # run main optimization
    for k in range(1, max_iter):
        # shift iterates: *_k becomes *_k1 (previous-step values)
        a_k2 = a_k1
        a_k1 = a_k
        b_k1 = b_k
        nu_k1 = nu_k
        eta_k1 = eta_k
        mu_k1 = mu_k
        x_k1 = np.copy(x_k)
        r_k1 = np.copy(r_k)
        rt_k1 = np.copy(rt_k)
        w_k1 = np.copy(w_k)
        p_k1 = np.copy(p_k)
        s_k1 = np.copy(s_k)
        u_k1 = np.copy(u_k)
        # main loop
        x_k = x_k1 + a_k1 * p_k1      # solution update
        r_k = r_k1 - a_k1 * s_k1      # residual recurrence
        rt_k = preconditioner(r_k)
        w_k = A @ rt_k                # single matrix-vector product
        nu_k = r_k @ rt_k
        eta_k = w_k @ rt_k
        b_k = nu_k / nu_k1            # beta
        p_k = rt_k + b_k * p_k1       # new search direction
        s_k = w_k + b_k * s_k1        # A @ p_k via recurrence
        mu_k = eta_k - (b_k / a_k1) * nu_k
        a_k = nu_k / mu_k             # next step length
        # call callback functions
        for callback in callbacks:
            callback(**locals())
    return output
"numpy.copy"
] | [((465, 476), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (472, 476), True, 'import numpy as np\n'), ((491, 511), 'numpy.copy', 'np.copy', (['(b - A @ x_k)'], {}), '(b - A @ x_k)\n', (498, 511), True, 'import numpy as np\n'), ((550, 562), 'numpy.copy', 'np.copy', (['r_k'], {}), '(r_k)\n', (557, 562), True, 'import numpy as np\n'), ((649, 661), 'numpy.copy', 'np.copy', (['s_k'], {}), '(s_k)\n', (656, 661), True, 'import numpy as np\n'), ((2189, 2200), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (2196, 2200), True, 'import numpy as np\n'), ((2215, 2235), 'numpy.copy', 'np.copy', (['(b - A @ x_k)'], {}), '(b - A @ x_k)\n', (2222, 2235), True, 'import numpy as np\n'), ((2309, 2322), 'numpy.copy', 'np.copy', (['rt_k'], {}), '(rt_k)\n', (2316, 2322), True, 'import numpy as np\n'), ((2411, 2423), 'numpy.copy', 'np.copy', (['s_k'], {}), '(s_k)\n', (2418, 2423), True, 'import numpy as np\n'), ((1115, 1127), 'numpy.copy', 'np.copy', (['x_k'], {}), '(x_k)\n', (1122, 1127), True, 'import numpy as np\n'), ((1154, 1166), 'numpy.copy', 'np.copy', (['r_k'], {}), '(r_k)\n', (1161, 1166), True, 'import numpy as np\n'), ((1185, 1197), 'numpy.copy', 'np.copy', (['w_k'], {}), '(w_k)\n', (1192, 1197), True, 'import numpy as np\n'), ((1216, 1228), 'numpy.copy', 'np.copy', (['p_k'], {}), '(p_k)\n', (1223, 1228), True, 'import numpy as np\n'), ((1247, 1259), 'numpy.copy', 'np.copy', (['s_k'], {}), '(s_k)\n', (1254, 1259), True, 'import numpy as np\n'), ((1278, 1290), 'numpy.copy', 'np.copy', (['u_k'], {}), '(u_k)\n', (1285, 1290), True, 'import numpy as np\n'), ((2877, 2889), 'numpy.copy', 'np.copy', (['x_k'], {}), '(x_k)\n', (2884, 2889), True, 'import numpy as np\n'), ((2908, 2920), 'numpy.copy', 'np.copy', (['r_k'], {}), '(r_k)\n', (2915, 2920), True, 'import numpy as np\n'), ((2939, 2952), 'numpy.copy', 'np.copy', (['rt_k'], {}), '(rt_k)\n', (2946, 2952), True, 'import numpy as np\n'), ((2971, 2983), 'numpy.copy', 'np.copy', (['w_k'], {}), '(w_k)\n', (2978, 2983), True, 
'import numpy as np\n'), ((3002, 3014), 'numpy.copy', 'np.copy', (['p_k'], {}), '(p_k)\n', (3009, 3014), True, 'import numpy as np\n'), ((3033, 3045), 'numpy.copy', 'np.copy', (['s_k'], {}), '(s_k)\n', (3040, 3045), True, 'import numpy as np\n'), ((3064, 3076), 'numpy.copy', 'np.copy', (['u_k'], {}), '(u_k)\n', (3071, 3076), True, 'import numpy as np\n')] |
import numpy as np
import numpy.random
import scipy.stats
def clip_theta_mu(theta):
    """Clamp the location-model coefficients elementwise to [-1e8, 1e8].

    Uses np.clip instead of nested maximum/minimum, which also avoids
    allocating two temporary zero arrays of len(theta).
    """
    return np.clip(theta, -1e8, 1e8)
def clip_theta_disp(theta):
    """Clamp the dispersion-model coefficients elementwise to [-1e8, 1e8].

    Uses np.clip instead of nested maximum/minimum, which also avoids
    allocating two temporary zero arrays of len(theta).
    """
    return np.clip(theta, -1e8, 1e8)
def nb_glm_linker_mu(theta, X, lib_size):
    """Inverse-link of the mean model: exp(X @ clipped(theta) + lib_size).

    Returns a flat 1-D array of per-observation means.
    """
    clipped = np.asarray(clip_theta_mu(theta))
    linear = np.asarray(np.dot(X, clipped.T)).flatten()
    return np.exp(linear + lib_size)
def nb_glm_linker_disp(theta, X):
    """Inverse-link of the dispersion model: exp(X @ clipped(theta)).

    Returns a flat 1-D array of per-observation dispersions.
    """
    clipped = np.asarray(clip_theta_disp(theta))
    return np.asarray(np.exp(np.dot(X, clipped.T))).flatten()
def ll_nb(x, mu, disp):
    """Negative-binomial log-pmf of x given mean ``mu`` and dispersion ``disp``."""
    # Re-parameterize mean/dispersion: variance = mu + mu^2/disp,
    # floored away from zero to keep the success probability defined.
    var_floor = np.zeros(len(mu)) + 1e-8
    variance = np.maximum(mu + np.square(mu) / disp, var_floor)
    # scipy's nbinom takes (n, p); the double complement below mirrors the
    # original formulation so the result stays bit-identical.
    p = 1 - (mu / variance)
    return scipy.stats.nbinom(n=disp, p=1 - p).logpmf(x)
def objective_ll(x, theta_mu, theta_disp, design_loc, design_scale, lib_size):
    """Per-observation negative log-likelihood of the NB GLM."""
    mean = nb_glm_linker_mu(theta_mu, design_loc, lib_size)
    dispersion = nb_glm_linker_disp(theta_disp, design_scale)
    return -ll_nb(x=x, mu=mean, disp=dispersion)
def objective(theta, x, design_loc, design_scale, lib_size, batch_size=100):
    """Summed NB negative log-likelihood, optionally on a random mini-batch.

    ``theta`` concatenates the location coefficients (first
    ``design_loc.shape[1]`` entries) and the scale coefficients (the
    rest). With ``batch_size=None`` the full dataset is used; otherwise
    ``batch_size`` rows are drawn uniformly with replacement.
    """
    n_loc = design_loc.shape[1]
    theta_arr = np.asarray(theta)
    theta_mu = theta_arr[:n_loc]
    theta_disp = theta_arr[n_loc:]
    if batch_size is None:
        return np.sum(objective_ll(x=x,
                                    theta_mu=theta_mu,
                                    theta_disp=theta_disp,
                                    design_loc=design_loc,
                                    design_scale=design_scale,
                                    lib_size=lib_size))
    # Mini-batch: subsample rows of the data and both design matrices.
    idx = numpy.random.randint(low=0, high=x.shape[0], size=batch_size)
    return np.sum(objective_ll(x=x[idx],
                                theta_mu=theta_mu,
                                theta_disp=theta_disp,
                                design_loc=design_loc[idx, :],
                                design_scale=design_scale[idx, :],
                                lib_size=lib_size[idx]))
| [
"numpy.asarray",
"numpy.square"
] | [((657, 670), 'numpy.square', 'np.square', (['mu'], {}), '(mu)\n', (666, 670), True, 'import numpy as np\n'), ((1200, 1217), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (1210, 1217), True, 'import numpy as np\n'), ((1284, 1301), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (1294, 1301), True, 'import numpy as np\n'), ((1674, 1691), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (1684, 1691), True, 'import numpy as np\n'), ((1758, 1775), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (1768, 1775), True, 'import numpy as np\n')] |
'''
Designed to plot data.
'''
import numpy
import scipy.signal
import scipy.interpolate
import style
import matplotlib.collections
import matplotlib.animation
def _subplot_row(out_filenames,
                 x_variable,
                 y_variable_list,
                 xlabel,
                 ylabel,
                 text_list,
                 xmin,
                 xmax,
                 ymin,
                 ymax,
                 color=style.DARK_COLOR,
                 grid=True,
                 figsize=(style.ONE_AND_HALF_COLUMN_WIDTH,
                          style.ONE_AND_HALF_COLUMN_SHORT_HEIGHT)):
    '''
    General function for subplots of multi rows.

    All rows share the x axis; only the bottom row shows tick labels
    and the axis labels.

    Parameters
    ----------
    out_filenames : a list of string
        The filenames for saving figures.
    x_variable : rank-1 array
        Varialbe for x axis.
    y_variable_list : rank-2 array or a list of rank-1 array
        A list of varialbes for y axis. One variable for one row.
    xlabel : string
        The label shown on the x axis.
    ylabel : string
        The label shown on the y axis for the lowest figure.
    text_list : a list of string
        Texts to label each row or each y variable.
    xmin : float
        Lower limit for x axis.
    xmax : float
        Upper limit for x axis.
    ymin : float
        Lower limit for y axis.
    ymax : float
        Upper limit for y axis.
    color : string (must be acceptable for matplotlib )
        Line color used in the figure.
    grid : bool
        True if grid is necessary.
    figsize : float tuple (float1, float2)
        Figure size (width, height).
    '''
    # NOTE(review): the assert message is the *return value* of print()
    # (i.e. None); the print only executes when the assertion fails.
    assert len(y_variable_list) == len(text_list), print(
        len(y_variable_list), '!=', len(text_list))
    figure, axis_tuple = matplotlib.pyplot.subplots(
        len(y_variable_list), 1, figsize=figsize, sharex=True)
    for i, (axis, y_variable,
            text) in enumerate(zip(axis_tuple, y_variable_list, text_list)):
        axis.plot(x_variable, y_variable, color=color)
        axis.set_xlim(xmin, xmax)
        axis.set_ylim(ymin, ymax)
        # axis.yaxis.set_major_formatter(nice_math_text_form)
        axis.locator_params(axis='x', nbins=style.LONG_XTICK_MAX_LENGTH)
        axis.locator_params(axis='y', nbins=style.SHORT_YTICK_MAX_LENGTH)
        if i < len(text_list) - 1:
            # Upper rows: hide y tick labels and suppress power notation.
            matplotlib.pyplot.setp(axis.get_yticklabels(), visible=False)
            axis.yaxis.set_major_formatter(style.NO_POWER_FORM)
        else:
            # Bottom row: fold any scientific-notation offset text into
            # the axis labels instead of showing it next to the ticks.
            # NOTE(review): savefig('') appears intended to force a draw
            # so the offset texts get populated; an empty filename raises
            # in recent matplotlib -- figure.canvas.draw() is the usual
            # idiom. Confirm before changing.
            figure.savefig('')
            x_sci_notaion = axis.xaxis.get_offset_text()
            x_sci_notaion.set_visible(False)
            if x_sci_notaion.get_text():
                # Assumes xlabel ends with a closing character (e.g. ')')
                # that is dropped before appending the magnitude -- TODO
                # confirm label format against callers.
                xlabel = r"{:s} / {:s}".format(xlabel[:-1],
                                               x_sci_notaion.get_text()[1:])
            y_sci_notaion = axis.yaxis.get_offset_text()
            y_sci_notaion.set_visible(False)
            if y_sci_notaion.get_text():
                ylabel = r"{:s} / {:s}".format(ylabel[:-1],
                                               y_sci_notaion.get_text()[1:])
            axis.set_xlabel(xlabel)
            axis.set_ylabel(ylabel)
        # Row label in the top-left corner, in axes coordinates.
        axis.text(
            style.LEFT_CORNER,
            style.TOP_CORNER,
            text,
            horizontalalignment='left',
            verticalalignment='top',
            transform=axis.transAxes)
        axis.grid(grid)
    # matplotlib.pyplot.tight_layout()
    for out_filename in out_filenames:
        figure.savefig(out_filename)
    matplotlib.pyplot.close()
def _contour(
        out_filenames,
        variable,
        x,
        y,
        xlabel,
        ylabel,
        colorbar_min,
        colorbar_max,
        contour_num,
        color=style.DARK_COLOR,
        figsize=(style.SINGLE_COLUMN_WIDTH, style.SINGLE_COLUMN_SHORT_HEIGHT)):
    '''
    Draw line contours of a rank-2 variable and save the figure.

    Parameters
    ----------
    out_filenames : a list of string
        The filenames for saving figures.
    variable : rank-2 array
        Concerned variable for contours.
    x : rank-1 array or a list of rank-1 array
        Variable for x axis.
    y : rank-1 array or a list of rank-1 array
        Variable for y axis.
    xlabel : string
        The label shown on the x axis.
    ylabel : string
        The label shown on the y axis.
    colorbar_min : float
        Lower limit for the contour.
    colorbar_max : float
        Upper limit for the contour.
    contour_num : int
        Line number for the contour.
    color : string (must be acceptable for matplotlib)
        Line color used in the figure.
    figsize : float tuple (float1, float2)
        Figure size (width, height).
    '''
    fig, ax = matplotlib.pyplot.subplots(figsize=figsize)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.locator_params(axis='x', nbins=style.LONG_XTICK_MAX_LENGTH)
    ax.locator_params(axis='y', nbins=style.LONG_YTICK_MAX_LENGTH)
    # 'ij' indexing keeps the first array axis aligned with x.
    mesh_x, mesh_y = numpy.meshgrid(x, y, indexing='ij')
    levels = numpy.linspace(colorbar_min, colorbar_max, contour_num)
    ax.contour(mesh_x, mesh_y, variable, levels, colors=color, extend='both')
    for name in out_filenames:
        fig.savefig(name)
    matplotlib.pyplot.close('all')
def _contourf(
        out_filenames,
        variable,
        x,
        y,
        xlabel,
        ylabel,
        colorbar_min,
        colorbar_max,
        contourf_num,
        colorbar=False,
        colorbar_zlabel='',
        cmap=style.CMAP_SINGLE,
        figsize=(style.SINGLE_COLUMN_WIDTH, style.SINGLE_COLUMN_SHORT_HEIGHT)):
    '''
    Draw filled contours of a rank-2 variable and save the figure.

    Parameters
    ----------
    out_filenames : a list of string
        The filenames for saving figures.
    variable : rank-2 array
        Concerned variable for contours.
    x : rank-1 array or a list of rank-1 array
        Variable for x axis.
    y : rank-1 array or a list of rank-1 array
        Variable for y axis.
    xlabel : string
        The label shown on the x axis.
    ylabel : string
        The label shown on the y axis.
    colorbar_min : float
        Lower limit for the contour.
    colorbar_max : float
        Upper limit for the contour.
    contourf_num : int
        Line number for the contourf.
    colorbar : bool
        True if colorbar is necessary.
    colorbar_zlabel : string
        Label shown on the colorbar.
    cmap : matplotlib cmap
        Color maps used for the contourfs.
    figsize : float tuple (float1, float2)
        Figure size (width, height).
    '''
    fig, ax = matplotlib.pyplot.subplots(figsize=figsize)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.locator_params(axis='x', nbins=style.LONG_XTICK_MAX_LENGTH)
    ax.locator_params(axis='y', nbins=style.LONG_YTICK_MAX_LENGTH)
    mesh_x, mesh_y = numpy.meshgrid(x, y, indexing='ij')
    levels = numpy.linspace(colorbar_min, colorbar_max, contourf_num)
    filled = ax.contourf(mesh_x, mesh_y, variable, levels, cmap=cmap, extend='both')
    if colorbar:
        bar_ax = fig.colorbar(filled).ax
        if colorbar_zlabel:
            # Force a draw so the colorbar offset (scientific-notation)
            # text is populated before it is queried.
            fig.savefig('')
            offset_text = bar_ax.yaxis.get_offset_text()
            offset_text.set_visible(False)
            if offset_text.get_text():
                # Fold the power-of-ten offset into the label; the label's
                # trailing character and the offset's leading one are dropped.
                colorbar_zlabel = "{:s} / {:s}".format(
                    colorbar_zlabel[:-1], offset_text.get_text()[1:])
            bar_ax.set_title(colorbar_zlabel)
    for name in out_filenames:
        fig.savefig(name)
    matplotlib.pyplot.close()
def _contourf_contour(
        out_filenames,
        variable,
        x,
        y,
        xlabel,
        ylabel,
        colorbar_min,
        colorbar_max,
        contourf_num,
        contour_num,
        colorbar_zlabel='',
        cmap=style.CMAP_DOUBLE,
        color=style.LIGHT_COLOR,
        figsize=(style.SINGLE_COLUMN_WIDTH, style.SINGLE_COLUMN_SHORT_HEIGHT)):
    '''
    Draw filled contours overlaid with line contours and a colorbar.

    Parameters
    ----------
    out_filenames : a list of string
        The filenames for saving figures.
    variable : rank-2 array
        Concerned variable for contours.
    x : rank-1 array or a list of rank-1 array
        Variable for x axis.
    y : rank-1 array or a list of rank-1 array
        Variable for y axis.
    xlabel : string
        The label shown on the x axis.
    ylabel : string
        The label shown on the y axis.
    colorbar_min : float
        Lower limit for the contour.
    colorbar_max : float
        Upper limit for the contour.
    contourf_num : int
        Line number for the contourf.
    contour_num : int
        Line number for the contour.
    colorbar_zlabel : string
        Label shown on the colorbar.
    cmap : matplotlib cmap
        Color maps used for the contourfs.
    color : string (must be acceptable for matplotlib)
        Line color used in the figure.
    figsize : float tuple (float1, float2)
        Figure size (width, height).
    '''
    figure, axis = matplotlib.pyplot.subplots(figsize=figsize)
    axis.set_xlabel(xlabel)
    axis.set_ylabel(ylabel)
    axis.locator_params(
        axis='x', nbins=style.LONG_XTICK_MAX_LENGTH)
    axis.locator_params(
        axis='y', nbins=style.LONG_YTICK_MAX_LENGTH)
    # 'ij' indexing keeps the first array axis aligned with x.
    X, Y = numpy.meshgrid(x, y, indexing='ij')
    contourf_range = numpy.linspace(colorbar_min, colorbar_max, contourf_num)
    contour_range = numpy.linspace(colorbar_min, colorbar_max, contour_num)
    # variable[variable > colorbar_max] = colorbar_max
    # variable[variable < colorbar_min] = colorbar_min
    # Line contours are drawn first, filled contours second.
    axis.contour(X, Y, variable, contour_range, colors=color, extend='both')
    contourf_ = axis.contourf(X, Y, variable, contourf_range, cmap=cmap, extend='both')
    # Use every other contour level as a colorbar tick, rounded to the
    # order of magnitude of the tick spacing (negative ``decimals`` rounds
    # to the left of the decimal point).
    contour_range = numpy.round(
        contour_range[::2],
        decimals=-int(
            numpy.floor(numpy.log10(numpy.diff(contour_range[::2])[0]))))
    colorbar_ax = figure.colorbar(contourf_, ticks=contour_range).ax
    if colorbar_zlabel:
        # Force a draw so the colorbar offset (scientific-notation) text is
        # populated — presumably relies on the backend accepting an empty
        # filename; TODO confirm.
        figure.savefig('')
        colorbar_sci_notaion = colorbar_ax.yaxis.get_offset_text()
        colorbar_sci_notaion.set_visible(False)
        if colorbar_sci_notaion.get_text():
            # Fold the power-of-ten offset into the colorbar label.
            colorbar_zlabel = "{:s} / {:s}".format(
                colorbar_zlabel[:-1], colorbar_sci_notaion.get_text()[1:])
        colorbar_ax.set_title(colorbar_zlabel)
    # colorbar_ax.yaxis.set_major_formatter(style.NO_POWER_FORM)
    matplotlib.pyplot.tight_layout()
    for out_filename in out_filenames:
        figure.savefig(out_filename)
    matplotlib.pyplot.close()
class ForceVisualization:
'''
Visualize all analytical results of force data obtained from ForceAnalysis.
Parameters
----------
force_analysis : ForceAnalysis instance
All analytical results of force data prepared for visualization.
'''
    def __init__(self, force_analysis):
        '''
        Parameters
        ----------
        force_analysis : ForceAnalysis instance
            Analytical results of force data prepared for visualization.
        '''
        # Stored privately; exposed read-only through the
        # ``force_analysis`` property.
        self._force_analysis = force_analysis
    @property
    def force_analysis(self):
        '''
        ForceAnalysis instance containing all analytical results of force
        data. Read-only view of the instance passed to ``__init__``.
        '''
        return self._force_analysis
    def _plot_along_time_function(self,
                                  out_filenames,
                                  time_function,
                                  variable,
                                  xlabel,
                                  start_time,
                                  end_time,
                                  ylabel,
                                  ymin,
                                  ymax,
                                  color=style.DARK_COLOR,
                                  grid=True,
                                  figsize=(style.SINGLE_COLUMN_WIDTH,
                                           style.SINGLE_COLUMN_SHORT_HEIGHT)):
        '''
        General function for plots of a variable against a function of time.

        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        time_function : a ufunc of time
            Function results for x axis.
            NOTE(review): this parameter is currently unused — the raw time
            axis is plotted instead; confirm intent.
        variable : rank-1 array
            Variable for y axis.
        xlabel : string
            The label shown on the x axis for the lowest figure.
        start_time : float
            Lower limit for time function.
        end_time : float
            Upper limit for time function.
        ylabel : string
            The label shown on the y axis for the lowest figure.
        ymin : float
            Lower limit for y axis.
        ymax : float
            Upper limit for y axis.
        color : string (must be acceptable for matplotlib)
            Line color used in the figure.
        grid : bool
            True if grid is necessary.
        figsize : float tuple (float1, float2)
            Figure size (width, height).
        '''
        figure, axis = matplotlib.pyplot.subplots(figsize=figsize)
        # Boolean mask selecting samples inside [start_time, end_time].
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        selected_time = self.force_analysis.time[time_index]
        selected_variable = variable[time_index]
        # Plots on the current axes, i.e. the axis created above.
        matplotlib.pyplot.plot(
            selected_time,
            selected_variable,
            color=color)
        # matplotlib.pyplot.xlim(xmin, xmax)
        axis.set_ylim(ymin, ymax)
        matplotlib.pyplot.grid(grid)
        axis.locator_params(axis='x', nbins=style.LONG_XTICK_MAX_LENGTH)
        axis.locator_params(axis='y', nbins=style.SHORT_YTICK_MAX_LENGTH)
        # Force a draw so the axis offset (scientific-notation) texts are
        # populated before they are queried below.
        figure.savefig('')
        x_sci_notaion = axis.xaxis.get_offset_text()
        x_sci_notaion.set_visible(False)
        if x_sci_notaion.get_text():
            # Fold the power-of-ten offset into the axis label.
            xlabel = "{:s} / {:s}".format(xlabel[:-1],
                                          x_sci_notaion.get_text()[1:])
        y_sci_notaion = axis.yaxis.get_offset_text()
        y_sci_notaion.set_visible(False)
        if y_sci_notaion.get_text():
            ylabel = r"{:s} / {:s}".format(ylabel[:-1],
                                           y_sci_notaion.get_text()[1:])
        axis.set_xlabel(xlabel)
        axis.set_ylabel(ylabel)
        # matplotlib.pyplot.tight_layout()
        for out_filename in out_filenames:
            figure.savefig(out_filename)
        matplotlib.pyplot.close()
def _plot_along_time(self,
out_filenames,
variable,
start_time,
end_time,
ylabel,
ymin,
ymax,
xlabel=r'$t\mathrm{\ (s)}$',
color=style.DARK_COLOR,
reference_line_factor_tuple=(),
reference_line_share_maximum=False,
tol=1e-2,
input_time_referece_tuple=(),
grid=True,
figsize=(style.SINGLE_COLUMN_WIDTH,
style.SINGLE_COLUMN_SHORT_HEIGHT)):
'''
General function for plots along the time axis.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
variable : rank-1 array
Varialbe for y axis.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
ylabel : string
The label shown on the y axis for the lowest figure.
ymin : float
Lower limit for y axis.
ymax : float
Upper limit for y axis.
color : string (must be acceptable for matplotlib )
Line color used in the figure.
grid : bool
True if grid is necessary.
figsize : float tuple (float1, float2)
Figure size (width, height).
'''
figure, axis = matplotlib.pyplot.subplots(figsize=figsize)
xlabel = xlabel
# start_index = self.force_analysis.time_index(start_time)
# end_index = self.force_analysis.next_time_index(end_time)
time_index = (self.force_analysis.time >= start_time) & (
self.force_analysis.time <= end_time)
selected_time = self.force_analysis.time[time_index]
selected_variable = variable[time_index]
axis.plot(
selected_time,
selected_variable,
# self.force_analysis.time[start_index: end_index],
# variable[start_index: end_index],
color=color)
assert len(reference_line_factor_tuple) in (0, 2)
if len(reference_line_factor_tuple) is 2:
reference_max = numpy.amax(selected_variable)
reference_min = numpy.amin(selected_variable)
if reference_line_share_maximum:
reference_max = max(reference_max, abs(reference_min))
reference_min = -reference_max
reference_factor_max, reference_factor_min = reference_line_factor_tuple
upper_reference = reference_factor_max * reference_max
lower_reference = reference_factor_min * reference_min
upper_intersection_times = []
lower_intersection_times = []
if upper_reference >= 0:
upper_intersection_times = selected_time[(
selected_variable > upper_reference * (1 - tol)) & (
selected_variable < upper_reference * (1 + tol))]
else:
upper_intersection_times = selected_time[(
selected_variable < upper_reference * (1 - tol)) & (
selected_variable > upper_reference * (1 + tol))]
if lower_reference >= 0:
lower_intersection_times = selected_time[(
selected_variable > lower_reference * (1 - tol)) & (
selected_variable < lower_reference * (1 + tol))]
else:
lower_intersection_times = selected_time[(
selected_variable < lower_reference * (1 - tol)) & (
selected_variable > lower_reference * (1 + tol))]
intersection_time_min = numpy.amin(numpy.r_[
upper_intersection_times, lower_intersection_times])
intersection_time_max = numpy.amax(numpy.r_[
upper_intersection_times, lower_intersection_times])
axis.axhline(
y=upper_reference, **style.REFERENCE_LINE_STYLE)
axis.axhline(
y=lower_reference, **style.REFERENCE_LINE_STYLE)
axis.axvline(
x=intersection_time_min, **style.REFERENCE_LINE_STYLE)
axis.axvline(
x=intersection_time_max, **style.REFERENCE_LINE_STYLE)
# none reference
elif input_time_referece_tuple:
assert len(input_time_referece_tuple) is 2
axis.axvline(
x=input_time_referece_tuple[0], **style.REFERENCE_LINE_STYLE)
axis.axvline(
x=input_time_referece_tuple[1], **style.REFERENCE_LINE_STYLE)
axis.set_xlim(start_time, end_time)
axis.set_ylim(ymin, ymax)
matplotlib.pyplot.grid(grid)
axis.locator_params(axis='x', nbins=style.LONG_XTICK_MAX_LENGTH)
axis.locator_params(axis='y', nbins=style.SHORT_YTICK_MAX_LENGTH)
figure.savefig('')
x_sci_notaion = axis.xaxis.get_offset_text()
x_sci_notaion.set_visible(False)
if x_sci_notaion.get_text():
xlabel = "{:s} / {:s}".format(xlabel[:-1],
x_sci_notaion.get_text()[1:])
y_sci_notaion = axis.yaxis.get_offset_text()
y_sci_notaion.set_visible(False)
if y_sci_notaion.get_text():
ylabel = r"{:s} / {:s}".format(ylabel[:-1],
y_sci_notaion.get_text()[1:])
axis.set_xlabel(xlabel)
axis.set_ylabel(ylabel)
# matplotlib.pyplot.tight_layout()
for out_filename in out_filenames:
figure.savefig(out_filename)
matplotlib.pyplot.close()
if reference_line_factor_tuple:
return (intersection_time_min, intersection_time_max)
    def _subplot_along_time(self,
                            out_filenames,
                            variable_list,
                            start_time,
                            end_time,
                            ylabel,
                            text_list,
                            ymin,
                            ymax,
                            xlabel=r'$t\mathrm{\ (s)}$',
                            grid=False,
                            figsize=(style.SINGLE_COLUMN_WIDTH,
                                     style.SINGLE_COLUMN_SHORT_HEIGHT)):
        '''
        General function for stacked subplots along the time axis.

        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        variable_list : rank-2 array
            One row per subplot.
            NOTE(review): indexed below as ``variable_list[:, time_index]``,
            so this must be a 2-D numpy array, not a plain list — confirm.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        ylabel : string
            The label shown on the y axis for the lowest figure.
        text_list : a list of string
            Texts to label each row.
        ymin : float
            Lower limit for y axis.
        ymax : float
            Upper limit for y axis.
        xlabel : string
            The label shown on the x axis.
        grid : bool
            True if grid is necessary.
        figsize : float tuple (float1, float2)
            Figure size (width, height).
        '''
        # start_index = self.force_analysis.time_index(start_time)
        # end_index = self.force_analysis.next_time_index(end_time)
        # Boolean mask selecting samples inside [start_time, end_time].
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        _subplot_row(
            out_filenames=out_filenames,
            x_variable=self.force_analysis.time[time_index],
            y_variable_list=variable_list[:, time_index],
            # x_variable=self.force_analysis.time[start_index: end_index],
            # y_variable_list=variable_list[:, start_index: end_index],
            xlabel=xlabel,
            ylabel=ylabel,
            text_list=text_list,
            xmin=start_time,
            xmax=end_time,
            ymin=ymin,
            ymax=ymax,
            grid=grid,
            figsize=figsize)
def _plot_along_span(self,
out_filenames,
variable,
xlabel,
xmin,
xmax,
color=style.DARK_COLOR,
clf=True,
save=True,
grid=False,
figsize=(style.SINGLE_COLUMN_WIDTH / 2,
style.SINGLE_COLUMN_LONG_HEIGHT / 2)):
'''
General function for plots along the span axis.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
variable : rank-1 array
Varialbe for x axis.
xlabel : string
The label shown on the x axis.
xmin : float
Lower limit for x axis.
xmax : float
Upper limit for x axis.
color : string (must be acceptable for matplotlib )
Line color used in the figure.
clf : bool
True for starting a new figure.
save : bool
True for ending a figure.
grid : bool
True if grid is necessary.
figsize : float tuple (float1, float2)
Figure size (width, height).
'''
if clf:
matplotlib.pyplot.clf()
matplotlib.pyplot.gcf().set_size_inches(figsize)
axis = matplotlib.pyplot.gca()
figure = matplotlib.pyplot.gcf()
axis.locator_params(axis='x', nbins=style.SHORT_XTICK_MAX_LENGTH)
axis.locator_params(axis='y', nbins=style.LONG_YTICK_MAX_LENGTH)
matplotlib.pyplot.grid(grid)
matplotlib.pyplot.plot(
variable, self.force_analysis.span, color=color)
matplotlib.pyplot.xlim(xmin, xmax)
figure.savefig('')
x_sci_notaion = axis.xaxis.get_offset_text()
x_sci_notaion.set_visible(False)
if x_sci_notaion.get_text():
xlabel = "{:s} / {:s}".format(xlabel[:-1],
x_sci_notaion.get_text()[1:])
y_sci_notaion = axis.yaxis.get_offset_text()
y_sci_notaion.set_visible(False)
if y_sci_notaion.get_text():
ylabel = r"{:s} / {:s}".format(ylabel[:-1],
y_sci_notaion.get_text()[1:])
matplotlib.pyplot.xlabel(xlabel)
matplotlib.pyplot.ylabel(r'$z\cdot L^{-1}$')
# matplotlib.pyplot.tight_layout()
if save:
for out_filename in out_filenames:
figure.savefig(out_filename)
matplotlib.pyplot.close()
    def _subplot_along_span(self,
                            out_filenames,
                            variable_list,
                            xlabel_list,
                            xmin_list,
                            xmax_list,
                            color=style.DARK_COLOR,
                            grid=False,
                            figsize=(style.ONE_AND_HALF_COLUMN_WIDTH,
                                     style.ONE_AND_HALF_COLUMN_SHORT_HEIGHT)):
        '''
        General function for subplots along the span axis.

        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        variable_list : a list of rank-1 array
            One variable for x axis of each column.
        xlabel_list : string
            The labels shown on the x axes.
        xmin_list : a list of floats
            Lower limit for x axes.
        xmax_list : a list of floats
            Upper limit for x axes.
        color : string (must be acceptable for matplotlib)
            Line color used in the figure.
        grid : bool
            True if grid is necessary.
        figsize : float tuple (float1, float2)
            Figure size (width, height).
        '''
        assert len(variable_list) == len(xlabel_list) == len(xmin_list) == len(
            xmax_list)
        # One column of subplots per variable, all sharing the span axis.
        figure, axis_tuple = matplotlib.pyplot.subplots(
            1, len(variable_list), figsize=figsize, sharey=True)
        axis_tuple[0].set_ylabel(r'$z\cdot L^{-1}$')
        for axis, variable, xlabel, xmin, xmax in\
                zip(axis_tuple, variable_list, xlabel_list, xmin_list, xmax_list):
            axis.plot(variable, self.force_analysis.span, color=color)
            axis.set_xlim(xmin, xmax)
            axis.locator_params(axis='x', nbins=style.SHORT_XTICK_MAX_LENGTH)
            axis.locator_params(axis='y', nbins=style.LONG_YTICK_MAX_LENGTH)
            # Force a draw so this axis' offset (scientific-notation) text
            # is populated; done once per subplot.
            figure.savefig('')
            x_sci_notaion = axis.xaxis.get_offset_text()
            x_sci_notaion.set_visible(False)
            if x_sci_notaion.get_text():
                # Fold the power-of-ten offset into the axis label.
                xlabel = "{:s} / {:s}".format(xlabel[:-1],
                                              x_sci_notaion.get_text()[1:])
            axis.set_xlabel(xlabel)
            # axis.xaxis.set_major_formatter(nice_math_text_form)
            axis.grid(grid)
        # matplotlib.pyplot.tight_layout()
        for out_filename in out_filenames:
            figure.savefig(out_filename)
        matplotlib.pyplot.close()
def plot_force_along_time_function(
self,
out_filenames,
node_i,
start_time,
end_time,
xlabel,
force_label,
force_min,
force_max,
time_function,
grid=False,
figsize=(style.SINGLE_COLUMN_WIDTH,
style.SINGLE_COLUMN_SHORT_HEIGHT)):
'''
Plot the time history of force for specific node.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
node_i : int
Node number of the force to be plotted.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
force_label : string
The label shown on the force axis for the lowest figure.
force_min : float
Lower limit for force axis.
force_max : float
Upper limit for force axis.
'''
print('plot_force_along_time_function')
return self._plot_along_time_function(
out_filenames=out_filenames,
time_function=time_function,
variable=self.force_analysis.force[:, node_i - 1],
start_time=start_time,
end_time=end_time,
xlabel=xlabel,
ylabel=force_label,
ymin=force_min,
ymax=force_max,
grid=grid,
figsize=figsize)
def plot_time_history_force(
self,
out_filenames,
node_i,
start_time,
end_time,
force_label,
force_min,
force_max,
xlabel=r'$t\mathrm{\ (s)}$',
reference_line_factor_tuple=(),
grid=False,
figsize=(style.SINGLE_COLUMN_WIDTH,
style.SINGLE_COLUMN_SHORT_HEIGHT / 2)):
'''
Plot the time history of force for specific node.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
node_i : int
Node number of the force to be plotted.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
force_label : string
The label shown on the force axis for the lowest figure.
force_min : float
Lower limit for force axis.
force_max : float
Upper limit for force axis.
'''
print('plot_time_history_force')
return self._plot_along_time(
out_filenames=out_filenames,
variable=self.force_analysis.force[:, node_i - 1],
start_time=start_time,
end_time=end_time,
xlabel=xlabel,
ylabel=force_label,
ymin=force_min,
ymax=force_max,
reference_line_factor_tuple=reference_line_factor_tuple,
grid=grid,
figsize=figsize)
def plot_time_history_force_deviation(
self,
out_filenames,
node_i,
start_time,
end_time,
force_deviation_label,
force_deviation_min,
force_deviation_max,
xlabel=r'$t\mathrm{\ (s)}$',
figsize=(style.SINGLE_COLUMN_WIDTH,
style.SINGLE_COLUMN_SHORT_HEIGHT / 2)):
'''
Plot the time history of force deviation for specific node.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
node_i : int
Node number of the force deviation to be plotted.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
force_deviation_label : string
The label shown on the force deviation axis for the lowest figure.
force_deviation_min : float
Lower limit for force deviation axis.
force_deviation_max : float
Upper limit for force deviation axis.
'''
print('plot_time_history_force_deviation')
self._plot_along_time(
out_filenames=out_filenames,
variable=self.force_analysis.force_deviation[:, node_i -
1],
start_time=start_time,
end_time=end_time,
xlabel=xlabel,
ylabel=force_deviation_label,
ymin=force_deviation_min,
ymax=force_deviation_max,
figsize=figsize)
def plot_time_history_velocity(
self,
out_filenames,
node_i,
start_time,
end_time,
velocity_label,
velocity_min,
velocity_max,
reduced_velocity=None,
xlabel=r'$t\mathrm{\ (s)}$',
reference_line_factor_tuple=(),
input_time_referece_tuple=(),
grid=False,
figsize=(style.SINGLE_COLUMN_WIDTH,
style.SINGLE_COLUMN_SHORT_HEIGHT / 2)):
'''
Plot the time history of velocity for specific node.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
node_i : int
Node number of the velocity to be plotted.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
velocity_label : string
The label shown on the velocity axis for the lowest figure.
velocity_min : float
Lower limit for velocity axis.
velocity_max : float
Upper limit for velocity axis.
reduced_velocity : string
Means to obtain non-dimensional velocity.
'''
print('plot_time_history_velocity')
if reduced_velocity is None:
velocity = self.force_analysis.velocity
elif reduced_velocity is 'fundamental_natural_frequency':
velocity = self.force_analysis.velocity / (
self.force_analysis.modal_natural_frequencies[0])
else:
raise TypeError('Wrong reduced velocity type.')
return self._plot_along_time(
out_filenames=out_filenames,
variable=velocity[:, node_i - 1],
start_time=start_time,
end_time=end_time,
xlabel=xlabel,
ylabel=velocity_label,
ymin=velocity_min,
ymax=velocity_max,
reference_line_factor_tuple=reference_line_factor_tuple,
input_time_referece_tuple=input_time_referece_tuple,
grid=grid,
figsize=figsize)
def plot_modal_weight_force(self, out_filenames, mode_i, start_time,
end_time, ylabel, ymin, ymax,
xlabel=r'$t\mathrm{\ (s)}$',
):
'''
Plot the modal weight history of force for specific mode.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
mode_i : int
Mode number of the modal weight history to be plotted.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
ylabel : string
The label shown on the y axis for the lowest figure.
ymin : float
Lower limit for y axis.
ymax : float
Upper limit for y axis.
'''
if not self.force_analysis.modal_analysis:
return
print('plot_modal_weight_force')
self._plot_along_time(
out_filenames=out_filenames,
variable=self.force_analysis.
modal_weight_force[:, mode_i -
self.force_analysis.mode_number_min],
start_time=start_time,
end_time=end_time,
xlabel=xlabel,
ylabel=ylabel,
ymin=ymin,
ymax=ymax)
def plot_outline(self,
out_filenames,
start_time,
end_time,
line_number,
xlabel,
xmin,
xmax,
show_min_max=False,
show_y=True,
figsize=(style.SINGLE_COLUMN_WIDTH / 2,
style.SINGLE_COLUMN_LONG_HEIGHT / 2)):
'''
Plot instaneous force at multi-times.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
line_number : int
Line number of the figure.
xlabel : string
The label shown on the x axis.
xmin : float
Lower limit for x axis.
xmax : float
Upper limit for x axis.
show_min_max : bool
True for showing minimum and maximum force along the whole time.
show_y : bool
True if ylabel is needed.
figsize : float tuple (float1, float2)
Figure size (width, height).
'''
print('plot_outline')
figure, axis = matplotlib.pyplot.subplots(figsize=figsize)
ylabel = r'$z\cdot L^{-1}$'
if show_min_max:
axis.plot(self.force_analysis.force_min,
self.force_analysis.span,
**style.LIGHT_LINE_STYLE)
axis.plot(self.force_analysis.force_max,
self.force_analysis.span,
**style.LIGHT_LINE_STYLE)
# start_index = self.force_analysis.time_index(start_time)
# end_index = self.force_analysis.next_time_index(end_time)
time_index = (self.force_analysis.time >= start_time) & (
self.force_analysis.time <= end_time)
# for time_index in numpy.linspace(
# start_index, end_index, line_number, endpoint=False):
step = int(sum(time_index) // line_number)
for force in self.force_analysis.force[
time_index, :][::step, :]:
# start_index, end_index, line_number, endpoint=False):
# time_index = self.force_analysis.time_index(time)
# time_index = int(numpy.around(time_index))
axis.plot(
force,
# self.force_analysis.force[time_index, :],
self.force_analysis.span,
**style.SINGLE_LINE_STYLE)
axis.set_xlim(xmin, xmax)
axis.locator_params(axis='x', nbins=style.SHORT_XTICK_MAX_LENGTH)
axis.locator_params(axis='y', nbins=style.LONG_YTICK_MAX_LENGTH)
figure.savefig('')
x_sci_notaion = axis.xaxis.get_offset_text()
x_sci_notaion.set_visible(False)
if x_sci_notaion.get_text():
xlabel = "{:s} / {:s}".format(xlabel[:-1],
x_sci_notaion.get_text()[1:])
axis.set_xlabel(xlabel)
axis.set_ylabel(ylabel)
if not show_y:
matplotlib.pyplot.setp(axis.get_yticklabels(), visible=False)
axis.set_ylabel(r' ')
# matplotlib.pyplot.tight_layout()
for out_filename in out_filenames:
figure.savefig(out_filename)
matplotlib.pyplot.close()
def plot_deviation_outline(self,
out_filenames,
start_time,
end_time,
line_number,
xlabel,
xmin,
xmax,
show_min_max=False,
show_y=True,
figsize=(style.SINGLE_COLUMN_WIDTH / 2,
style.SINGLE_COLUMN_LONG_HEIGHT / 2)):
'''
Plot instaneous force deviation at multi-times.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
line_number : int
Line number of the figure.
xlabel : string
The label shown on the x axis.
xmin : float
Lower limit for x axis.
xmax : float
Upper limit for x axis.
show_min_max : bool
True for showing minimum and maximum force along the whole time.
show_y : bool
True if ylabel is needed.
figsize : float tuple (float1, float2)
Figure size (width, height).
'''
print('plot_deviation_outline')
matplotlib.pyplot.clf()
matplotlib.pyplot.gcf().set_size_inches(figsize)
if show_min_max:
matplotlib.pyplot.plot(self.force_analysis.force_min -
self.force_analysis.force_mean,
self.force_analysis.span,
**style.LIGHT_LINE_STYLE)
matplotlib.pyplot.plot(self.force_analysis.force_max -
self.force_analysis.force_mean,
self.force_analysis.span,
**style.LIGHT_LINE_STYLE)
# start_index = self.force_analysis.time_index(start_time)
# end_index = self.force_analysis.next_time_index(end_time)
time_index = (self.force_analysis.time >= start_time) & (
self.force_analysis.time <= end_time)
# for specific time period
# for time in numpy.linspace(start_time, end_time, line_number,
# endpoint=False):
# for time_index in numpy.linspace(
# start_index, end_index, line_number, endpoint=False):
step = int(sum(time_index) // line_number)
for force_deviation in self.force_analysis.force_deviation[
time_index, :][::step, :]:
# time_index = self.force_analysis.time_index(time)
# time_index = int(numpy.around(time_index))
matplotlib.pyplot.plot(
# self.force_analysis.force_deviation[time_index,
# :],
force_deviation,
self.force_analysis.span,
**style.SINGLE_LINE_STYLE)
matplotlib.pyplot.xlim(xmin, xmax)
axis = matplotlib.pyplot.gca()
axis.locator_params(axis='x', nbins=style.SHORT_XTICK_MAX_LENGTH)
axis.locator_params(axis='y', nbins=style.LONG_YTICK_MAX_LENGTH)
matplotlib.pyplot.savefig('')
x_sci_notaion = axis.xaxis.get_offset_text()
x_sci_notaion.set_visible(False)
if x_sci_notaion.get_text():
xlabel = "{:s} / {:s}".format(xlabel[:-1],
x_sci_notaion.get_text()[1:])
matplotlib.pyplot.xlabel(xlabel)
matplotlib.pyplot.ylabel(r'$z\cdot L^{-1}$')
if not show_y:
matplotlib.pyplot.setp(axis.get_yticklabels(), visible=False)
matplotlib.pyplot.ylabel(r' ')
# matplotlib.pyplot.tight_layout()
for out_filename in out_filenames:
matplotlib.pyplot.savefig(out_filename)
matplotlib.pyplot.close()
def plot_force_mean(self, out_filenames, xlabel, xmin, xmax):
'''
Plot force mean value along the whole time.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
xlabel : string
The label shown on the x axis.
xmin : float
Lower limit for x axis.
xmax : float
Upper limit for x axis.
'''
print('plot_force_mean')
self._plot_along_span(
out_filenames,
self.force_analysis.force_mean,
xlabel,
xmin,
xmax,
color=style.DARK_COLOR,
clf=True,
save=False,
grid=True)
self._plot_along_span(
out_filenames,
self.force_analysis.force_min,
xlabel,
xmin,
xmax,
color=style.LIGHT_COLOR,
clf=False,
save=False,
grid=True)
self._plot_along_span(
out_filenames,
self.force_analysis.force_max,
xlabel,
xmin,
xmax,
color=style.LIGHT_COLOR,
clf=False,
save=True,
grid=True)
return {
'mean': self.force_analysis.force_mean.tolist(),
'min': self.force_analysis.force_min.tolist(),
'max': self.force_analysis.force_max.tolist()
}
def plot_curvature_mean(self, out_filenames, xlabel, xmin, xmax):
'''
Plot curvature mean value along the whole time.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
xlabel : string
The label shown on the x axis.
xmin : float
Lower limit for x axis.
xmax : float
Upper limit for x axis.
'''
print('plot_curvature_mean')
# here plot mean curvature to the opposite value
self._plot_along_span(
out_filenames,
-self.force_analysis.curvature_mean,
xlabel,
xmin,
xmax,
color=style.DARK_COLOR,
clf=True,
save=False,
grid=True)
self._plot_along_span(
out_filenames,
-self.force_analysis.curvature_min,
xlabel,
xmin,
xmax,
color=style.LIGHT_COLOR,
clf=False,
save=False,
grid=True)
self._plot_along_span(
out_filenames,
-self.force_analysis.curvature_max,
xlabel,
xmin,
xmax,
color=style.LIGHT_COLOR,
clf=False,
save=True,
grid=True)
# return actual value rather than opposite value.
return {
'curvature_mean': self.force_analysis.curvature_mean.tolist(),
'min': self.force_analysis.curvature_min.tolist(),
'max': self.force_analysis.curvature_max.tolist()
}
def plot_force_std(self, out_filenames, xlabel, xmin, xmax):
'''
Plot force standard deviations along the whole time.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
xlabel : string
The label shown on the x axis.
xmin : float
Lower limit for x axis.
xmax : float
Upper limit for x axis.
'''
print('plot_force_std')
self._plot_along_span(
out_filenames,
self.force_analysis.force_std,
xlabel,
xmin,
xmax,
grid=True)
return self.force_analysis.force_std
def plot_curvature_std(self, out_filenames, xlabel, xmin, xmax):
'''
Plot curvature standard deviations along the whole time.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
xlabel : string
The label shown on the x axis.
xmin : float
Lower limit for x axis.
xmax : float
Upper limit for x axis.
'''
print('plot_curvature_std')
self._plot_along_span(
out_filenames,
self.force_analysis.curvature_std,
xlabel,
xmin,
xmax,
grid=True)
return self.force_analysis.curvature_std
    def subplot_force_curvature_std(self, out_filenames, xlabel_list,
                                    xmin_list, xmax_list):
        '''
        Subplot standard deviations of force and curvature along the whole time.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        xlabel_list : a list of string
            The labels shown on the x axes.
        xmin_list : float
            Lower limit for x axis.
        xmax_list : float
            Upper limit for x axis.
        '''
        print('subplot_force_curvature_std')
        # Two panels in one figure: force std first, curvature std second.
        self._subplot_along_span(
            out_filenames, [
                self.force_analysis.force_std,
                self.force_analysis.curvature_std,
            ],
            xlabel_list,
            xmin_list,
            xmax_list,
            grid=True,
            figsize=(style.SINGLE_COLUMN_WIDTH,
                     style.SINGLE_COLUMN_LONG_HEIGHT / 2))
    def subplot_force_curvature_mean_std(
            self, out_filenames, xlabel_list, xmin_list, xmax_list):
        '''
        Subplot mean values and standard deviations of force and curvature along the whole time.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        xlabel_list : a list of string
            The labels shown on the x axes.
        xmin_list : float
            Lower limit for x axis.
        xmax_list : float
            Upper limit for x axis.
        '''
        print('subplot_force_curvature_mean_std')
        # Four panels: means on top, standard deviations below.
        # The curvature mean is negated for display, consistent with
        # plot_curvature_mean which also plots the opposite value.
        self._subplot_along_span(
            out_filenames, [
                self.force_analysis.force_mean,
                -self.force_analysis.curvature_mean,
                self.force_analysis.force_std,
                self.force_analysis.curvature_std
            ],
            xlabel_list,
            xmin_list,
            xmax_list,
            grid=True,
            figsize=(style.ONE_AND_HALF_COLUMN_WIDTH,
                     style.ONE_AND_HALF_COLUMN_SHORT_HEIGHT))
    def subplot_modal_weight_force(
            self,
            out_filenames,
            start_time,
            end_time,
            ylabel,
            ymin,
            ymax,
            xlabel=r'$t\mathrm{\ (s)}$',
            figsize=(style.FULL_WIDTH * 2 / 3, style.FULL_SHORT_HEIGHT)):
        '''
        Plot the time history of modal weight of force.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        ylabel : string
            The label shown on the y axis for the lowest figure.
        ymin : float
            Lower limit for y axis.
        ymax : float
            Upper limit for y axis.
        xlabel : string
            The label shown on the x axis.
        figsize : float tuple (float1, float2)
            Figure size (width, height).
        '''
        # Nothing to plot unless a modal analysis was performed.
        if not self.force_analysis.modal_analysis:
            return
        print('subplot_modal_weight_force')
        self._subplot_along_time(
            out_filenames=out_filenames,
            # One row per mode: modal_weight_force columns become curves.
            variable_list=self.force_analysis.modal_weight_force.T,
            start_time=start_time,
            end_time=end_time,
            xlabel=xlabel,
            ylabel=ylabel,
            text_list=[
                r'Mode {:d}'.format(mode_i)
                for mode_i in
                range(self.force_analysis.mode_number_min,
                      self.force_analysis.mode_number_max + 1)
            ],
            ymin=ymin,
            ymax=ymax,
            grid=False,
            figsize=figsize)
def contourf_contour_spatio_temporal_force(
self,
out_filenames,
start_time,
end_time,
colorbar_min,
colorbar_max,
contourf_num,
contour_num,
xlabel=r'$t\mathrm{\ (s)}$',
colorbar_zlabel='', ):
'''
Spatio temporal contour over contourf of force.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
colorbar_min : float
Lower limit for the contour.
colorbar_max : float
Upper limit for the contour.
contour_num : int
Line number for the contour.
contourf_num : int
Line number for the contourf.
colorbar_zlabel : string
Label shown on the colorbar.
'''
print('contourf_contour_spatio_temporal_force')
# start_index = self.force_analysis.time_index(start_time)
# end_index = self.force_analysis.next_time_index(end_time)
time_index = (self.force_analysis.time >= start_time) & (
self.force_analysis.time <= end_time)
time = self.force_analysis.time[time_index]
_contourf_contour(
out_filenames=out_filenames,
variable=self.force_analysis.force_deviation[
time_index],
x=time,
y=self.force_analysis.span,
xlabel=r'$t\mathrm{\ (s)}$',
ylabel=r'$z\cdot L^{-1}$',
colorbar_min=colorbar_min,
colorbar_max=colorbar_max,
contourf_num=contourf_num,
contour_num=contour_num,
colorbar_zlabel=colorbar_zlabel)
    def contour_spatio_temporal_force(self, out_filenames, start_time,
                                      end_time, colorbar_min,
                                      colorbar_max, contour_num,
                                      xlabel=r'$t\mathrm{\ (s)}$',
                                      ):
        '''
        Spatio temporal contour for force.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        colorbar_min : float
            Lower limit for the contour.
        colorbar_max : float
            Upper limit for the contour.
        contour_num : int
            Line number for the contour.
        xlabel : string
            The label shown on the x axis.
        '''
        print('contour_spatio_temporal_force')
        # Boolean mask selecting samples inside [start_time, end_time].
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        time = self.force_analysis.time[time_index]
        _contour(
            out_filenames=out_filenames,
            variable=self.force_analysis.force_deviation[
                time_index],
            x=time,
            y=self.force_analysis.span,
            xlabel=xlabel,
            ylabel=r'$z\cdot L^{-1}$',
            colorbar_min=colorbar_min,
            colorbar_max=colorbar_max,
            contour_num=contour_num)
    def contourf_contour_spatio_temporal_curvature(
            self,
            out_filenames,
            start_time,
            end_time,
            colorbar_min,
            colorbar_max,
            contourf_num,
            contour_num,
            xlabel=r'$t\mathrm{\ (s)}$',
            colorbar_zlabel='', ):
        '''
        Spatio temporal contour over contourf of curvature.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        colorbar_min : float
            Lower limit for the contour.
        colorbar_max : float
            Upper limit for the contour.
        contour_num : int
            Line number for the contour.
        contourf_num : int
            Line number for the contourf.
        xlabel : string
            The label shown on the x axis.
        colorbar_zlabel : string
            Label shown on the colorbar.
        '''
        print('contourf_contour_spatio_temporal_curvature')
        # Boolean mask selecting samples inside [start_time, end_time].
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        time = self.force_analysis.time[time_index]
        _contourf_contour(
            out_filenames=out_filenames,
            variable=self.force_analysis.curvature_deviation[time_index],
            x=time,
            y=self.force_analysis.span,
            xlabel=xlabel,
            ylabel=r'$z\cdot L^{-1}$',
            colorbar_min=colorbar_min,
            colorbar_max=colorbar_max,
            contourf_num=contourf_num,
            contour_num=contour_num,
            colorbar_zlabel=colorbar_zlabel)
    def contour_spatio_temporal_curvature(self, out_filenames, start_time,
                                          end_time, colorbar_min, colorbar_max,
                                          contour_num,
                                          xlabel=r'$t\mathrm{\ (s)}$',
                                          ):
        '''
        Spatio temporal contour for curvature.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        colorbar_min : float
            Lower limit for the contour.
        colorbar_max : float
            Upper limit for the contour.
        contour_num : int
            Line number for the contour.
        xlabel : string
            The label shown on the x axis.
        '''
        print('contour_spatio_temporal_curvature')
        # Boolean mask selecting samples inside [start_time, end_time].
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        time = self.force_analysis.time[time_index]
        _contour(
            out_filenames=out_filenames,
            variable=self.force_analysis.curvature_deviation[time_index],
            x=time,
            y=self.force_analysis.span,
            xlabel=xlabel,
            ylabel=r'$z\cdot L^{-1}$',
            colorbar_min=colorbar_min,
            colorbar_max=colorbar_max,
            contour_num=contour_num)
    def subplot_span_force(self,
                           out_filenames,
                           force_label,
                           start_time,
                           end_time,
                           force_min,
                           force_max,
                           xlabel=r'$t\mathrm{\ (s)}$',
                           num=9,
                           figsize=(style.SINGLE_COLUMN_WIDTH,
                                    style.SINGLE_COLUMN_LONG_HEIGHT)):
        '''
        Subplots of force along time.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        force_label : string
            Label for force.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        force_min : float
            Lower limit for force axis.
        force_max : float
            Upper limit for force axis.
        xlabel : string
            The label shown on the x axis.
        num : int
            Number of nodes for subplots.
        '''
        # num is incremented so that, after the end nodes are dropped by
        # the [-2:0:-1] slices below, roughly `num` interior panels remain.
        num += 1
        print('subplot_span_force')
        step = int(self.force_analysis.node_number // num)
        span_list = numpy.arange(0, 1,
                                 step / self.force_analysis.node_number)
        self._subplot_along_time(
            out_filenames=out_filenames,
            # [-2:0:-1] drops the two boundary nodes and reverses the order
            # so the panel nearest z/L = 1 is drawn first.
            variable_list=self.force_analysis.force[:, ::step].T[
                -2:0:-1],
            start_time=start_time,
            end_time=end_time,
            xlabel=xlabel,
            ylabel=force_label,
            text_list=[
                r'$z\cdot L^{{-1}}={:.1f}$'.format(span)
                for span in span_list[-2:0:-1]#inverse
            ],
            ymin=force_min,
            ymax=force_max,
            grid=False,
            figsize=figsize)
    def subplot_span_force_deviation(
            self,
            out_filenames,
            force_deviation_label,
            start_time,
            end_time,
            force_deviation_min,
            force_deviation_max,
            xlabel=r'$t\mathrm{\ (s)}$',
            num=9,
            figsize=(style.SINGLE_COLUMN_WIDTH,
                     style.SINGLE_COLUMN_LONG_HEIGHT)):
        '''
        Subplots of force deviation along time.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        force_deviation_label : string
            Label for force.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        force_deviation_min : float
            Lower limit for force axis.
        force_deviation_max : float
            Upper limit for force axis.
        xlabel : string
            The label shown on the x axis.
        num : int
            Number of nodes for subplots.
        '''
        # num is incremented so that, after the end nodes are dropped by
        # the [-2:0:-1] slices below, roughly `num` interior panels remain.
        num += 1
        print('subplot_span_force_deviation')
        step = int(self.force_analysis.node_number // num)
        span_list = numpy.arange(0, 1,
                                 step / self.force_analysis.node_number)
        self._subplot_along_time(
            out_filenames=out_filenames,
            # [-2:0:-1] drops the boundary nodes and reverses the order.
            variable_list=self.force_analysis.
            force_deviation[:, ::step].T[-2:0:-1],
            start_time=start_time,
            end_time=end_time,
            xlabel=xlabel,
            ylabel=force_deviation_label,
            text_list=[
                r'$z\cdot L^{{-1}}={:.1f}$'.format(span)
                for span in span_list[-2:0:-1]#inverse
            ],
            ymin=force_deviation_min,
            ymax=force_deviation_max,
            grid=False,
            figsize=figsize)
def subplot_span_velocity(self,
out_filenames,
velocity_label,
start_time,
end_time,
velocity_min,
velocity_max,
num=9,
figsize=(style.SINGLE_COLUMN_WIDTH,
style.SINGLE_COLUMN_LONG_HEIGHT)):
'''
Subplots of velocity along time.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
velocity_label : string
Label for velocity.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
velocity_min : float
Lower limit for velocity axis.
velocity_max : float
Upper limit for velocity axis.
num : int
Number of nodes for subplots.
'''
num += 1
print('subplot_span_velocity')
step = int(self.force_analysis.node_number // num)
span_list = numpy.arange(0, 1,
step / self.force_analysis.node_number)
self._subplot_along_time(
out_filenames=out_filenames,
variable_list=self.force_analysis.velocity[:, ::step].T[-1:0:-1],
start_time=start_time,
end_time=end_time,
ylabel=velocity_label,
text_list=[
r'$z\cdot L^{{-1}}={:.1f}$'.format(span)
for span in span_list[-2:0:-1]#inverse
],
ymin=velocity_min,
ymax=velocity_max,
grid=False,
figsize=figsize)
    def subplot_fft_amplitude(
            self,
            out_filenames,
            num=9,
            xlabel=r'$f_o\mathrm{\ (Hz)}$',
            ylabel=r'$\mathrm{A_{FFT}}$',
            figsize=(style.SINGLE_COLUMN_WIDTH,
                     style.SINGLE_COLUMN_LONG_HEIGHT)):
        '''
        Subplots of FFT amplitude.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        num : int
            number of nodes for subplots.
        xlabel : string
            The label shown on the x axis.
        ylabel : string
            The label shown on the y axis.
        '''
        # FFT results only exist when the frequency-domain analysis ran.
        if not self.force_analysis.frequency_domain_analysis:
            return
        num += 1
        print('subplot_fft_amplitude')
        step = int(self.force_analysis.node_number // num)
        span_list = numpy.arange(0, 1,
                                 step / self.force_analysis.node_number)
        _subplot_row(
            out_filenames=out_filenames,
            x_variable=self.force_analysis.fft_frequency,
            # [1:-1] drops the two boundary nodes (kept in ascending order
            # here, unlike the [-2:0:-1] span subplots).
            y_variable_list=self.force_analysis.fft_amplitude[:, ::step].T[1:-1],
            xlabel=xlabel,
            ylabel=ylabel,
            text_list=[
                r'$z\cdot L^{{-1}}={:.1f}$'.format(span)
                for span in span_list[1:-1]
            ],
            xmin=self.force_analysis.frequency_min,
            xmax=self.force_analysis.frequency_max,
            ymin=0,
            ymax=numpy.nanmax(self.force_analysis.fft_amplitude),
            grid=False,
            figsize=figsize, )
    def subplot_power_spectral_density(
            self,
            out_filenames,
            num=9,
            xlabel=r'$f_o\mathrm{\ (Hz)}$',
            ylabel=r'$\mathrm{PSD}$',
            figsize=(style.SINGLE_COLUMN_WIDTH,
                     style.SINGLE_COLUMN_LONG_HEIGHT)):
        '''
        Subplots of power spectral density.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        num : int
            number of nodes for subplots.
        xlabel : string
            The label shown on the x axis.
        ylabel : string
            The label shown on the y axis.
        '''
        # PSD results only exist when the frequency-domain analysis ran.
        if not self.force_analysis.frequency_domain_analysis:
            return
        num += 1
        print('subplot_power_spectral_density')
        step = int(self.force_analysis.node_number // num)
        span_list = numpy.arange(0, 1,
                                 step / self.force_analysis.node_number)
        _subplot_row(
            out_filenames=out_filenames,
            x_variable=self.force_analysis.fft_frequency,
            # [-2:0:-1] drops the boundary nodes and reverses the order.
            y_variable_list=self.force_analysis.
            power_spectral_density[:, ::step].T[-2:0:-1],#inverse
            xlabel=xlabel,
            ylabel=ylabel,
            text_list=[
                r'$z\cdot L^{{-1}}={:.1f}$'.format(span)
                for span in span_list[-2:0:-1]#inverse
            ],
            xmin=self.force_analysis.frequency_min,
            xmax=self.force_analysis.frequency_max,
            ymin=0,
            ymax=numpy.nanmax(self.force_analysis.power_spectral_density),
            grid=False,
            figsize=figsize, )
    def subplot_modal_fft_amplitude(
            self,
            out_filenames,
            xlabel=r'$f_o^m\mathrm{\ (Hz)}$',
            ylabel=r'$\mathrm{A_{FFT}}$',
            figsize=(style.FULL_WIDTH * 1 / 3, style.FULL_SHORT_HEIGHT)):
        '''
        Subplots modal fft amplitude for multi modes.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        figsize : float tuple (float1, float2)
            Figure size (width, height).
        '''
        # Modal spectra only exist when the modal analysis ran.
        if not self.force_analysis.modal_analysis:
            return
        print('subplot_modal_fft_amplitude')
        _subplot_row(
            out_filenames=out_filenames,
            x_variable=self.force_analysis.fft_frequency,
            # One panel per mode, in ascending mode number.
            y_variable_list=self.force_analysis.modal_fft_amplitude.T,
            xlabel=xlabel,
            ylabel=ylabel,
            text_list=[
                r'Mode {:d}'.format(mode_i)
                for mode_i in
                range(self.force_analysis.mode_number_min,
                      self.force_analysis.mode_number_max + 1)
            ],
            xmin=self.force_analysis.frequency_min,
            xmax=self.force_analysis.frequency_max,
            ymin=0,
            ymax=numpy.nanmax(
                self.force_analysis.modal_fft_amplitude),
            grid=False,
            figsize=figsize)
    def subplot_modal_power_spectral_density(
            self,
            out_filenames,
            xlabel=r'$f_o^m\mathrm{\ (Hz)}$',
            ylabel=r'$\mathrm{PSD}$',
            figsize=(style.FULL_WIDTH * 1 / 3, style.FULL_SHORT_HEIGHT)):
        '''
        Subplots modal power spectral density for multi modes.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        figsize : float tuple (float1, float2)
            Figure size (width, height).
        '''
        # Modal spectra only exist when the modal analysis ran.
        if not self.force_analysis.modal_analysis:
            return
        print('subplot_modal_power_spectral_density')
        _subplot_row(
            out_filenames=out_filenames,
            x_variable=self.force_analysis.fft_frequency,
            # One panel per mode, in ascending mode number.
            y_variable_list=self.force_analysis.
            modal_power_spectral_density.T,
            xlabel=xlabel,
            ylabel=ylabel,
            text_list=[
                r'Mode {:d}'.format(mode_i)
                for mode_i in
                range(self.force_analysis.mode_number_min,
                      self.force_analysis.mode_number_max + 1)
            ],
            xmin=self.force_analysis.frequency_min,
            xmax=self.force_analysis.frequency_max,
            ymin=0,
            ymax=numpy.nanmax(
                self.force_analysis.modal_power_spectral_density),
            grid=False,
            figsize=figsize)
def plot3d_power_spectral_density(self,
out_filenames,
xlabel=r'$f_o\mathrm{\ (Hz)}$',
ylabel=r'$z\cdot L^{-1}$',
figsize=(style.SINGLE_COLUMN_WIDTH / 2,
style.SINGLE_COLUMN_WIDTH / 2)):
'''
3d power spectral density plots.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
figsize : float tuple (float1, float2)
Figure size (width, height).
'''
if not self.force_analysis.frequency_domain_analysis:
return
print('plot3d_power_spectral_densityl density')
from mpl_toolkits.mplot3d import Axes3D
matplotlib.pyplot.clf()
matplotlib.pyplot.gcf().set_size_inches(figsize)
ax = matplotlib.pyplot.gca(projection='3d')
ax.set_xlabel(xlabel)
ax.set_xlim3d(self.force_analysis.frequency_min,
self.force_analysis.frequency_max)
ax.set_ylim3d(0, 1)
ax.set_ylabel(ylabel)
ax.set_zlim3d(
numpy.nanmin(self.force_analysis.power_spectral_density),
numpy.nanmax(self.force_analysis.power_spectral_density))
ax.set_zlabel(r'Power spectral density', labelpad=30)
ax.zaxis.set_major_formatter(
matplotlib.ticker.StrMethodFormatter('{x:.0e}'))
num = 10
step = self.force_analysis.node_number // num
span = numpy.linspace(0, 1, num=num)
line = matplotlib.collections.LineCollection(
[
list(zip(self.force_analysis.fft_frequency, A.T))
for A in self.force_analysis.power_spectral_density[:, ::
step].T
],
edgecolor=matplotlib.colors.ColorConverter().to_rgb(
style.DARK_COLOR))
ax.add_collection3d(line, zs=span, zdir='y')
ax.view_init(elev=45, azim=-120)
ax.grid()
matplotlib.pyplot.tight_layout()
for out_filename in out_filenames:
matplotlib.pyplot.savefig(out_filename)
matplotlib.pyplot.close()
    def contourf_fft_amplitude(self, out_filenames,
                               contourf_num,
                               xlabel=r'$f_o\mathrm{\ (Hz)}$',
                               ylabel=r'$z\cdot L^{-1}$',
                               ):
        '''
        Contourfs of FFT amplitude for all nodes.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        contourf_num : int
            Line number for the contourf.
        '''
        # FFT results only exist when the frequency-domain analysis ran.
        if not self.force_analysis.frequency_domain_analysis:
            return
        print('contourf_fft_amplitude')
        # Color range spans the full data range, ignoring NaNs.
        _contourf(
            out_filenames=out_filenames,
            variable=self.force_analysis.fft_amplitude,
            x=self.force_analysis.fft_frequency,
            y=self.force_analysis.span,
            xlabel=xlabel,
            ylabel=ylabel,
            colorbar_min=numpy.nanmin(
                self.force_analysis.fft_amplitude),
            colorbar_max=numpy.nanmax(
                self.force_analysis.fft_amplitude),
            contourf_num=contourf_num, )
    def contourf_power_spectral_density(self, out_filenames,
                                        contourf_num,
                                        xlabel=r'$f_o\mathrm{\ (Hz)}$',
                                        ylabel=r'$z\cdot L^{-1}$',
                                        ):
        '''
        Contourfs of power spectral density for all nodes.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        contourf_num : int
            Line number for the contourf.
        '''
        # PSD results only exist when the frequency-domain analysis ran.
        if not self.force_analysis.frequency_domain_analysis:
            return
        print('contourf_power_spectral_density')
        # Color range spans the full data range, ignoring NaNs.
        _contourf(
            out_filenames=out_filenames,
            variable=self.force_analysis.power_spectral_density,
            x=self.force_analysis.fft_frequency,
            y=self.force_analysis.span,
            xlabel=xlabel,
            ylabel=ylabel,
            colorbar_min=numpy.nanmin(
                self.force_analysis.power_spectral_density),
            colorbar_max=numpy.nanmax(
                self.force_analysis.power_spectral_density),
            contourf_num=contourf_num, )
    def contour_fft_amplitude(self, out_filenames,
                              contour_num,
                              xlabel=r'$f_o\mathrm{\ (Hz)}$',
                              ylabel=r'$z\cdot L^{-1}$',
                              ):
        '''
        Contours of fft amplitude for all nodes.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        contour_num : int
            Line number for the contour.
        '''
        # FFT results only exist when the frequency-domain analysis ran.
        if not self.force_analysis.frequency_domain_analysis:
            return
        print('contour_fft_amplitude')
        # Color range spans the full data range, ignoring NaNs.
        _contour(
            out_filenames=out_filenames,
            variable=self.force_analysis.fft_amplitude,
            x=self.force_analysis.fft_frequency,
            y=self.force_analysis.span,
            xlabel=xlabel,
            ylabel=ylabel,
            colorbar_min=numpy.nanmin(
                self.force_analysis.fft_amplitude),
            colorbar_max=numpy.nanmax(
                self.force_analysis.fft_amplitude),
            contour_num=contour_num)
    def contour_power_spectral_density(self, out_filenames,
                                       contour_num,
                                       xlabel=r'$f_o\mathrm{\ (Hz)}$',
                                       ylabel=r'$z\cdot L^{-1}$',
                                       ):
        '''
        Contours of power spectral density for all nodes.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        contour_num : int
            Line number for the contour.
        '''
        # PSD results only exist when the frequency-domain analysis ran.
        if not self.force_analysis.frequency_domain_analysis:
            return
        print('contour_power_spectral_density')
        # Color range spans the full data range, ignoring NaNs.
        _contour(
            out_filenames=out_filenames,
            variable=self.force_analysis.power_spectral_density,
            x=self.force_analysis.fft_frequency,
            y=self.force_analysis.span,
            xlabel=xlabel,
            ylabel=ylabel,
            colorbar_min=numpy.nanmin(
                self.force_analysis.power_spectral_density),
            colorbar_max=numpy.nanmax(
                self.force_analysis.power_spectral_density),
            contour_num=contour_num)
    def contourf_wavelet(self, node_i, out_filenames, start_time, end_time,
                         contourf_num,
                         xlabel=r'$t\mathrm{\ (s)}$',
                         ylabel=r'$f_o\mathrm{\ (Hz)}$',
                         ):
        '''
        Contourfs of wavelet for specific node.
        Parameters
        ----------
        node_i : int
            1-based node index whose wavelet power is plotted.
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        contourf_num : int
            Line number for the contourf.
        '''
        # Wavelet results only exist when the wavelet analysis ran.
        if not self.force_analysis.wavelet_analysis:
            return
        print('contourf_wavelet')
        # time_index = (
        #     self.force_analysis.wavelet_time >= start_time) & (
        #         self.force_analysis.wavelet_time <= end_time)
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        # Color range uses the node's full wavelet power, not just the
        # selected time window, so figures of different windows compare.
        _contourf(
            out_filenames,
            variable=self.force_analysis.wavelet_power[node_i - 1].T[
                time_index],
            #x=self.force_analysis.wavelet_time[time_index],
            x=self.force_analysis.time[time_index],
            y=self.force_analysis.wavelet_frequency,
            xlabel=xlabel,
            ylabel=ylabel,
            colorbar_min=numpy.nanmin(self.force_analysis.wavelet_power[
                node_i - 1]),
            colorbar_max=numpy.nanmax(self.force_analysis.wavelet_power[
                node_i - 1]),
            contourf_num=contourf_num, )
    def contour_wavelet(self, node_i, out_filenames, start_time, end_time,
                        contour_num,
                        xlabel=r'$t\mathrm{\ (s)}$',
                        ylabel=r'$f_o\mathrm{\ (Hz)}$',
                        ):
        '''
        Contours of wavelet for specific node.
        Parameters
        ----------
        node_i : int
            1-based node index whose wavelet power is plotted.
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        contour_num : int
            Line number for the contour.
        '''
        # Wavelet results only exist when the wavelet analysis ran.
        if not self.force_analysis.wavelet_analysis:
            return
        print('contour_wavelet')
        # time_index = (
        #     self.force_analysis.wavelet_time >= start_time) & (
        #         self.force_analysis.wavelet_time <= end_time)
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        # Color range uses the node's full wavelet power, not just the
        # selected time window, so figures of different windows compare.
        _contour(
            out_filenames,
            variable=self.force_analysis.wavelet_power[node_i - 1].T[
                time_index],
            #x=self.force_analysis.wavelet_time[time_index],
            x=self.force_analysis.time[time_index],
            y=self.force_analysis.wavelet_frequency,
            xlabel=xlabel,
            ylabel=ylabel,
            colorbar_min=numpy.nanmin(self.force_analysis.wavelet_power[
                node_i - 1]),
            colorbar_max=numpy.nanmax(self.force_analysis.wavelet_power[
                node_i - 1]),
            contour_num=contour_num)
    def plot_wavelet_dominant_frequency(self, out_filenames, node_i,
                                        start_time, end_time,
                                        xlabel=r'$t\mathrm{\ (s)}$',
                                        ylabel=r'$f_o\mathrm{\ (Hz)}$',
                                        ):
        '''
        Dominant frequency along time axis for specific node.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        node_i : int
            Node number of the dominant frequency to be plotted.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        '''
        # Wavelet results only exist when the wavelet analysis ran.
        if not self.force_analysis.wavelet_analysis:
            return
        print('plot_wavelet_dominant_frequency')
        self._plot_along_time(
            out_filenames=out_filenames,
            # node_i is 1-based; the storage array is 0-based.
            variable=self.force_analysis.wavelet_dominant_frequencies[
                node_i - 1],
            start_time=start_time,
            end_time=end_time,
            xlabel=xlabel,
            ylabel=ylabel,
            ymin=self.force_analysis.frequency_min,
            ymax=self.force_analysis.frequency_max)
    def contourf_wavelet_dominant_frequency(self, out_filenames, start_time,
                                            end_time, contourf_num,
                                            xlabel=r'$t\mathrm{\ (s)}$',
                                            ):
        '''
        Contourf of dominant frequency along time axis for all nodes.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        contourf_num : int
            Line number for the contourf.
        '''
        # Wavelet results only exist when the wavelet analysis ran.
        if not self.force_analysis.wavelet_analysis:
            return
        print('contourf_wavelet_dominant_frequency')
        # Boolean mask selecting samples inside [start_time, end_time].
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        # start_index = self.force_analysis.time_index(start_time)
        # end_index = self.force_analysis.next_time_index(end_time)
        time = self.force_analysis.time[time_index]
        _contourf(
            out_filenames=out_filenames,
            variable=self.force_analysis.
            wavelet_dominant_frequencies[:, time_index].T,
            x=time,
            y=self.force_analysis.span,
            xlabel=xlabel,
            ylabel=r'$z\cdot L^{-1}$',
            colorbar_min=self.force_analysis.frequency_min,
            colorbar_max=self.force_analysis.frequency_max,
            contourf_num=contourf_num, )
    def contour_wavelet_dominant_frequency(self, out_filenames, start_time,
                                           end_time, contour_num,
                                           xlabel=r'$t\mathrm{\ (s)}$',
                                           ):
        '''
        Contour of dominant frequency along time axis for all nodes.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        contour_num : int
            Line number for the contour.
        '''
        # Wavelet results only exist when the wavelet analysis ran.
        if not self.force_analysis.wavelet_analysis:
            return
        print('contour_wavelet_dominant_frequency')
        # start_index = self.force_analysis.time_index(start_time)
        # end_index = self.force_analysis.next_time_index(end_time)
        # Boolean mask selecting samples inside [start_time, end_time].
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        time = self.force_analysis.time[time_index]
        _contour(
            out_filenames=out_filenames,
            variable=self.force_analysis.
            wavelet_dominant_frequencies[:, time_index].T,
            x=time,
            y=self.force_analysis.span,
            xlabel=xlabel,
            ylabel=r'$z\cdot L^{-1}$',
            colorbar_min=self.force_analysis.frequency_min,
            colorbar_max=self.force_analysis.frequency_max,
            contour_num=contour_num)
def subplot_wavelet_dominant_frequency(
self,
out_filenames,
start_time,
end_time,
ymin,
ymax,
num=9,
xlabel=r'$t\mathrm{\ (s)}$',
ylabel=r'$f_o\mathrm{\ (Hz)}$',
figsize=(style.SINGLE_COLUMN_WIDTH,
style.SINGLE_COLUMN_LONG_HEIGHT)):
'''
Plot the time history of dominant frequency of force.
Parameters
----------
out_filenames : a list of string
The filenames for saving figures.
start_time : float
Lower limit for time axis.
end_time : float
Upper limit for time axis.
ylabel : string
The label shown on the y axis for the lowest figure.
ymin : float
Lower limit for y axis.
ymax : float
Upper limit for y axis.
num : int
number of nodes for subplots.
'''
if not self.force_analysis.wavelet_analysis:
return
print('subplot_wavelet_dominant_frequency')
num += 1
print('subplot_power_spectral_density')
step = int(self.force_analysis.node_number // num)
span_list = numpy.arange(0, 1,
step / self.force_analysis.node_number)
self._subplot_along_time(
out_filenames=out_filenames,
variable_list=self.force_analysis.
wavelet_dominant_frequencies[::step, :][-2:0:-1],#inverse
start_time=start_time,
end_time=end_time,
xlabel=xlabel,
ylabel=ylabel,
text_list=[
r'$z\cdot L^{{-1}}={:.1f}$'.format(span)
for span in span_list[-2:0:-1]#inverse
],
ymin=ymin,
ymax=ymax,
grid=False,
figsize=figsize)
    def _add_and_move_line_along_time(
            self,
            variable_along_span,
            out_filenames,
            start_time,
            end_time,
            xlabel,
            xmin,
            xmax,
            grid,
            data_step,
            fps,
            dpi=100,
            line_number=10,
            color=style.DARK_COLOR,
            figsize=(style.SINGLE_COLUMN_WIDTH / 2,
                     style.SINGLE_COLUMN_LONG_HEIGHT / 2)):
        '''
        Animate a spanwise profile over time, leaving a faded trace.
        Like _move_line_along_time, but every `add_line_interval` frames the
        current profile is also kept as a light "ghost" line, so the saved
        animation accumulates roughly `line_number` historic profiles.
        Parameters
        ----------
        variable_along_span : 2D array, time along axis 0, span along axis 1.
        out_filenames : a list of string
            Animation output files (saved with the imagemagick writer).
        start_time, end_time : float
            Time window to animate.
        xlabel : string; xmin, xmax : float
            x-axis label and limits.
        grid : bool
            True if grid is necessary.
        data_step : int
            Animate every data_step-th sample.
        fps, dpi : int
            Frame rate and resolution of the saved animation.
        line_number : int
            Approximate number of ghost lines to accumulate.
        '''
        matplotlib.pyplot.clf()
        figure = matplotlib.pyplot.gcf()
        figure.set_size_inches(figsize)
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        variable_along_span = variable_along_span[time_index]
        # lines[0] is the moving line; ghost copies are appended behind it.
        lines = []
        lines.append(
            matplotlib.pyplot.plot(
                variable_along_span[0, :],
                self.force_analysis.span,
                color=color)[0])
        matplotlib.pyplot.xlim(xmin, xmax)
        axis = matplotlib.pyplot.gca()
        axis.locator_params(axis='x', nbins=style.SHORT_XTICK_MAX_LENGTH)
        axis.locator_params(axis='y', nbins=style.LONG_YTICK_MAX_LENGTH)
        matplotlib.pyplot.grid(grid)
        # NOTE(review): savefig('') appears to be used only to force a draw
        # so the x-axis offset text below is populated - confirm.
        figure.savefig('')
        # Fold matplotlib's scientific-notation offset (e.g. "1e-3") into
        # the xlabel instead of showing it beside the axis.
        x_sci_notaion = axis.xaxis.get_offset_text()
        x_sci_notaion.set_visible(False)
        if x_sci_notaion.get_text():
            xlabel = "{:s} / {:s}".format(xlabel[:-1],
                                          x_sci_notaion.get_text()[1:])
        matplotlib.pyplot.xlabel(xlabel)
        matplotlib.pyplot.ylabel(r'$z\cdot L^{-1}$')
        # start_index = self.force_analysis.time_index(start_time)
        # end_index = self.force_analysis.time_index(end_time)
        add_line_interval = int(sum(time_index) // (data_step * line_number))
        # animation function.  This is called sequentially
        def animate(i):
            lines[0].set_xdata(variable_along_span[data_step * i, :])
            # Periodically freeze the current profile as a light ghost line.
            if i % add_line_interval == 0:
                lines.append(
                    matplotlib.pyplot.plot(variable_along_span[
                        data_step * i, :], self.force_analysis.span, **
                        style.LIGHT_LINE_STYLE)[0])
            return lines
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        animator = matplotlib.animation.FuncAnimation(
            figure,
            animate,
            frames=int(sum(time_index) // data_step),
            blit=False)
        matplotlib.pyplot.tight_layout()
        for out_filename in out_filenames:
            animator.save(out_filename, writer='imagemagick', fps=fps, dpi=dpi)
    def _move_line_along_time(self,
                              variable_along_span,
                              out_filenames,
                              start_time,
                              end_time,
                              xlabel,
                              xmin,
                              xmax,
                              grid,
                              data_step,
                              fps,
                              dpi=100,
                              color=style.DARK_COLOR,
                              figsize=(style.SINGLE_COLUMN_WIDTH / 2,
                                       style.SINGLE_COLUMN_LONG_HEIGHT / 2)):
        '''
        Animate a single spanwise profile line over time and save it.
        Parameters
        ----------
        variable_along_span : 2D array, time along axis 0, span along axis 1.
        out_filenames : a list of string
            Animation output files (saved with the imagemagick writer).
        start_time, end_time : float
            Time window to animate.
        xlabel : string; xmin, xmax : float
            x-axis label and limits.
        grid : bool
            True if grid is necessary.
        data_step : int
            Animate every data_step-th sample.
        fps, dpi : int
            Frame rate and resolution of the saved animation.
        '''
        matplotlib.pyplot.clf()
        figure = matplotlib.pyplot.gcf()
        figure.set_size_inches(figsize)
        time_index = (self.force_analysis.time >= start_time) & (
            self.force_analysis.time <= end_time)
        variable_along_span = variable_along_span[time_index]
        line, = matplotlib.pyplot.plot(
            variable_along_span[0, :],
            self.force_analysis.span,
            color=color)
        matplotlib.pyplot.xlim(xmin, xmax)
        axis = matplotlib.pyplot.gca()
        axis.locator_params(axis='x', nbins=style.SHORT_XTICK_MAX_LENGTH)
        axis.locator_params(axis='y', nbins=style.LONG_YTICK_MAX_LENGTH)
        matplotlib.pyplot.grid(grid)
        # NOTE(review): savefig('') appears to be used only to force a draw
        # so the x-axis offset text below is populated - confirm.
        figure.savefig('')
        # Fold matplotlib's scientific-notation offset (e.g. "1e-3") into
        # the xlabel instead of showing it beside the axis.
        x_sci_notaion = axis.xaxis.get_offset_text()
        x_sci_notaion.set_visible(False)
        if x_sci_notaion.get_text():
            xlabel = "{:s} / {:s}".format(xlabel[:-1],
                                          x_sci_notaion.get_text()[1:])
        matplotlib.pyplot.xlabel(xlabel)
        matplotlib.pyplot.ylabel(r'$z\cdot L^{-1}$')
        # start_index = self.force_analysis.time_index(start_time)
        # end_index = self.force_analysis.time_index(end_time)
        # initialization function: plot the background of each frame
        def init():
            line.set_data(variable_along_span[0, :],
                          self.force_analysis.span)
            return line,
        # animation function.  This is called sequentially
        def animate(i):
            line.set_xdata(variable_along_span[data_step * i, :])
            return line,
        # call the animator.  blit=True means only re-draw the parts that have
        # changed.
        animator = matplotlib.animation.FuncAnimation(
            figure,
            animate,
            init_func=init,
            frames=int(sum(time_index) // data_step),
            blit=True)
        matplotlib.pyplot.tight_layout()
        for out_filename in out_filenames:
            animator.save(out_filename, writer='imagemagick', fps=fps, dpi=dpi)
    def make_force_animation_along_time(self,
                                        out_filenames,
                                        start_time,
                                        end_time,
                                        xlabel,
                                        xmin,
                                        xmax,
                                        grid=True,
                                        data_step=50,
                                        fps=24):
        '''
        Make force animation.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        xlabel : string
            The label shown on the x axis.
        xmin : float
            Lower limit for x axis.
        xmax : float
            Upper limit for x axis.
        grid : bool
            True if grid is necessary.
        data_step : int
            Plot every data step
        fps : int
            Frame per second
        '''
        print('make_force_animation_along_time')
        # Thin wrapper around the shared single-line animation helper.
        self._move_line_along_time(self.force_analysis.force,
                                   out_filenames, start_time, end_time, xlabel,
                                   xmin, xmax, grid, data_step, fps)
    def make_curvature_animation_along_time(self,
                                            out_filenames,
                                            start_time,
                                            end_time,
                                            xlabel,
                                            xmin,
                                            xmax,
                                            grid=True,
                                            data_step=50,
                                            fps=24):
        '''
        Make curvature animation.
        Parameters
        ----------
        out_filenames : a list of string
            The filenames for saving figures.
        start_time : float
            Lower limit for time axis.
        end_time : float
            Upper limit for time axis.
        xlabel : string
            The label shown on the x axis.
        xmin : float
            Lower limit for x axis.
        xmax : float
            Upper limit for x axis.
        grid : bool
            True if grid is necessary.
        data_step : int
            Plot every data step
        fps : int
            Frame per second
        '''
        print('make_curvature_animation_along_time')
        # Curvature is animated with opposite sign, consistent with
        # plot_curvature_mean which also plots the opposite value.
        self._move_line_along_time(-self.force_analysis.curvature,
                                   out_filenames, start_time, end_time, xlabel,
                                   xmin, xmax, grid, data_step, fps)
| [
"numpy.amin",
"numpy.diff",
"numpy.linspace",
"numpy.nanmax",
"numpy.nanmin",
"numpy.meshgrid",
"numpy.amax",
"numpy.arange"
] | [((4977, 5012), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (4991, 5012), False, 'import numpy\n'), ((5033, 5088), 'numpy.linspace', 'numpy.linspace', (['colorbar_min', 'colorbar_max', 'contour_num'], {}), '(colorbar_min, colorbar_max, contour_num)\n', (5047, 5088), False, 'import numpy\n'), ((7018, 7053), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (7032, 7053), False, 'import numpy\n'), ((7075, 7131), 'numpy.linspace', 'numpy.linspace', (['colorbar_min', 'colorbar_max', 'contourf_num'], {}), '(colorbar_min, colorbar_max, contourf_num)\n', (7089, 7131), False, 'import numpy\n'), ((9687, 9722), 'numpy.meshgrid', 'numpy.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (9701, 9722), False, 'import numpy\n'), ((9744, 9800), 'numpy.linspace', 'numpy.linspace', (['colorbar_min', 'colorbar_max', 'contourf_num'], {}), '(colorbar_min, colorbar_max, contourf_num)\n', (9758, 9800), False, 'import numpy\n'), ((9821, 9876), 'numpy.linspace', 'numpy.linspace', (['colorbar_min', 'colorbar_max', 'contour_num'], {}), '(colorbar_min, colorbar_max, contour_num)\n', (9835, 9876), False, 'import numpy\n'), ((59976, 60034), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', '(step / self.force_analysis.node_number)'], {}), '(0, 1, step / self.force_analysis.node_number)\n', (59988, 60034), False, 'import numpy\n'), ((61766, 61824), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', '(step / self.force_analysis.node_number)'], {}), '(0, 1, step / self.force_analysis.node_number)\n', (61778, 61824), False, 'import numpy\n'), ((63646, 63704), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', '(step / self.force_analysis.node_number)'], {}), '(0, 1, step / self.force_analysis.node_number)\n', (63658, 63704), False, 'import numpy\n'), ((65007, 65065), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', '(step / self.force_analysis.node_number)'], {}), 
'(0, 1, step / self.force_analysis.node_number)\n', (65019, 65065), False, 'import numpy\n'), ((66513, 66571), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', '(step / self.force_analysis.node_number)'], {}), '(0, 1, step / self.force_analysis.node_number)\n', (66525, 66571), False, 'import numpy\n'), ((71766, 71795), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)'], {'num': 'num'}), '(0, 1, num=num)\n', (71780, 71795), False, 'import numpy\n'), ((85667, 85725), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', '(step / self.force_analysis.node_number)'], {}), '(0, 1, step / self.force_analysis.node_number)\n', (85679, 85725), False, 'import numpy\n'), ((16800, 16829), 'numpy.amax', 'numpy.amax', (['selected_variable'], {}), '(selected_variable)\n', (16810, 16829), False, 'import numpy\n'), ((16858, 16887), 'numpy.amin', 'numpy.amin', (['selected_variable'], {}), '(selected_variable)\n', (16868, 16887), False, 'import numpy\n'), ((18327, 18399), 'numpy.amin', 'numpy.amin', (['numpy.r_[upper_intersection_times, lower_intersection_times]'], {}), '(numpy.r_[upper_intersection_times, lower_intersection_times])\n', (18337, 18399), False, 'import numpy\n'), ((18453, 18525), 'numpy.amax', 'numpy.amax', (['numpy.r_[upper_intersection_times, lower_intersection_times]'], {}), '(numpy.r_[upper_intersection_times, lower_intersection_times])\n', (18463, 18525), False, 'import numpy\n'), ((71390, 71446), 'numpy.nanmin', 'numpy.nanmin', (['self.force_analysis.power_spectral_density'], {}), '(self.force_analysis.power_spectral_density)\n', (71402, 71446), False, 'import numpy\n'), ((71460, 71516), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.power_spectral_density'], {}), '(self.force_analysis.power_spectral_density)\n', (71472, 71516), False, 'import numpy\n'), ((65637, 65684), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.fft_amplitude'], {}), '(self.force_analysis.fft_amplitude)\n', (65649, 65684), False, 'import numpy\n'), ((67187, 67243), 'numpy.nanmax', 
'numpy.nanmax', (['self.force_analysis.power_spectral_density'], {}), '(self.force_analysis.power_spectral_density)\n', (67199, 67243), False, 'import numpy\n'), ((68575, 68628), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.modal_fft_amplitude'], {}), '(self.force_analysis.modal_fft_amplitude)\n', (68587, 68628), False, 'import numpy\n'), ((70021, 70083), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.modal_power_spectral_density'], {}), '(self.force_analysis.modal_power_spectral_density)\n', (70033, 70083), False, 'import numpy\n'), ((73333, 73380), 'numpy.nanmin', 'numpy.nanmin', (['self.force_analysis.fft_amplitude'], {}), '(self.force_analysis.fft_amplitude)\n', (73345, 73380), False, 'import numpy\n'), ((73424, 73471), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.fft_amplitude'], {}), '(self.force_analysis.fft_amplitude)\n', (73436, 73471), False, 'import numpy\n'), ((74419, 74475), 'numpy.nanmin', 'numpy.nanmin', (['self.force_analysis.power_spectral_density'], {}), '(self.force_analysis.power_spectral_density)\n', (74431, 74475), False, 'import numpy\n'), ((74519, 74575), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.power_spectral_density'], {}), '(self.force_analysis.power_spectral_density)\n', (74531, 74575), False, 'import numpy\n'), ((75481, 75528), 'numpy.nanmin', 'numpy.nanmin', (['self.force_analysis.fft_amplitude'], {}), '(self.force_analysis.fft_amplitude)\n', (75493, 75528), False, 'import numpy\n'), ((75572, 75619), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.fft_amplitude'], {}), '(self.force_analysis.fft_amplitude)\n', (75584, 75619), False, 'import numpy\n'), ((76557, 76613), 'numpy.nanmin', 'numpy.nanmin', (['self.force_analysis.power_spectral_density'], {}), '(self.force_analysis.power_spectral_density)\n', (76569, 76613), False, 'import numpy\n'), ((76657, 76713), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.power_spectral_density'], {}), 
'(self.force_analysis.power_spectral_density)\n', (76669, 76713), False, 'import numpy\n'), ((78189, 78248), 'numpy.nanmin', 'numpy.nanmin', (['self.force_analysis.wavelet_power[node_i - 1]'], {}), '(self.force_analysis.wavelet_power[node_i - 1])\n', (78201, 78248), False, 'import numpy\n'), ((78292, 78351), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.wavelet_power[node_i - 1]'], {}), '(self.force_analysis.wavelet_power[node_i - 1])\n', (78304, 78351), False, 'import numpy\n'), ((79821, 79880), 'numpy.nanmin', 'numpy.nanmin', (['self.force_analysis.wavelet_power[node_i - 1]'], {}), '(self.force_analysis.wavelet_power[node_i - 1])\n', (79833, 79880), False, 'import numpy\n'), ((79924, 79983), 'numpy.nanmax', 'numpy.nanmax', (['self.force_analysis.wavelet_power[node_i - 1]'], {}), '(self.force_analysis.wavelet_power[node_i - 1])\n', (79936, 79983), False, 'import numpy\n'), ((10272, 10302), 'numpy.diff', 'numpy.diff', (['contour_range[::2]'], {}), '(contour_range[::2])\n', (10282, 10302), False, 'import numpy\n')] |
from flask import Flask, jsonify, request, Response, make_response, send_file
import argparse, librosa, os, sys
import soundfile as sf
import copy
import numpy as np
import time
import re
sys.path.append('./tts')
sys.path.append('./waveglow')
bb = time.time()
import torch
import torch.nn as nn
from torch.autograd import Variable
from config_dict import emo_dict, gen_dict, age_dict
from tts.model import load_model
from tts.text import cmudict, text_to_sequence, decompose_hangul
from tts.text.korean_normalization import txt_preprocessing_only_num
from tts.hparams import create_hparams
from waveglow.denoiser import Denoiser
from VocGAN.model.generator import ModifiedGenerator
from VocGAN.utils.hparams import HParam, load_hparam_str
from VocGAN.denoiser import Denoiser as VocGAN_Denoiser
import zipfile
import torch.nn.functional as F
aa = time.time()
app = Flask(__name__)
# Command-line configuration for the synthesis server.
parser = argparse.ArgumentParser(description='training script')
# generation option
parser.add_argument('--out_dir', type=str, default='generated', help='')
parser.add_argument('--init_cmudict', type=str,
                    default='./data/cmu_dictionary')
parser.add_argument('--init_model', type=str,
                    default='./models/allinone_37000')
parser.add_argument('--init_waveglow', type=str,
                    default='./models/waveglow_256channels_ljs_v3.pt')
parser.add_argument('--init_VocGAN', type=str,
                    default='./models/VocGAN_0364.pt')
parser.add_argument('--config_VocGAN', type=str,
                    default='./VocGAN/config/default.yaml')
parser.add_argument('--use_GST',
                    default=False,
                    action='store_true')
parser.add_argument('--enable_gpus', type=str,
                    default='0,1,2,3,4,5,6,7',
                    help='number of gpus')
parser.add_argument('--init_from', type=str, default='allinone_37000')
parser.add_argument('--port', type=int, default=8081, help='port number for api')
# NOTE: action='store_false' with default=True means passing --details DISABLES
# saving the attention/text artifacts, despite the positive-sounding help text.
parser.add_argument('--details', default=True, action='store_false', help='attention and text will be saved as well')
new_args = parser.parse_args()
# Restrict which GPUs torch can see before any CUDA context is created.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=new_args.enable_gpus
hparams = create_hparams()
hparams.max_decoder_steps=2000
# Load the Tacotron-style acoustic model and put it in eval mode on the GPU.
model = load_model(hparams).cuda().eval()
model.load_state_dict(torch.load(new_args.init_model)['state_dict'])
# WaveGlow vocoder + its denoiser (kept loaded although generate() currently
# uses the VocGAN path; the WaveGlow call there is commented out).
waveglow = torch.load(new_args.init_waveglow)['model'].cuda().eval()
denoiser = Denoiser(waveglow).cuda().eval()
# VocGAN vocoder: rebuild the generator from its config, then restore weights.
hp = HParam(new_args.config_VocGAN)
checkpoint = torch.load(new_args.init_VocGAN)
VocGAN = ModifiedGenerator(hp.audio.n_mel_channels, hp.model.n_residual_layers,
                    ratios=hp.model.generator_ratio, mult = hp.model.mult,
                    out_band = hp.model.out_channels).cuda()
VocGAN.load_state_dict(checkpoint['model_g'])
VocGAN.eval(inference=True)
VocGAN_denoiser = VocGAN_Denoiser(VocGAN).cuda().eval()
# Pronunciation dictionary used by text_to_sequence.
arpabet_dict = cmudict.CMUDict(new_args.init_cmudict)
use_GST = new_args.use_GST
# Reference mel used for GST conditioning of question-like sentences (see calc()).
special_gst_mel = torch.load('./gst_mel/gst_mel').cuda()
def saveAttention(input_sentence, attentions, outpath):
    """Render an attention-alignment matrix to a grayscale PNG.

    Arguments
    ---------
    input_sentence : str or None
        Decomposed input text; currently only kept for interface compatibility
        (axis tick labelling from it is disabled).
    attentions : torch.Tensor
        Alignment matrix; plotted row-major with encoder steps on the y axis.
    outpath : str
        Path of the image file to write (format inferred from extension).
    """
    # Import lazily and select the non-interactive Agg backend so plotting
    # works on a headless server; the backend must be chosen before pyplot
    # is imported.
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    cax = ax.matshow(attentions.cpu().numpy(), aspect='auto', origin='upper', cmap='gray')
    # fig.colorbar(cax)
    # Fix: the original set the y label unconditionally and then set the same
    # label a second time inside `if input_sentence:` — one call suffices.
    plt.ylabel('Encoder timestep', fontsize=18)
    plt.xlabel('Decoder timestep', fontsize=18)
    plt.tight_layout()
    plt.savefig(outpath)
    # Close every figure so repeated requests do not leak matplotlib state.
    plt.close('all')
def generate(sentence, emotion, age, gender, intensity, file_name, padding=2.0, gst_mel=None, korean_num=True):
    """Synthesize a sentence into a 44.1 kHz stereo WAV and return a zip of the results.

    sentence(str): sentence to be synthesized (Korean)
    emotion(list of int): single-element list with the emotion index
    age(list of int): single-element list with the age index (not fed to the
        model here — only emotion and gender go into `x_inference`)
    gender(list of int): single-element list with the gender index
    intensity(float): intensity of the emotion; used only in output file names
    file_name(str): base string used to build the output file names
    padding(float): scales the cap on decoder steps, i.e. the maximum length
        of the synthesized audio
    gst_mel: optional reference mel passed to model.inference for GST style
        conditioning (may be None)
    korean_num(bool): when True, digits in the sentence are first normalized
        to Korean words
    Returns: path to a zip archive containing the wav, the alignment .npy and
        the text file.
    """
    # Normalize digits, then decompose Hangul syllables into Jamo for the
    # text front-end.
    if korean_num == True:
        sentence = txt_preprocessing_only_num(sentence)
    text = decompose_hangul(sentence)
    # text = '. ' + text + ' .'
    inputs = torch.LongTensor(text_to_sequence(text, hparams.text_cleaners, arpabet_dict))[None, :].cuda()
    emo_id = Variable(torch.LongTensor(emotion), requires_grad=False)
    emo_id = emo_id.cuda()
    gen_id = Variable(torch.LongTensor(gender), requires_grad=False)
    gen_id = gen_id.cuda()
    # Model input: (token ids, dummy length tensor, gender id, emotion id).
    x_inference = (inputs.long(), torch.tensor([0]).cuda(), gen_id, emo_id)
    start = time.time()
    # Cap decoder length proportionally to the input length and `padding`.
    model.decoder.max_decoder_steps = int(inputs.shape[-1] * 5 * padding) + 50
    with torch.no_grad():
        _, mel_outputs_postnet, _, alignments = model.inference(x_inference, logging=False, gst_mel=gst_mel)
        # wave = denoiser(waveglow.infer(mel_outputs_postnet, sigma=0.7), 0.01)[:, 0]
        # Vocode the mel spectrogram with VocGAN, then denoise lightly.
        wave = VocGAN_denoiser(VocGAN.inference(mel_outputs_postnet).squeeze(0), 0.01)[:, 0]
        wave = wave.squeeze()
        # Trim the trailing 10 hops (vocoder padding artifacts).
        wave = wave[:-(hp.audio.hop_length*10)]
    # Report real-time factor (seconds of audio per second of compute).
    wave_length = len(wave) / 22050
    generate_time = time.time() - start
    generation_speed = wave_length / generate_time
    print('{:.2f}s/s: it takes {:.2f}s for {:.2f}s wave.'.format(generation_speed, generate_time, wave_length))
    # amplifying
    # Resample 22.05k -> 44.1k, duplicate to stereo, peak-normalize to 95% of
    # the int16 range, and convert to 16-bit PCM.
    wave = wave.squeeze().cpu().numpy()
    wave = librosa.core.resample(wave, 22050, 44100)
    wave = np.stack((wave, wave))
    maxv = 2 ** (16 - 1)
    wave /= max(abs(wave.max()), abs(wave.min()))
    wave = (wave * maxv * 0.95).astype(np.int16)
    #wave *= args.amp
    outpath1 = '%s/%s_%s_%s_i%.1f_%s.wav' % (new_args.out_dir, os.path.basename(new_args.init_from), emotion, gender, intensity, file_name)
    #librosa.output.write_wav(outpath1, wave, 44100)
    sf.write(outpath1, wave.T, 44100, format='WAV', endian='LITTLE', subtype='PCM_16')
    # Optionally dump the attention plot, raw alignment matrix, and the input
    # text for debugging.
    if new_args.details:
        outpath2 = '%s/%s_%s_%s_i%0.1f_%s.png' % (new_args.out_dir, os.path.basename(new_args.init_from), emotion, gender, intensity, file_name)
        outpath3 = '%s/%s_%s_%s_i%0.1f_%s.npy' % (new_args.out_dir, os.path.basename(new_args.init_from), emotion, gender, intensity, file_name)
        outpath4 = '%s/%s_%s_%s_i%0.1f_%s.txt' % (new_args.out_dir, os.path.basename(new_args.init_from), emotion, gender, intensity, file_name)
        alignments = alignments.transpose(1,2).squeeze()
        # first step 0 step attention
        alignments[0, 0] = 1.0
        # no_zero_index = (torch.cumsum((torch.argmax(alignments, dim=0) != 0), dim=0) != 0)
        # no_zero_index *= (torch.argmax(alignments, dim=0) == 0)
        # alignments[0, no_zero_index] = 0
        saveAttention(text, alignments, outpath2)
        np.save(outpath3, alignments.cpu().numpy())
        with open(outpath4, 'w') as f:
            f.write(sentence + '\n')
            f.write(text + '\n')
            #f.write(' '.join(['{:.2f}'.format(i*12.5) for i in range(wave_lengths[0])]) + '\n')
    # return outpath1
    # Bundle the artifacts into a zip; the closure uses this call's
    # emotion/gender/intensity/file_name for the archive name.
    def zipFiles(file_list):
        zippath = '%s/%s_%s_%s_i%0.1f_%s.zip' % (new_args.out_dir, os.path.basename(new_args.init_from), emotion, gender, intensity, file_name)
        zipf = zipfile.ZipFile(zippath, 'w', zipfile.ZIP_DEFLATED)
        for files in file_list:
            zipf.write(files)
        zipf.close()
        return zippath
    # NOTE(review): outpath3/outpath4 are only defined inside the
    # `if new_args.details:` branch above, so running with --details
    # (which sets new_args.details=False) would raise NameError here — confirm.
    file_list = []
    file_list.append(outpath1)
    file_list.append(outpath3)
    file_list.append(outpath4)
    zippath = zipFiles(file_list)
    print('zippath', zippath)
    return zippath
@app.route('/', methods=['POST'])
def calc():
    """Flask endpoint: synthesize the posted sentence and return a zip attachment.

    Form fields: sentence (required), emotion, age, gender, intensity.
    The string codes (e.g. '10005') are presumably catalogue IDs mapped to
    model indices via emo_dict/age_dict/gen_dict — verify against config_dict.
    """
    print('request.form : {}'.format(request.form))
    # Pull form fields with fallback defaults when a field is absent.
    sentence = request.form['sentence'] if 'sentence' in request.form.keys() else None
    emotion = request.form['emotion'] if 'emotion' in request.form.keys() else '10005'
    age = request.form['age'] if 'age' in request.form.keys() else '20003'
    gender = request.form['gender'] if 'gender' in request.form.keys() else '30002'
    intensity = request.form['intensity'] if 'intensity' in request.form.keys() else 1
    intensity = float(intensity)
    # NOTE(review): assert is stripped under `python -O`; an explicit 400
    # response would be safer input validation.
    assert sentence is not None
    aa = time.time()  # local timer; shadows the module-level `aa`
    # Map code strings to single-element index lists, as generate() expects.
    emotion = [int(emo_dict[emotion])]
    age = [int(age_dict[age])]
    gender = [int(gen_dict[gender])]
    # Cap the file-name stem at 80 characters of the sentence.
    if len(sentence) > 80:
        file_name = copy.deepcopy(sentence[:80])
    else:
        file_name = copy.deepcopy(sentence)
    # With GST enabled, condition question-like sentences (ending in '있어?')
    # on the special reference mel.
    if use_GST:
        if '있어?' in sentence[-4:]:
            gst_mel = special_gst_mel
        else:
            gst_mel = None
    else:
        gst_mel = None
    # Normalize terminal punctuation to a period before synthesis.
    sentence = sentence.replace('?', '.')
    sentence = sentence.replace('!', '.')
    if sentence[-1] != '.':
        sentence += '.'
    # Cache check: reuse a previously generated zip with the same name.
    # ('%.1f' here equals '%0.1f' used in generate(), so the names match.)
    outpath = '%s/%s_%s_%s_i%.1f_%s.zip' % (new_args.out_dir, os.path.basename(new_args.init_from), emotion, gender, intensity, file_name)
    if not os.path.exists(outpath):
        filename = generate(sentence, emotion, age, gender, intensity, file_name, gst_mel=gst_mel)
    else:
        print('file already exists {}'.format(outpath))
        filename = outpath
    print('it takes {:.2f}s'.format(time.time() - aa))
    # NOTE(review): `attachment_filename` was removed in Flask 2.2 in favor of
    # `download_name` — confirm the deployed Flask version.
    return send_file(filename, mimetype="zip", as_attachment=True, attachment_filename="generated.zip")
if __name__ == '__main__':
    # `bb` is captured at import start, so this reports total model-loading time.
    print('pre-loading takes {}s'.format(time.time() - bb))
    # Serve on all interfaces with the built-in Flask server.
    app.run(host='0.0.0.0', port=new_args.port)
| [
"zipfile.ZipFile",
"flask.Flask",
"matplotlib.pyplot.ylabel",
"tts.text.korean_normalization.txt_preprocessing_only_num",
"torch.LongTensor",
"soundfile.write",
"tts.hparams.create_hparams",
"copy.deepcopy",
"sys.path.append",
"os.path.exists",
"tts.text.decompose_hangul",
"argparse.ArgumentPa... | [((188, 212), 'sys.path.append', 'sys.path.append', (['"""./tts"""'], {}), "('./tts')\n", (203, 212), False, 'import argparse, librosa, os, sys\n'), ((213, 242), 'sys.path.append', 'sys.path.append', (['"""./waveglow"""'], {}), "('./waveglow')\n", (228, 242), False, 'import argparse, librosa, os, sys\n'), ((248, 259), 'time.time', 'time.time', ([], {}), '()\n', (257, 259), False, 'import time\n'), ((853, 864), 'time.time', 'time.time', ([], {}), '()\n', (862, 864), False, 'import time\n'), ((871, 886), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (876, 886), False, 'from flask import Flask, jsonify, request, Response, make_response, send_file\n'), ((897, 951), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""training script"""'}), "(description='training script')\n", (920, 951), False, 'import argparse, librosa, os, sys\n'), ((2247, 2263), 'tts.hparams.create_hparams', 'create_hparams', ([], {}), '()\n', (2261, 2263), False, 'from tts.hparams import create_hparams\n'), ((2526, 2556), 'VocGAN.utils.hparams.HParam', 'HParam', (['new_args.config_VocGAN'], {}), '(new_args.config_VocGAN)\n', (2532, 2556), False, 'from VocGAN.utils.hparams import HParam, load_hparam_str\n'), ((2570, 2602), 'torch.load', 'torch.load', (['new_args.init_VocGAN'], {}), '(new_args.init_VocGAN)\n', (2580, 2602), False, 'import torch\n'), ((2973, 3011), 'tts.text.cmudict.CMUDict', 'cmudict.CMUDict', (['new_args.init_cmudict'], {}), '(new_args.init_cmudict)\n', (2988, 3011), False, 'from tts.text import cmudict, text_to_sequence, decompose_hangul\n'), ((3215, 3236), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (3229, 3236), False, 'import matplotlib\n'), ((3327, 3341), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3339, 3341), True, 'import matplotlib.pyplot as plt\n'), ((3460, 3503), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Encoder timestep"""'], {'fontsize': '(18)'}), 
"('Encoder timestep', fontsize=18)\n", (3470, 3503), True, 'import matplotlib.pyplot as plt\n'), ((3508, 3551), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Decoder timestep"""'], {'fontsize': '(18)'}), "('Decoder timestep', fontsize=18)\n", (3518, 3551), True, 'import matplotlib.pyplot as plt\n'), ((3821, 3839), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3837, 3839), True, 'import matplotlib.pyplot as plt\n'), ((3844, 3864), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outpath'], {}), '(outpath)\n', (3855, 3864), True, 'import matplotlib.pyplot as plt\n'), ((3869, 3885), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (3878, 3885), True, 'import matplotlib.pyplot as plt\n'), ((4392, 4418), 'tts.text.decompose_hangul', 'decompose_hangul', (['sentence'], {}), '(sentence)\n', (4408, 4418), False, 'from tts.text import cmudict, text_to_sequence, decompose_hangul\n'), ((4855, 4866), 'time.time', 'time.time', ([], {}), '()\n', (4864, 4866), False, 'import time\n'), ((5642, 5683), 'librosa.core.resample', 'librosa.core.resample', (['wave', '(22050)', '(44100)'], {}), '(wave, 22050, 44100)\n', (5663, 5683), False, 'import argparse, librosa, os, sys\n'), ((5695, 5717), 'numpy.stack', 'np.stack', (['(wave, wave)'], {}), '((wave, wave))\n', (5703, 5717), True, 'import numpy as np\n'), ((6063, 6150), 'soundfile.write', 'sf.write', (['outpath1', 'wave.T', '(44100)'], {'format': '"""WAV"""', 'endian': '"""LITTLE"""', 'subtype': '"""PCM_16"""'}), "(outpath1, wave.T, 44100, format='WAV', endian='LITTLE', subtype=\n 'PCM_16')\n", (6071, 6150), True, 'import soundfile as sf\n'), ((8404, 8415), 'time.time', 'time.time', ([], {}), '()\n', (8413, 8415), False, 'import time\n'), ((9416, 9513), 'flask.send_file', 'send_file', (['filename'], {'mimetype': '"""zip"""', 'as_attachment': '(True)', 'attachment_filename': '"""generated.zip"""'}), "(filename, mimetype='zip', as_attachment=True, attachment_filename\n 
='generated.zip')\n", (9425, 9513), False, 'from flask import Flask, jsonify, request, Response, make_response, send_file\n'), ((2359, 2390), 'torch.load', 'torch.load', (['new_args.init_model'], {}), '(new_args.init_model)\n', (2369, 2390), False, 'import torch\n'), ((2612, 2776), 'VocGAN.model.generator.ModifiedGenerator', 'ModifiedGenerator', (['hp.audio.n_mel_channels', 'hp.model.n_residual_layers'], {'ratios': 'hp.model.generator_ratio', 'mult': 'hp.model.mult', 'out_band': 'hp.model.out_channels'}), '(hp.audio.n_mel_channels, hp.model.n_residual_layers,\n ratios=hp.model.generator_ratio, mult=hp.model.mult, out_band=hp.model.\n out_channels)\n', (2629, 2776), False, 'from VocGAN.model.generator import ModifiedGenerator\n'), ((3058, 3089), 'torch.load', 'torch.load', (['"""./gst_mel/gst_mel"""'], {}), "('./gst_mel/gst_mel')\n", (3068, 3089), False, 'import torch\n'), ((3584, 3627), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Encoder timestep"""'], {'fontsize': '(18)'}), "('Encoder timestep', fontsize=18)\n", (3594, 3627), True, 'import matplotlib.pyplot as plt\n'), ((4335, 4371), 'tts.text.korean_normalization.txt_preprocessing_only_num', 'txt_preprocessing_only_num', (['sentence'], {}), '(sentence)\n', (4361, 4371), False, 'from tts.text.korean_normalization import txt_preprocessing_only_num\n'), ((4581, 4606), 'torch.LongTensor', 'torch.LongTensor', (['emotion'], {}), '(emotion)\n', (4597, 4606), False, 'import torch\n'), ((4683, 4707), 'torch.LongTensor', 'torch.LongTensor', (['gender'], {}), '(gender)\n', (4699, 4707), False, 'import torch\n'), ((4955, 4970), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4968, 4970), False, 'import torch\n'), ((5386, 5397), 'time.time', 'time.time', ([], {}), '()\n', (5395, 5397), False, 'import time\n'), ((7453, 7504), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zippath', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(zippath, 'w', zipfile.ZIP_DEFLATED)\n", (7468, 7504), False, 'import zipfile\n'), ((8572, 8600), 
'copy.deepcopy', 'copy.deepcopy', (['sentence[:80]'], {}), '(sentence[:80])\n', (8585, 8600), False, 'import copy\n'), ((8631, 8654), 'copy.deepcopy', 'copy.deepcopy', (['sentence'], {}), '(sentence)\n', (8644, 8654), False, 'import copy\n'), ((9132, 9155), 'os.path.exists', 'os.path.exists', (['outpath'], {}), '(outpath)\n', (9146, 9155), False, 'import argparse, librosa, os, sys\n'), ((5929, 5965), 'os.path.basename', 'os.path.basename', (['new_args.init_from'], {}), '(new_args.init_from)\n', (5945, 5965), False, 'import argparse, librosa, os, sys\n'), ((7965, 7984), 'flask.request.form.keys', 'request.form.keys', ([], {}), '()\n', (7982, 7984), False, 'from flask import Flask, jsonify, request, Response, make_response, send_file\n'), ((8049, 8068), 'flask.request.form.keys', 'request.form.keys', ([], {}), '()\n', (8066, 8068), False, 'from flask import Flask, jsonify, request, Response, make_response, send_file\n'), ((8124, 8143), 'flask.request.form.keys', 'request.form.keys', ([], {}), '()\n', (8141, 8143), False, 'from flask import Flask, jsonify, request, Response, make_response, send_file\n'), ((8208, 8227), 'flask.request.form.keys', 'request.form.keys', ([], {}), '()\n', (8225, 8227), False, 'from flask import Flask, jsonify, request, Response, make_response, send_file\n'), ((8301, 8320), 'flask.request.form.keys', 'request.form.keys', ([], {}), '()\n', (8318, 8320), False, 'from flask import Flask, jsonify, request, Response, make_response, send_file\n'), ((9039, 9075), 'os.path.basename', 'os.path.basename', (['new_args.init_from'], {}), '(new_args.init_from)\n', (9055, 9075), False, 'import argparse, librosa, os, sys\n'), ((2303, 2322), 'tts.model.load_model', 'load_model', (['hparams'], {}), '(hparams)\n', (2313, 2322), False, 'from tts.model import load_model\n'), ((2487, 2505), 'waveglow.denoiser.Denoiser', 'Denoiser', (['waveglow'], {}), '(waveglow)\n', (2495, 2505), False, 'from waveglow.denoiser import Denoiser\n'), ((2919, 2942), 
'VocGAN.denoiser.Denoiser', 'VocGAN_Denoiser', (['VocGAN'], {}), '(VocGAN)\n', (2934, 2942), True, 'from VocGAN.denoiser import Denoiser as VocGAN_Denoiser\n'), ((4796, 4813), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (4808, 4813), False, 'import torch\n'), ((6239, 6275), 'os.path.basename', 'os.path.basename', (['new_args.init_from'], {}), '(new_args.init_from)\n', (6255, 6275), False, 'import argparse, librosa, os, sys\n'), ((6384, 6420), 'os.path.basename', 'os.path.basename', (['new_args.init_from'], {}), '(new_args.init_from)\n', (6400, 6420), False, 'import argparse, librosa, os, sys\n'), ((6529, 6565), 'os.path.basename', 'os.path.basename', (['new_args.init_from'], {}), '(new_args.init_from)\n', (6545, 6565), False, 'import argparse, librosa, os, sys\n'), ((7361, 7397), 'os.path.basename', 'os.path.basename', (['new_args.init_from'], {}), '(new_args.init_from)\n', (7377, 7397), False, 'import argparse, librosa, os, sys\n'), ((9386, 9397), 'time.time', 'time.time', ([], {}), '()\n', (9395, 9397), False, 'import time\n'), ((9580, 9591), 'time.time', 'time.time', ([], {}), '()\n', (9589, 9591), False, 'import time\n'), ((2418, 2452), 'torch.load', 'torch.load', (['new_args.init_waveglow'], {}), '(new_args.init_waveglow)\n', (2428, 2452), False, 'import torch\n'), ((4481, 4540), 'tts.text.text_to_sequence', 'text_to_sequence', (['text', 'hparams.text_cleaners', 'arpabet_dict'], {}), '(text, hparams.text_cleaners, arpabet_dict)\n', (4497, 4540), False, 'from tts.text import cmudict, text_to_sequence, decompose_hangul\n')] |
import sys
import re
import glob
import numpy as np
from os.path import basename
import environments
import experiments
from experiments.plotting import plot_policy_map, plot_policy_map1, \
plot_policy_map_combined, plot_value_map, plot_value_map1, plot_value_map2
# Render policy/value heatmaps for every solved MDP found under `base_dir`.
base_dir = 'output/Q'
#base_dir = 'test-heatmap'
output_dir = 'output/images/Q'
#output_dir = 'test-heatmap'

# Pre-built environments keyed by the grid-size tag embedded in the file names.
envs = {}
envs['4x4'] = environments.get_small_frozen_lake_environment()
envs['8x8'] = environments.get_medium_rewarding_frozen_lake_environment()
envs['15x15'] = environments.get_large_frozen_lake_environment()
envs['4x12'] = environments.get_windy_cliff_walking_environment()

# Extract the MDP name from a value-grid file name like 'v-<name>.txt'.
# Fix: use a raw string — '\.' is an invalid escape sequence in a plain
# string literal (SyntaxWarning on modern Python).
title_regex = re.compile(r'v-(.*)\.txt')

env_strs = ['4x4', '8x8', '15x15', '4x12']
#env_strs = ['4x12']
for env_str in env_strs:
    grid_files = glob.glob('{}/v-*{}*.txt'.format(base_dir, env_str))
    print(grid_files)
    env = envs[env_str]
    for path in grid_files:
        file = basename(path)  # fix: no need to re-apply basename below
        search_result = title_regex.search(file)
        if search_result is None:
            print("Could not parse: {}".format(file))
            continue

        mdp_name = search_result.groups()[0]
        # Load the greedy policy and the state-value grid produced by the solver.
        policy = np.loadtxt(base_dir + '/policy-' + mdp_name + '.txt')
        v = np.loadtxt(base_dir + '/v-' + mdp_name + '.txt')

        # Two policy-map styles, a combined policy+value map, and three
        # value-map styles, each saved as a 150-dpi PNG.
        p = plot_policy_map(mdp_name, policy, env.desc, env.colors(), env.directions())
        p.savefig(output_dir + '/policy-' + mdp_name + '-0.png', format='png', dpi=150)
        p.close()

        p = plot_policy_map1(mdp_name, policy, env.desc, env.colors(), env.directions())
        p.savefig(output_dir + '/policy-' + mdp_name + '-1.png', format='png', dpi=150)
        p.close()

        p = plot_policy_map_combined(mdp_name, policy, v, env.desc, env.colors(), env.directions())
        p.savefig(output_dir + '/combined-' + mdp_name + '-0.png', format='png', dpi=150)
        p.close()

        p = plot_value_map(mdp_name, v, env.desc, env.colors())
        p.savefig(output_dir + '/v-' + mdp_name + '-0.png', format='png', dpi=150)
        p.close()

        p = plot_value_map1(mdp_name, v, env.desc, env.colors())
        p.savefig(output_dir + '/v-' + mdp_name + '-1.png', format='png', dpi=150)
        p.close()

        p = plot_value_map2(mdp_name, v, env.desc, env.colors())
        p.savefig(output_dir + '/v-' + mdp_name + '-2.png', format='png', dpi=150)
        p.close()
| [
"environments.get_medium_rewarding_frozen_lake_environment",
"environments.get_large_frozen_lake_environment",
"re.compile",
"environments.get_windy_cliff_walking_environment",
"os.path.basename",
"numpy.loadtxt",
"environments.get_small_frozen_lake_environment"
] | [((407, 455), 'environments.get_small_frozen_lake_environment', 'environments.get_small_frozen_lake_environment', ([], {}), '()\n', (453, 455), False, 'import environments\n'), ((472, 531), 'environments.get_medium_rewarding_frozen_lake_environment', 'environments.get_medium_rewarding_frozen_lake_environment', ([], {}), '()\n', (529, 531), False, 'import environments\n'), ((548, 596), 'environments.get_large_frozen_lake_environment', 'environments.get_large_frozen_lake_environment', ([], {}), '()\n', (594, 596), False, 'import environments\n'), ((613, 663), 'environments.get_windy_cliff_walking_environment', 'environments.get_windy_cliff_walking_environment', ([], {}), '()\n', (661, 663), False, 'import environments\n'), ((679, 705), 're.compile', 're.compile', (['"""v-(.*)\\\\.txt"""'], {}), "('v-(.*)\\\\.txt')\n", (689, 705), False, 'import re\n'), ((957, 971), 'os.path.basename', 'basename', (['path'], {}), '(path)\n', (965, 971), False, 'from os.path import basename\n'), ((1236, 1289), 'numpy.loadtxt', 'np.loadtxt', (["(base_dir + '/policy-' + mdp_name + '.txt')"], {}), "(base_dir + '/policy-' + mdp_name + '.txt')\n", (1246, 1289), True, 'import numpy as np\n'), ((1307, 1355), 'numpy.loadtxt', 'np.loadtxt', (["(base_dir + '/v-' + mdp_name + '.txt')"], {}), "(base_dir + '/v-' + mdp_name + '.txt')\n", (1317, 1355), True, 'import numpy as np\n'), ((1015, 1029), 'os.path.basename', 'basename', (['file'], {}), '(file)\n', (1023, 1029), False, 'from os.path import basename\n'), ((1112, 1126), 'os.path.basename', 'basename', (['file'], {}), '(file)\n', (1120, 1126), False, 'from os.path import basename\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020-2021 <NAME> and <NAME>
#
# Distributed under terms of the BSD 3-Clause license.
""" Functions related to creating data for a simulation.
"""
import os
import time
import pathlib
import shutil
import glob
from typing import Tuple
import urllib3
import requests
import numpy
import rasterio
import rasterio.features
import rasterio.transform
from gclandspill import _misc
from gclandspill import clawutil
def create_data(
        case_dir: os.PathLike, log_level: int = None,
        out_dir: os.PathLike = "_output", overwrite: bool = False
):
    """Create *.data files (and topography & hydrological files) in case folder.

    Writes Clawpack's *.data files into `out_dir`, then ensures the topography
    and hydrological rasters referenced by the case's setrun.py exist
    (downloading them when missing).

    Arguments
    ---------
    case_dir : PathLike
        The (absolute) path to the target case folder, where setrun.py can be found.
    log_level : int or None
        To overwrite the log verbosity in the original config file.
    out_dir : str or PathLike
        Folder to put output files for this particular run. If not an absolute path, assume it's
        relative to `case_dir`
    overwrite : bool
        Whether or not to force overwrite `out_dir` if it exists. If False (default) and if
        `out_dir` exists, copy to a new folder with current time appended.

    Raises
    ------
    FileNotFoundError
        If `case_dir` does not exist or is not a directory.
    """
    # let pathlib handle path-related stuff
    case_dir = pathlib.Path(case_dir).expanduser().resolve()
    if not case_dir.is_dir():
        raise FileNotFoundError("{} does not exist or is not a folder".format(case_dir))
    # import setrun.py
    setrun = _misc.import_setrun(case_dir)
    rundata = setrun.setrun()  # get ClawRunData object
    # if we need to overwrite the log verbosity
    if log_level is not None:
        rundata.clawdata.verbosity = log_level
        rundata.clawdata.verbosity_regrid = log_level
        rundata.amrdata.verbosity_regrid = log_level
    # geoclaw' `TopographyData` assumes topography filenames are relative to the output folder,
    # while we assuem the filenames are always relative to the case folder. This forces us to write
    # *.data to the case folder first and move to our target output folder
    rundata.write(str(case_dir))  # write *.data to the case folder
    # get a list of *.data files just outputed
    data_files = glob.glob(str(case_dir.joinpath("*.data")))  # glob doesn't like PathLike...
    # prepare the true output folder so we can move *.data to there
    out_dir = pathlib.Path(out_dir).expanduser()
    if not out_dir.is_absolute():
        out_dir = case_dir.joinpath(out_dir)
    if out_dir.is_dir():
        if not overwrite:  # make a backup if out_dir exists and overwriting not allowed
            shutil.move(out_dir, str(out_dir)+"."+time.strftime("%Y%m%dT%H%M%S%Z"))
        else:  # just delete the old out_dir
            shutil.rmtree(out_dir)
    # create the folder regardless
    os.makedirs(out_dir)
    # move *.data to the true output folder
    for data_file in data_files:
        shutil.move(data_file, out_dir)
    # check if topo file exists. Download it if not exist
    check_download_topo(case_dir, rundata)
    # check if hudro file exists. Download it if not exist
    check_download_hydro(case_dir, rundata)
def check_download_topo(case_dir: os.PathLike, rundata: clawutil.data.ClawRunData):
    """Ensure every topography file referenced by `rundata` exists on disk.

    Missing files are downloaded once; any further missing entries are filled
    by copying that first download.

    Arguments
    ---------
    case_dir : PathLike
        Path to the directory of a case (where setrun.py can be found).
    rundata : ClawRunData
        An instance of `ClawRunData`.
    """
    # let pathlib handle paht-related stuff
    case_dir = pathlib.Path(case_dir).expanduser().resolve()

    # computational-domain extent [xmin, ymin, xmax, ymax] from setrun.py
    extent = [
        rundata.clawdata.lower[0], rundata.clawdata.lower[1],
        rundata.clawdata.upper[0], rundata.clawdata.upper[1],
    ]

    # finest cell counts after applying every AMR refinement ratio
    cells_x = rundata.clawdata.num_cells[0]
    cells_y = rundata.clawdata.num_cells[1]
    for level in range(rundata.amrdata.amr_levels_max - 1):
        cells_x *= rundata.amrdata.refinement_ratios_x[level]
        cells_y *= rundata.amrdata.refinement_ratios_y[level]

    # finest resolution; then pad the topo extent by one cell on each side so
    # the raster is slightly larger than the computational domain
    res = min((extent[2] - extent[0]) / cells_x, (extent[3] - extent[1]) / cells_y)
    extent = [extent[0] - res, extent[1] - res, extent[2] + res, extent[3] + res]

    downloaded = None  # path of the file fetched by this call, if any

    for topo in rundata.topo_data.topofiles:  # each entry is a list; filename is last
        raw_name = topo[-1]
        if os.path.isabs(raw_name):
            topo_file = pathlib.Path(raw_name)
        else:
            topo_file = case_dir.joinpath(raw_name).resolve()

        if topo_file.is_file():
            print("Topo file {} found. ".format(topo_file) + "Re-use it.")
            continue

        if downloaded is None:  # first missing file: actually download it
            print("Topo file {} not found. ".format(topo_file) + "Download it now.")
            downloaded = download_topo_single(topo_file, extent, res)
        else:  # subsequent missing files: reuse the earlier download
            print("Topo file {} not found. ".format(topo_file) + "Copy from {}.".format(downloaded))
            shutil.copyfile(downloaded, topo_file)
def download_topo_single(
        topo_file: os.PathLike,
        ext: Tuple[float, float, float, float],
        res: float):
    """Download a single topography file covering the given extent.
    A GeoTiff is fetched from the elevation server, converted to Esri ASCII,
    and the intermediate GeoTiff is removed.
    Arguments
    ---------
    topo_file : PathLike
        Path to the topo file.
    ext : tuple
        Extent of the topo, i.e., [x_min, y_min, x_max, y_max]
    res : float
        Resolution of the topo file.
    Returns
    -------
    The path to the downloaded file. Should be the same as the provided `topo_file`.
    """
    topo_file = pathlib.Path(topo_file).resolve()
    # make sure the destination directory exists
    os.makedirs(topo_file.parent, exist_ok=True)
    tif_path = topo_file.with_suffix(".tif")
    # grab the elevation data as a GeoTiff first
    print("Downloading {}".format(tif_path))
    obtain_geotiff(ext, tif_path, res)
    print("Done downloading {}".format(tif_path))
    # GeoClaw consumes Esri ASCII rasters, so convert the GeoTiff
    print("Converting to ESRI ASCII file")
    geotiff_2_esri_ascii(tif_path, topo_file)
    print("Done converting to {}".format(topo_file))
    # the intermediate GeoTiff is no longer needed
    os.remove(tif_path)
    return topo_file
def check_download_hydro(case_dir: os.PathLike, rundata: clawutil.data.ClawRunData):
    """Ensure the hydrological-feature raster exists; download it otherwise.
    Arguments
    ---------
    case_dir : PathLike
        Path to the directory of a case (where setrun.py can be found).
    rundata : ClawRunData
        An instance of `ClawRunData`.
    """
    hydro_files = rundata.landspill_data.hydro_features.files
    if not hydro_files:
        return  # this case uses no hydro features
    case_dir = pathlib.Path(case_dir).expanduser().resolve()
    hydro_file = pathlib.Path(hydro_files[0])
    if not hydro_file.is_absolute():
        hydro_file = case_dir.joinpath(hydro_files[0]).resolve()
    if os.path.isfile(hydro_file):
        print("Hydro file {} found. ".format(hydro_file) + "Re-use it.")
        return
    print("Hydro file {} not found. ".format(hydro_file) + "Download it now.")
    # computational-domain extent: [xmin, ymin, xmax, ymax]
    clawdata = rundata.clawdata
    extent = [clawdata.lower[0], clawdata.lower[1],
              clawdata.upper[0], clawdata.upper[1]]
    # finest-level cell counts in each direction
    nx = clawdata.num_cells[0]
    ny = clawdata.num_cells[1]
    for lvl in range(rundata.amrdata.amr_levels_max - 1):
        nx *= rundata.amrdata.refinement_ratios_x[lvl]
        ny *= rundata.amrdata.refinement_ratios_y[lvl]
    res = min((extent[2] - extent[0]) / nx, (extent[3] - extent[1]) / ny)
    # make the extent of the hydro file a little bit larger than the domain
    extent = [extent[0] - res, extent[1] - res,
              extent[2] + res, extent[3] + res]
    os.makedirs(hydro_file.parent, exist_ok=True)
    print("Obtaining GeoJson from NHD high resolution dataset server.")
    feats = obtain_NHD_geojson(extent)
    print("Write GeoJson data to raster file {}".format(hydro_file))
    convert_geojson_2_raster(feats, hydro_file, extent, res)
    print("Done writing to {}".format(hydro_file))
def request_arcgis_token(
        username, password,
        token_server="https://www.arcgis.com/sharing/rest/generateToken",
        exp=5):
    """Request a user token from the ArcGIS token server.
    Only required when the elevation source is ESRI World Elevation.
    Return: a str, the token.
    """
    # payload POSTed to the token endpoint
    payload = {
        "f": "json",
        "username": username,
        "password": password,
        "client": "referer",
        "expiration": str(exp),
        "referer": "https://www.arcgis.com"
    }
    response = requests.post(token_server, payload)
    # abort on any HTTP-level failure
    response.raise_for_status()
    # a successful response carries the token in its JSON body
    return response.json()["token"]
def obtain_geotiff(extent, filename, res=1, source="3DEP", token=None):
    """Grab the GeoTiff file for the elevation of a region.
    The region is defined by the argument extent. extent is a list with 4
    floating point numbers. Its format is extent = [Xmin, Ymin, Xmax, Ymax].
    Available elevation sources are 3DEP and ESRI. If using ESRI data, then
    token must present. Token can be obtained from the function
    request_arcgis_token. If using 3DEP, then remember that 3DEP only has data
    for North America.
    Note: entries 2 and 3 of `extent` are adjusted in place so the extent
    matches a whole number of cells of size `res`.
    Args:
        extent [in]: a list with format [Xmin, Ymin, Xmax, Ymax]
        filename [in]: output GeoTiff filename.
        res [in]: output resolution. Default: 1 meter.
        source [in]: either 3DEP or ESRI.
        token [in]: if using ESRI source, the token must be provided.
    """
    # the REST endpoint of exportImage of the elevation server
    if source == "ESRI":
        dem_server = \
            "https://elevation.arcgis.com/arcgis/rest/services/" + \
            "WorldElevation/Terrain/ImageServer/exportImage"
        assert token is not None, \
            "Token cannot be None when using ESRI data source"
    elif source == "3DEP":
        dem_server = \
            "https://elevation.nationalmap.gov/arcgis/rest/services/" + \
            "3DEPElevation/ImageServer/exportImage"
    else:
        raise ValueError("Invalid elevation source: {}".format(source))
    # calculate number of cells (+0.5 rounds to the nearest integer)
    x_size = int((extent[2]-extent[0])/res+0.5)
    y_size = int((extent[3]-extent[1])/res+0.5)
    # adjust North and East boundary to match x_size and y_size
    extent[2] = extent[0] + x_size * res
    extent[3] = extent[1] + y_size * res
    # parameters used to get response from the REST endpoint
    # NOTE(review): the ArcGIS REST parameter is documented as "bboxSR";
    # "bboxSr" below presumably relies on case-insensitive parsing -- confirm
    # before renaming.
    dem_query = {
        "f": "json",
        "bbox": "{},{},{},{}".format(extent[0], extent[1], extent[2], extent[3]),
        "size": "{},{}".format(x_size, y_size),
        "imageSR": "3857",
        "bboxSr": "3857",
        "format": "tiff",
        "pixelType": "F32",
        "noData": "-9999",
        "interpolation": "RSP_BilinearInterpolation",
    }
    # add token to parameters if using ESRI
    if source == "ESRI":
        dem_query["token"] = token
    else:
        # sort 3DEP mosaic tiles by acquisition date
        dem_query["mosaicRule"] = \
            "{\"mosaicMethod\":\"esriMosaicAttribute\",\"sortField\":\"AcquisitionDate\"}"
    # create a HTTP session that can retry 5 times if 500, 502, 503, 504 happens
    session = requests.Session()
    session.mount("https://", requests.adapters.HTTPAdapter(
        max_retries=urllib3.util.retry.Retry(
            total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])))
    # use GET to get response
    dem_response = session.get(dem_server, stream=True, params=dem_query)
    # try to raise an error if the server does not return success signal
    dem_response.raise_for_status()
    # close the session
    session.close()
    # if execution comes to this point, we've got the GeoTiff from the server;
    # the JSON body only carries the URL of the generated image
    tif_url = dem_response.json()["href"]
    # download the GeoTiff file, retry until success or timeout (~300 seconds)
    count = 0
    while True:
        rspnd = requests.get(tif_url, stream=True, allow_redirects=True)
        if rspnd.status_code == requests.codes.ok:  # pylint: disable=no-member
            break
        time.sleep(3)
        count += 3
        if count > 300:
            # give up: surface whatever HTTP error the last attempt produced
            rspnd.raise_for_status()
    with open(os.path.abspath(filename), "wb") as file_obj:
        file_obj.write(rspnd.content)
def geotiff_2_esri_ascii(in_file, out_file):
    """Convert a GeoTiff to an ESRI ASCII file.
    Args:
        in_file [in]: path of the source GeoTiff.
        out_file [in]: path of the ESRI ASCII raster to create.
    """
    geotiff = rasterio.open(in_file, "r")
    # a workaround to ignore ERROR 4 message; we create a new Tiff and close it
    # NOTE(review): presumably GDAL complains when the target file does not
    # exist yet, so it is materialized first -- confirm against GDAL docs.
    rasterio.open(
        os.path.abspath(out_file), mode="w", driver="GTiff",
        width=1, height=1, count=1, crs=rasterio.crs.CRS.from_epsg(3857),
        transform=geotiff.transform, dtype=rasterio.int8, nodata=0
    ).close()
    # re-create the output in AAIGrid (ESRI ASCII) format, copying the source
    # grid's size, transform, band count, and nodata value
    dst = rasterio.open(
        os.path.abspath(out_file), mode="w", driver="AAIGrid",
        width=geotiff.width, height=geotiff.height, count=geotiff.count,
        crs=rasterio.crs.CRS.from_epsg(3857),
        transform=geotiff.transform, dtype=rasterio.float32,
        nodata=geotiff.nodata)
    # copy band 1 of the GeoTiff into the ASCII raster
    dst.write_band(1, geotiff.read(1))
    dst.close()
    geotiff.close()
def obtain_NHD_geojson(extent):  # pylint: disable=invalid-name
    """Obtain features from NHD high resolution dataset MapServer.
    Return:
        A list: [<flowline>, <area>, <water body>]. The data types are GeoJson.
    """
    # layer ids on the NHD MapServer: 6 = flowline, 8 = area, 10 = waterbody
    url_template = \
        "https://hydro.nationalmap.gov/arcgis/rest/services/nhd/MapServer/{}/query"
    servers = [url_template.format(layer_id) for layer_id in (6, 8, 10)]
    # the same spatial query is issued against every layer
    base_query = {
        "where": "1=1",  # in the future, use this to filter FCode(s)
        "f": "geojson",
        "geometry": "{},{},{},{}".format(extent[0], extent[1], extent[2], extent[3]),
        "geometryType": "esriGeometryEnvelope",
        "inSR": "3857",
        "spatialRel": "esriSpatialRelIntersects",
        "returnGeometry": "true",
        "outSR": "3857"
    }
    queries = [dict(base_query) for _ in servers]
    # HTTP session that retries 5 times on 500, 502, 503, 504
    session = requests.Session()
    session.mount("https://", requests.adapters.HTTPAdapter(
        max_retries=urllib3.util.retry.Retry(
            total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])))
    geoms = []
    for server, query in zip(servers, queries):
        response = session.get(server, stream=True, params=query)
        response.raise_for_status()
        geoms.append(response.json())
    session.close()
    return geoms
def convert_geojson_2_raster(feat_layers, filename, extent, res, crs=3857):
    """Rasterize GeoJSON feature layers into an ESRI ASCII file.
    Cells touched by any feature are burned with the value 10; all other
    cells (and the no-feature case) carry the nodata value -9999.
    Arguments
    ---------
    feat_layers : list
        GeoJSON dicts (e.g., the output of `obtain_NHD_geojson`).
    filename : PathLike
        Path of the output ESRI ASCII raster.
    extent : list
        [x_min, y_min, x_max, y_max] of the output raster.
    res : float
        Cell size of the output raster.
    crs : int
        EPSG code; only 3857 is currently supported.
    Raises
    ------
    NotImplementedError
        If `crs` is not 3857.
    ValueError
        If any feature carries an invalid GeoJSON geometry.
    """
    if crs != 3857:
        raise NotImplementedError("crs other than 3857 are not implemented yet")
    # raster size in cells (+0.5 rounds to the nearest integer)
    width = int((extent[2]-extent[0])/res+0.5)
    height = int((extent[3]-extent[1])/res+0.5)
    transform = rasterio.transform.from_origin(extent[0], extent[3], res, res)
    # collect (geometry, burn value) pairs from every layer
    shapes = []
    for feat_layer in feat_layers:
        for geo in feat_layer["features"]:
            if not rasterio.features.is_valid_geom(geo["geometry"]):
                raise ValueError("Not a valid GeoJson geometry data")
            shapes.append((geo["geometry"], 10))
    # if no geometry exists in the list, emit an all-nodata raster
    if not shapes:
        image = numpy.ones((height, width), dtype=rasterio.float32) * -9999.
    # else if there's any geometry
    else:
        # BUGFIX: rasterize() expects out_shape as (rows, cols), i.e.
        # (height, width); the previous (width, height) produced a
        # transposed/mis-sized array for non-square extents (note the
        # empty-shapes branch above already used (height, width)).
        image = rasterio.features.rasterize(
            shapes=shapes, out_shape=(height, width), fill=-9999.,
            transform=transform, all_touched=True, dtype=rasterio.float32)
    # a workaround to ignore ERROR 4 message: create and close a GTiff first
    rasterio.open(
        os.path.abspath(filename), mode="w", driver="GTiff",
        width=1, height=1, count=1, crs=rasterio.crs.CRS.from_epsg(3857),
        transform=transform, dtype=rasterio.int8, nodata=0
    ).close()
    dst = rasterio.open(
        os.path.abspath(filename), mode="w", driver="AAIGrid",
        width=width, height=height, count=1,
        crs=rasterio.crs.CRS.from_epsg(crs), transform=transform,
        dtype=rasterio.float32, nodata=-9999.)
    dst.write(image, indexes=1)
    dst.close()
def download_satellite_image(extent, filepath, force=False):
    """Download a satellite image of the given extent.
    Note: `extent` is truncated to integers and padded by 5 pixels per side
    *in place*, so the caller's list is modified.
    Arguments
    ---------
    extent: a list of [xmin, ymin, xmax, ymax]
        The bound of the domain.
    filepath: os.PathLike
        Where to save the image.
    force: bool
        Download regardless of if the file already exists.
    Returns
    -------
    The extent of the saved image. The server does not always return the
    image within exactly the extent. Sometimes the extent of the image is
    larger, so we need to know.
    """
    filepath = pathlib.Path(filepath)
    api_url = "http://server.arcgisonline.com/arcgis/rest/services/World_Imagery/MapServer/export"
    # the actual image extent is cached next to the image (e.g. "img.png.extent")
    extent_file = filepath.with_suffix(filepath.suffix+".extent")
    # always with extra 5 pixels outside each boundary
    extent[0] = int(extent[0]) - 5
    extent[1] = int(extent[1]) - 5
    extent[2] = int(extent[2]) + 5
    extent[3] = int(extent[3]) + 5
    # one pixel per coordinate unit
    width = extent[2] - extent[0]
    height = extent[3] - extent[1]
    # if image and extent info already exists, we may stop downloading and leave
    if extent_file.is_file() and filepath.is_file() and not force:
        with open(extent_file, "r") as fileobj:
            img_extent = fileobj.readline()
            img_extent = [float(i) for i in img_extent.strip().split()]
            return img_extent
    # REST API parameters
    # NOTE(review): the documented name for the bbox spatial reference is
    # "bboxSR"; "bbSR" below presumably works through server-side leniency
    # -- verify before changing.
    params = {
        "bbox": "{},{},{},{}".format(*extent),
        "bbSR": "3857",
        "size": "{},{}".format(width, height),
        "imageSR": "3857",
        "format": "png",
        "f": "json"
    }
    # create a HTTP session that can retry 5 times if 500, 502, 503, 504 happens
    session = requests.Session()
    session.mount("https://", requests.adapters.HTTPAdapter(
        max_retries=requests.packages.urllib3.util.retry.Retry(  # pylint: disable=no-member
            total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504]
        )
    ))
    # use GET to get response
    respns = session.get(api_url, params=params)
    respns.raise_for_status()  # raise an error if not success
    respns = respns.json()  # convert to a dictionary
    assert "href" in respns  # make sure the image's url is in the response
    # download the file itself from the URL in the JSON response
    respns2 = session.get(respns["href"], stream=True, allow_redirects=True)
    respns2.raise_for_status()
    with open(filepath, "wb") as fileobj:
        fileobj.write(respns2.content)
    # close the session
    session.close()
    # write image extent to a text file
    with open(extent_file, "w") as fileobj:
        fileobj.write("{} {} {} {}".format(
            respns["extent"]["xmin"], respns["extent"]["ymin"],
            respns["extent"]["xmax"], respns["extent"]["ymax"]
        ))
    return [respns["extent"]["xmin"], respns["extent"]["ymin"],
            respns["extent"]["xmax"], respns["extent"]["ymax"]]
| [
"requests.post",
"requests.Session",
"time.sleep",
"rasterio.features.rasterize",
"rasterio.transform.from_origin",
"pathlib.Path",
"shutil.move",
"os.path.isabs",
"numpy.ones",
"urllib3.util.retry.Retry",
"rasterio.open",
"gclandspill._misc.import_setrun",
"requests.get",
"os.path.isfile"... | [((1557, 1586), 'gclandspill._misc.import_setrun', '_misc.import_setrun', (['case_dir'], {}), '(case_dir)\n', (1576, 1586), False, 'from gclandspill import _misc\n'), ((2875, 2895), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (2886, 2895), False, 'import os\n'), ((5929, 5973), 'os.makedirs', 'os.makedirs', (['topo_file.parent'], {'exist_ok': '(True)'}), '(topo_file.parent, exist_ok=True)\n', (5940, 5973), False, 'import os\n'), ((6975, 7036), 'os.path.isabs', 'os.path.isabs', (['rundata.landspill_data.hydro_features.files[0]'], {}), '(rundata.landspill_data.hydro_features.files[0])\n', (6988, 7036), False, 'import os\n'), ((7235, 7261), 'os.path.isfile', 'os.path.isfile', (['hydro_file'], {}), '(hydro_file)\n', (7249, 7261), False, 'import os\n'), ((8073, 8118), 'os.makedirs', 'os.makedirs', (['hydro_file.parent'], {'exist_ok': '(True)'}), '(hydro_file.parent, exist_ok=True)\n', (8084, 8118), False, 'import os\n'), ((9093, 9137), 'requests.post', 'requests.post', (['token_server', 'token_applicant'], {}), '(token_server, token_applicant)\n', (9106, 9137), False, 'import requests\n'), ((11856, 11874), 'requests.Session', 'requests.Session', ([], {}), '()\n', (11872, 11874), False, 'import requests\n'), ((13026, 13053), 'rasterio.open', 'rasterio.open', (['in_file', '"""r"""'], {}), "(in_file, 'r')\n", (13039, 13053), False, 'import rasterio\n'), ((15614, 15632), 'requests.Session', 'requests.Session', ([], {}), '()\n', (15630, 15632), False, 'import requests\n'), ((16409, 16471), 'rasterio.transform.from_origin', 'rasterio.transform.from_origin', (['extent[0]', 'extent[3]', 'res', 'res'], {}), '(extent[0], extent[3], res, res)\n', (16439, 16471), False, 'import rasterio\n'), ((18319, 18341), 'pathlib.Path', 'pathlib.Path', (['filepath'], {}), '(filepath)\n', (18331, 18341), False, 'import pathlib\n'), ((19443, 19461), 'requests.Session', 'requests.Session', ([], {}), '()\n', (19459, 19461), False, 'import requests\n'), 
((2982, 3013), 'shutil.move', 'shutil.move', (['data_file', 'out_dir'], {}), '(data_file, out_dir)\n', (2993, 3013), False, 'import shutil\n'), ((4593, 4616), 'os.path.isabs', 'os.path.isabs', (['topo[-1]'], {}), '(topo[-1])\n', (4606, 4616), False, 'import os\n'), ((7059, 7119), 'pathlib.Path', 'pathlib.Path', (['rundata.landspill_data.hydro_features.files[0]'], {}), '(rundata.landspill_data.hydro_features.files[0])\n', (7071, 7119), False, 'import pathlib\n'), ((12555, 12611), 'requests.get', 'requests.get', (['tif_url'], {'stream': '(True)', 'allow_redirects': '(True)'}), '(tif_url, stream=True, allow_redirects=True)\n', (12567, 12611), False, 'import requests\n'), ((12720, 12733), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (12730, 12733), False, 'import time\n'), ((13404, 13429), 'os.path.abspath', 'os.path.abspath', (['out_file'], {}), '(out_file)\n', (13419, 13429), False, 'import os\n'), ((16953, 17104), 'rasterio.features.rasterize', 'rasterio.features.rasterize', ([], {'shapes': 'shapes', 'out_shape': '(width, height)', 'fill': '(-9999.0)', 'transform': 'transform', 'all_touched': '(True)', 'dtype': 'rasterio.float32'}), '(shapes=shapes, out_shape=(width, height), fill=\n -9999.0, transform=transform, all_touched=True, dtype=rasterio.float32)\n', (16980, 17104), False, 'import rasterio\n'), ((17431, 17456), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (17446, 17456), False, 'import os\n'), ((2441, 2462), 'pathlib.Path', 'pathlib.Path', (['out_dir'], {}), '(out_dir)\n', (2453, 2462), False, 'import pathlib\n'), ((2812, 2834), 'shutil.rmtree', 'shutil.rmtree', (['out_dir'], {}), '(out_dir)\n', (2825, 2834), False, 'import shutil\n'), ((4698, 4720), 'pathlib.Path', 'pathlib.Path', (['topo[-1]'], {}), '(topo[-1])\n', (4710, 4720), False, 'import pathlib\n'), ((5315, 5353), 'shutil.copyfile', 'shutil.copyfile', (['downloaded', 'topo_file'], {}), '(downloaded, topo_file)\n', (5330, 5353), False, 'import shutil\n'), ((5864, 
5887), 'pathlib.Path', 'pathlib.Path', (['topo_file'], {}), '(topo_file)\n', (5876, 5887), False, 'import pathlib\n'), ((12829, 12854), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (12844, 12854), False, 'import os\n'), ((13544, 13576), 'rasterio.crs.CRS.from_epsg', 'rasterio.crs.CRS.from_epsg', (['(3857)'], {}), '(3857)\n', (13570, 13576), False, 'import rasterio\n'), ((16831, 16882), 'numpy.ones', 'numpy.ones', (['(height, width)'], {'dtype': 'rasterio.float32'}), '((height, width), dtype=rasterio.float32)\n', (16841, 16882), False, 'import numpy\n'), ((17543, 17574), 'rasterio.crs.CRS.from_epsg', 'rasterio.crs.CRS.from_epsg', (['crs'], {}), '(crs)\n', (17569, 17574), False, 'import rasterio\n'), ((11956, 12051), 'urllib3.util.retry.Retry', 'urllib3.util.retry.Retry', ([], {'total': '(5)', 'backoff_factor': '(1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=1, status_forcelist=[500, \n 502, 503, 504])\n', (11980, 12051), False, 'import urllib3\n'), ((13162, 13187), 'os.path.abspath', 'os.path.abspath', (['out_file'], {}), '(out_file)\n', (13177, 13187), False, 'import os\n'), ((15714, 15809), 'urllib3.util.retry.Retry', 'urllib3.util.retry.Retry', ([], {'total': '(5)', 'backoff_factor': '(1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=1, status_forcelist=[500, \n 502, 503, 504])\n', (15738, 15809), False, 'import urllib3\n'), ((16586, 16634), 'rasterio.features.is_valid_geom', 'rasterio.features.is_valid_geom', (["geo['geometry']"], {}), "(geo['geometry'])\n", (16617, 16634), False, 'import rasterio\n'), ((17197, 17222), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (17212, 17222), False, 'import os\n'), ((19543, 19655), 'requests.packages.urllib3.util.retry.Retry', 'requests.packages.urllib3.util.retry.Retry', ([], {'total': '(5)', 'backoff_factor': '(1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=1,\n 
status_forcelist=[500, 502, 503, 504])\n', (19585, 19655), False, 'import requests\n'), ((1354, 1376), 'pathlib.Path', 'pathlib.Path', (['case_dir'], {}), '(case_dir)\n', (1366, 1376), False, 'import pathlib\n'), ((2721, 2753), 'time.strftime', 'time.strftime', (['"""%Y%m%dT%H%M%S%Z"""'], {}), "('%Y%m%dT%H%M%S%Z')\n", (2734, 2753), False, 'import time\n'), ((3624, 3646), 'pathlib.Path', 'pathlib.Path', (['case_dir'], {}), '(case_dir)\n', (3636, 3646), False, 'import pathlib\n'), ((6921, 6943), 'pathlib.Path', 'pathlib.Path', (['case_dir'], {}), '(case_dir)\n', (6933, 6943), False, 'import pathlib\n'), ((13255, 13287), 'rasterio.crs.CRS.from_epsg', 'rasterio.crs.CRS.from_epsg', (['(3857)'], {}), '(3857)\n', (13281, 13287), False, 'import rasterio\n'), ((17290, 17322), 'rasterio.crs.CRS.from_epsg', 'rasterio.crs.CRS.from_epsg', (['(3857)'], {}), '(3857)\n', (17316, 17322), False, 'import rasterio\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME> <<EMAIL>>"
import numpy as np
class MSELoss:
    """Halved mean-squared-error loss and its gradient w.r.t. the prediction."""
    def loss(self, y, yhat):
        # scalar loss: mean of (y - yhat)^2 / 2 over every element
        return np.mean((y - yhat)**2 / 2)
    def loss_gradient(self, y, yhat):
        # gradient of the halved MSE w.r.t. yhat is (yhat - y); it is averaged
        # over the last axis and re-expanded to keep a trailing singleton dim.
        # NOTE(review): presumably the last axis is the output/batch axis --
        # confirm against the callers of this class.
        return np.expand_dims(np.mean(yhat - y, axis=-1), axis=-1) | [
"numpy.mean"
] | [((161, 189), 'numpy.mean', 'np.mean', (['((y - yhat) ** 2 / 2)'], {}), '((y - yhat) ** 2 / 2)\n', (168, 189), True, 'import numpy as np\n'), ((257, 283), 'numpy.mean', 'np.mean', (['(yhat - y)'], {'axis': '(-1)'}), '(yhat - y, axis=-1)\n', (264, 283), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from talib import abstract
from lib.strategy.base_strategy import BaseStrategy
class StochRsi(BaseStrategy):
    """Stochastic-RSI strategy.
    The RSI is normalized into [0, 1] over a rolling window; a buy signal is
    the normalized value crossing above `buy_threshold`, a sell signal is it
    crossing below `sell_threshold`.
    """
    # settings
    period = 30
    buy_threshold = 0.7
    sell_threshold = 0.3
    def __init__(self, feed: pd.DataFrame):
        super().__init__(feed)
        if self.has_enough_feed():
            raw_rsi = abstract.RSI(self.feed, period=self.period)
            rolling_max = abstract.MAX(raw_rsi, period=self.period)
            rolling_min = abstract.MIN(raw_rsi, period=self.period)
            # stochastic normalization of the RSI over the rolling window
            self.feed["srsi"] = (raw_rsi - rolling_min) / (rolling_max - rolling_min)
            previous = self.feed["srsi"].shift(1)
            # threshold crossings relative to the previous bar
            self.feed["cross_up"] = (self.feed["srsi"] > self.buy_threshold) & (
                previous <= self.buy_threshold
            )
            self.feed["cross_down"] = (self.feed["srsi"] < self.sell_threshold) & (
                previous >= self.sell_threshold
            )
    def get_name(self) -> str:
        return "stoch_rsi"
    def should_buy(self) -> bool:
        return self.feed.iloc[-1]["cross_up"]
    def should_sell(self) -> bool:
        return self.feed.iloc[-1]["cross_down"]
    def is_valid(self) -> bool:
        return not np.isnan(self.feed.iloc[-1]["srsi"])
| [
"talib.abstract.RSI",
"numpy.isnan",
"talib.abstract.MAX",
"talib.abstract.MIN"
] | [((362, 405), 'talib.abstract.RSI', 'abstract.RSI', (['self.feed'], {'period': 'self.period'}), '(self.feed, period=self.period)\n', (374, 405), False, 'from talib import abstract\n'), ((427, 464), 'talib.abstract.MAX', 'abstract.MAX', (['rsi'], {'period': 'self.period'}), '(rsi, period=self.period)\n', (439, 464), False, 'from talib import abstract\n'), ((486, 523), 'talib.abstract.MIN', 'abstract.MIN', (['rsi'], {'period': 'self.period'}), '(rsi, period=self.period)\n', (498, 523), False, 'from talib import abstract\n'), ((1241, 1277), 'numpy.isnan', 'np.isnan', (["self.feed.iloc[-1]['srsi']"], {}), "(self.feed.iloc[-1]['srsi'])\n", (1249, 1277), True, 'import numpy as np\n')] |
from ..dot.jit import (
mod_matrix_dot,
)
# TODO cut below
import numpy as np
import numba as nb
@nb.njit
def mod_matrix_pow(
    a: np.ndarray,
    n: int,
    mod: int,
) -> np.ndarray:
    """Return the matrix power a**n with entries reduced modulo `mod`.
    Uses binary (square-and-multiply) exponentiation, so only O(log n)
    modular matrix products via `mod_matrix_dot` are performed.
    """
    m = len(a)
    # only a square matrix can be raised to a power
    assert a.shape == (m, m)
    # accumulator starts as the identity and absorbs a factor per set bit of n
    x = np.eye(m, dtype=np.int64)
    while n:
        if n & 1:
            x = mod_matrix_dot(x, a, mod)
        # square the base for the next binary digit of n
        a = mod_matrix_dot(a, a, mod)
        n >>= 1
    return x | [
"numpy.eye"
] | [((235, 260), 'numpy.eye', 'np.eye', (['m'], {'dtype': 'np.int64'}), '(m, dtype=np.int64)\n', (241, 260), True, 'import numpy as np\n')] |
import logging
from typing import Iterator
from omegaconf import DictConfig, OmegaConf
import hydra
import os
import sys
import errno
import random
from time import time
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate import Accelerator
from common.utils import summary
from common.dataset_generators import UnchunkedGeneratorDataset, ChunkedGeneratorDataset
from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate
log = logging.getLogger('hpe-3d')
@hydra.main(config_path="config/", config_name="conf")
def main(cfg: DictConfig):
    """Train and/or evaluate a 3D human-pose-estimation model.
    Behavior is fully driven by the hydra config `cfg`: unless `cfg.evaluate`
    is set the model is trained first, then the model is evaluated
    action-by-action on the test subjects.
    """
    log.info('Config:\n' + OmegaConf.to_yaml(cfg))
    # ----- config sanity checks -----
    if cfg.resume and cfg.evaluate:
        # BUGFIX: corrected "Invlid" typo in the error message
        log.error(
            'Invalid Config: resume and evaluate can not be set at the same time')
        exit(-1)
    if cfg.dataset != 'ntu' and cfg.depth_map:
        log.error('Cannot use depth map when not using ntu dataset')
        exit(-1)
    try:
        # Create checkpoint directory if it does not exist
        os.makedirs(cfg.checkpoint)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise RuntimeError(
                'Unable to create checkpoint directory:', cfg.checkpoint)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.gpu)
    # fix all RNG seeds for reproducibility
    random.seed(42)
    np.random.seed(42)
    torch.manual_seed(42)
    torch.cuda.manual_seed_all(42)
    # ----- data loading -----
    if cfg.dataset == 'ntu':
        dataset, keypoints, keypoints_metadata, kps_left, kps_right, joints_left, joints_right = load_dataset_ntu(
            cfg.data_dir, cfg.dataset, cfg.keypoints, cfg.depth_map)
    else:
        dataset, keypoints, keypoints_metadata, kps_left, kps_right, joints_left, joints_right = load_dataset(
            cfg.data_dir, cfg.dataset, cfg.keypoints)
    subjects_train = cfg.subjects_train.split(',')
    subjects_test = cfg.subjects_test.split(',')
    action_filter = None if cfg.actions == '*' else cfg.actions.split(',')
    if action_filter is not None:
        # BUGFIX: the filter must go through logging's lazy %-formatting;
        # passing it as a bare extra argument raised a formatting error
        # inside the logging module.
        log.info('Selected actions: %s', action_filter)
    if cfg.dataset == 'ntu':
        cameras_valid, poses_valid, poses_valid_2d = fetch_ntu(
            subjects_test, dataset, keypoints, action_filter, cfg.downsample, cfg.subset)
    else:
        cameras_valid, poses_valid, poses_valid_2d = fetch(
            subjects_test, dataset, keypoints, action_filter, cfg.downsample, cfg.subset)
    # ----- model creation -----
    model_pos_train, model_pos, pad, causal_shift = create_model(
        cfg, dataset, poses_valid_2d)
    receptive_field = model_pos.receptive_field()
    log.info("Receptive field: {} frames".format(receptive_field))
    if cfg.causal:
        log.info("Using causal convolutions")
    # Loading weight
    model_pos_train, model_pos, checkpoint = load_weight(
        cfg, model_pos_train, model_pos)
    # BUGFIX: the accelerator is created unconditionally here because the
    # evaluation path below uses it too; previously it only existed inside
    # the training branch, so evaluate-only runs hit a NameError.
    accelerator = Accelerator(device_placement=True, fp16=cfg.fp16)
    test_dataset = UnchunkedGeneratorDataset(cameras_valid, poses_valid, poses_valid_2d,
                                             pad=pad, causal_shift=causal_shift, augment=False,
                                             kps_left=kps_left, kps_right=kps_right,
                                             joints_left=joints_left, joints_right=joints_right)
    test_loader = DataLoader(test_dataset, batch_size=1,
                             shuffle=False, num_workers=cfg.num_workers)
    log.info("Testing on {} frames".format(test_dataset.num_frames()))
    if not cfg.evaluate:
        if cfg.dataset == 'ntu':
            cameras_train, poses_train, poses_train_2d = fetch_ntu(
                subjects_train, dataset, keypoints, action_filter,
                cfg.downsample, subset=cfg.subset)
        else:
            cameras_train, poses_train, poses_train_2d = fetch(
                subjects_train, dataset, keypoints, action_filter,
                cfg.downsample, subset=cfg.subset)
        lr = cfg.learning_rate
        optimizer = torch.optim.Adam(
            model_pos_train.parameters(), lr=lr, amsgrad=True)
        lr_decay = cfg.lr_decay
        losses_3d_train = []
        losses_3d_train_eval = []
        losses_3d_valid = []
        epoch = 0
        # BatchNorm momentum is annealed from initial to final over training
        initial_momentum = 0.1
        final_momentum = 0.001
        train_dataset = ChunkedGeneratorDataset(cameras_train, poses_train, poses_train_2d,
                                                cfg.stride,
                                                pad=pad, causal_shift=causal_shift, shuffle=True,
                                                augment=cfg.data_augmentation,
                                                kps_left=kps_left, kps_right=kps_right,
                                                joints_left=joints_left,
                                                joints_right=joints_right)
        train_loader = DataLoader(
            train_dataset, batch_size=cfg.batch_size, shuffle=False, num_workers=cfg.num_workers)
        train_dataset_eval = UnchunkedGeneratorDataset(cameras_train, poses_train, poses_train_2d,
                                                    pad=pad, causal_shift=causal_shift, augment=False)
        train_loader_eval = DataLoader(
            train_dataset_eval, batch_size=1, shuffle=False, num_workers=cfg.num_workers)
        log.info('Training on {} frames'.format(
            train_dataset_eval.num_frames()))
        # log the effective 2D input shape and a model summary
        sample_inputs_2d = train_dataset[0][-1]
        input_shape = [cfg.batch_size]
        input_shape += list(sample_inputs_2d.shape)
        log.info('Input 2d shape: {}'.format(
            input_shape))
        summary(log, model_pos,
                sample_inputs_2d.shape, cfg.batch_size, device='cpu')
        if cfg.resume:
            # continue from the checkpointed epoch / optimizer / lr
            epoch = checkpoint['epoch']
            if 'optimizer' in checkpoint and checkpoint['optimizer'] is not None:
                optimizer.load_state_dict(checkpoint['optimizer'])
            else:
                log.info(
                    'WARNING: this checkpoint does not contain an optimizer state. The optimizer will be reinitialized.')
            lr = checkpoint['lr']
        log.info(
            '** Note: reported losses are averaged over all frames and test-time augmentation is not used here.')
        log.info(
            '** The final evaluation will be carried out after the last training epoch.')
        # Prepare everything for gpu and fp16
        model_pos_train, model_pos, optimizer, train_loader, train_loader_eval, test_loader = accelerator.prepare(
            model_pos_train, model_pos, optimizer, train_loader, train_loader_eval, test_loader)
        log.info("Training on device: {}".format(accelerator.device))
        loss_min = 49.5
        # Pos model only
        while epoch < cfg.epochs:
            start_time = time()
            model_pos_train.train()
            # Regular supervised scenario
            epoch_loss_3d = train(
                accelerator, model_pos_train, train_loader, optimizer)
            losses_3d_train.append(epoch_loss_3d)
            # After training an epoch, whether to evaluate the loss of the
            # training and validation set (`eval` here is the project helper,
            # which shadows the builtin)
            if not cfg.no_eval:
                model_train_dict = model_pos_train.state_dict()
                losses_3d_valid_ave, losses_3d_train_eval_ave = eval(
                    model_train_dict, model_pos, test_loader, train_loader_eval)
                losses_3d_valid.append(losses_3d_valid_ave)
                losses_3d_train_eval.append(losses_3d_train_eval_ave)
            elapsed = (time() - start_time) / 60
            if cfg.no_eval:
                log.info('[%d] time %.2f lr %f 3d_train %f' % (
                    epoch + 1,
                    elapsed,
                    lr,
                    losses_3d_train[-1] * 1000))
            else:
                log.info('[%d] time %.2f lr %f 3d_train %f 3d_eval %f 3d_valid %f' % (
                    epoch + 1,
                    elapsed,
                    lr,
                    losses_3d_train[-1] * 1000,
                    losses_3d_train_eval[-1] * 1000,
                    losses_3d_valid[-1] * 1000))
            # Saving the best result
            # BUGFIX: the guard on cfg.no_eval is required -- without it,
            # losses_3d_valid is empty in no-eval mode and [-1] raised an
            # IndexError on the first epoch.
            if not cfg.no_eval and losses_3d_valid[-1] * 1000 < loss_min:
                chk_path = os.path.join(cfg.checkpoint, 'epoch_best.bin')
                log.info('Saving checkpoint to {}'.format(chk_path))
                torch.save({
                    'epoch': epoch,
                    'lr': lr,
                    'optimizer': optimizer.state_dict(),
                    'model_pos': model_pos_train.state_dict()
                }, chk_path)
                loss_min = losses_3d_valid[-1] * 1000
            # Decay learning rate exponentially
            lr *= lr_decay
            for param_group in optimizer.param_groups:
                param_group['lr'] *= lr_decay
            epoch += 1
            # Decay BatchNorm momentum
            momentum = initial_momentum * \
                np.exp(-epoch/cfg.epochs *
                       np.log(initial_momentum/final_momentum))
            model_pos_train.set_bn_momentum(momentum)
            # Save checkpoint if necessary
            if epoch % cfg.checkpoint_frequency == 0:
                chk_path = os.path.join(
                    cfg.checkpoint, 'epoch_{}.bin'.format(epoch))
                log.info('Saving checkpoint to {}'.format(chk_path))
                torch.save({
                    'epoch': epoch,
                    'lr': lr,
                    'optimizer': optimizer.state_dict(),
                    'model_pos': model_pos_train.state_dict()
                }, chk_path)
            # Save training curves after every epoch, as .png images (if requested)
            if cfg.export_training_curves and epoch > 3:
                if 'matplotlib' not in sys.modules:
                    import matplotlib
                    matplotlib.use('Agg')
                # BUGFIX: pyplot must be bound even when matplotlib was
                # already imported; the old code left `plt` undefined in
                # that case.
                import matplotlib.pyplot as plt
                plt.figure()
                epoch_x = np.arange(3, len(losses_3d_train)) + 1
                plt.plot(epoch_x, losses_3d_train[3:], '--', color='C0')
                plt.plot(epoch_x, losses_3d_train_eval[3:], color='C0')
                plt.plot(epoch_x, losses_3d_valid[3:], color='C1')
                plt.legend(['3d train', '3d train (eval)', '3d valid (eval)'])
                plt.ylabel('MPJPE (m)')
                plt.xlabel('Epoch')
                plt.xlim((3, epoch))
                plt.savefig(os.path.join(cfg.checkpoint, 'loss_3d.png'))
                plt.close('all')
    # ----- final evaluation -----
    log.info('Evaluating...')
    all_actions, all_actions_by_subject = prepare_actions(
        subjects_test, dataset)
    def run_evaluation(actions, action_filter):
        # evaluate the model on every requested action; report the averages
        errors_p1 = []
        errors_p2 = []
        for action_key in actions.keys():
            if action_filter is not None:
                # keep only actions whose name starts with a filter entry
                found = False
                for a in action_filter:
                    if action_key.startswith(a):
                        found = True
                        break
                if not found:
                    continue
            poses_act, poses_2d_act = fetch_actions(
                actions[action_key], keypoints, dataset, cfg.downsample)
            _dataset = UnchunkedGeneratorDataset(None, poses_act, poses_2d_act,
                                                 pad=pad, causal_shift=causal_shift,
                                                 augment=cfg.test_time_augment,
                                                 kps_left=kps_left, kps_right=kps_right,
                                                 joints_left=joints_left,
                                                 joints_right=joints_right)
            action_loader = DataLoader(_dataset, 1, shuffle=False)
            action_loader = accelerator.prepare_data_loader(action_loader)
            e1, e2 = evaluate(action_loader, model_pos,
                              action=action_key, log=log, joints_left=joints_left,
                              joints_right=joints_right, test_augment=cfg.test_time_augment)
            errors_p1.append(e1)
            errors_p2.append(e2)
        log.info('Protocol #1 (MPJPE) action-wise average: {} mm'.format(
            round(np.mean(errors_p1), 1)))
        log.info('Protocol #2 (P-MPJPE) action-wise average: {} mm'.format(
            round(np.mean(errors_p2), 1)))
    if not cfg.by_subject:
        run_evaluation(all_actions, action_filter)
    else:
        for subject in all_actions_by_subject.keys():
            log.info('Evaluating on subject: {}'.format(subject))
            run_evaluation(all_actions_by_subject[subject], action_filter)
            log.info('')
if __name__ == '__main__':
main()
| [
"logging.getLogger",
"trainval.evaluate",
"trainval.fetch_actions",
"matplotlib.pyplot.ylabel",
"numpy.log",
"trainval.load_dataset_ntu",
"trainval.load_dataset",
"trainval.fetch",
"trainval.create_model",
"numpy.mean",
"hydra.main",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"m... | [((560, 587), 'logging.getLogger', 'logging.getLogger', (['"""hpe-3d"""'], {}), "('hpe-3d')\n", (577, 587), False, 'import logging\n'), ((591, 644), 'hydra.main', 'hydra.main', ([], {'config_path': '"""config/"""', 'config_name': '"""conf"""'}), "(config_path='config/', config_name='conf')\n", (601, 644), False, 'import hydra\n'), ((1344, 1359), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (1355, 1359), False, 'import random\n'), ((1364, 1382), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1378, 1382), True, 'import numpy as np\n'), ((1387, 1408), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (1404, 1408), False, 'import torch\n'), ((1413, 1443), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(42)'], {}), '(42)\n', (1439, 1443), False, 'import torch\n'), ((2691, 2733), 'trainval.create_model', 'create_model', (['cfg', 'dataset', 'poses_valid_2d'], {}), '(cfg, dataset, poses_valid_2d)\n', (2703, 2733), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((2992, 3036), 'trainval.load_weight', 'load_weight', (['cfg', 'model_pos_train', 'model_pos'], {}), '(cfg, model_pos_train, model_pos)\n', (3003, 3036), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((3066, 3287), 'common.dataset_generators.UnchunkedGeneratorDataset', 'UnchunkedGeneratorDataset', (['cameras_valid', 'poses_valid', 'poses_valid_2d'], {'pad': 'pad', 'causal_shift': 'causal_shift', 'augment': '(False)', 'kps_left': 'kps_left', 'kps_right': 'kps_right', 'joints_left': 'joints_left', 'joints_right': 'joints_right'}), '(cameras_valid, poses_valid, poses_valid_2d, pad=\n pad, causal_shift=causal_shift, augment=False, kps_left=kps_left,\n kps_right=kps_right, joints_left=joints_left, 
joints_right=joints_right)\n', (3091, 3287), False, 'from common.dataset_generators import UnchunkedGeneratorDataset, ChunkedGeneratorDataset\n'), ((3387, 3474), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'cfg.num_workers'}), '(test_dataset, batch_size=1, shuffle=False, num_workers=cfg.\n num_workers)\n', (3397, 3474), False, 'from torch.utils.data import DataLoader\n'), ((10824, 10863), 'trainval.prepare_actions', 'prepare_actions', (['subjects_test', 'dataset'], {}), '(subjects_test, dataset)\n', (10839, 10863), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((1089, 1116), 'os.makedirs', 'os.makedirs', (['cfg.checkpoint'], {}), '(cfg.checkpoint)\n', (1100, 1116), False, 'import os\n'), ((1571, 1644), 'trainval.load_dataset_ntu', 'load_dataset_ntu', (['cfg.data_dir', 'cfg.dataset', 'cfg.keypoints', 'cfg.depth_map'], {}), '(cfg.data_dir, cfg.dataset, cfg.keypoints, cfg.depth_map)\n', (1587, 1644), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((1866, 1920), 'trainval.load_dataset', 'load_dataset', (['cfg.data_dir', 'cfg.dataset', 'cfg.keypoints'], {}), '(cfg.data_dir, cfg.dataset, cfg.keypoints)\n', (1878, 1920), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((2377, 2468), 'trainval.fetch_ntu', 'fetch_ntu', (['subjects_test', 'dataset', 'keypoints', 'action_filter', 'cfg.downsample', 'cfg.subset'], {}), '(subjects_test, dataset, keypoints, action_filter, cfg.downsample,\n cfg.subset)\n', (2386, 2468), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, 
fetch_actions, evaluate\n'), ((2541, 2629), 'trainval.fetch', 'fetch', (['subjects_test', 'dataset', 'keypoints', 'action_filter', 'cfg.downsample', 'cfg.subset'], {}), '(subjects_test, dataset, keypoints, action_filter, cfg.downsample, cfg\n .subset)\n', (2546, 2629), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((4440, 4707), 'common.dataset_generators.ChunkedGeneratorDataset', 'ChunkedGeneratorDataset', (['cameras_train', 'poses_train', 'poses_train_2d', 'cfg.stride'], {'pad': 'pad', 'causal_shift': 'causal_shift', 'shuffle': '(True)', 'augment': 'cfg.data_augmentation', 'kps_left': 'kps_left', 'kps_right': 'kps_right', 'joints_left': 'joints_left', 'joints_right': 'joints_right'}), '(cameras_train, poses_train, poses_train_2d, cfg.\n stride, pad=pad, causal_shift=causal_shift, shuffle=True, augment=cfg.\n data_augmentation, kps_left=kps_left, kps_right=kps_right, joints_left=\n joints_left, joints_right=joints_right)\n', (4463, 4707), False, 'from common.dataset_generators import UnchunkedGeneratorDataset, ChunkedGeneratorDataset\n'), ((4908, 5008), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'cfg.batch_size', 'shuffle': '(False)', 'num_workers': 'cfg.num_workers'}), '(train_dataset, batch_size=cfg.batch_size, shuffle=False,\n num_workers=cfg.num_workers)\n', (4918, 5008), False, 'from torch.utils.data import DataLoader\n'), ((5048, 5173), 'common.dataset_generators.UnchunkedGeneratorDataset', 'UnchunkedGeneratorDataset', (['cameras_train', 'poses_train', 'poses_train_2d'], {'pad': 'pad', 'causal_shift': 'causal_shift', 'augment': '(False)'}), '(cameras_train, poses_train, poses_train_2d, pad=\n pad, causal_shift=causal_shift, augment=False)\n', (5073, 5173), False, 'from common.dataset_generators import UnchunkedGeneratorDataset, ChunkedGeneratorDataset\n'), ((5252, 5345), 'torch.utils.data.DataLoader', 
'DataLoader', (['train_dataset_eval'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'cfg.num_workers'}), '(train_dataset_eval, batch_size=1, shuffle=False, num_workers=cfg\n .num_workers)\n', (5262, 5345), False, 'from torch.utils.data import DataLoader\n'), ((5669, 5746), 'common.utils.summary', 'summary', (['log', 'model_pos', 'sample_inputs_2d.shape', 'cfg.batch_size'], {'device': '"""cpu"""'}), "(log, model_pos, sample_inputs_2d.shape, cfg.batch_size, device='cpu')\n", (5676, 5746), False, 'from common.utils import summary\n'), ((6487, 6536), 'accelerate.Accelerator', 'Accelerator', ([], {'device_placement': '(True)', 'fp16': 'cfg.fp16'}), '(device_placement=True, fp16=cfg.fp16)\n', (6498, 6536), False, 'from accelerate import Accelerator\n'), ((699, 721), 'omegaconf.OmegaConf.to_yaml', 'OmegaConf.to_yaml', (['cfg'], {}), '(cfg)\n', (716, 721), False, 'from omegaconf import DictConfig, OmegaConf\n'), ((3686, 3785), 'trainval.fetch_ntu', 'fetch_ntu', (['subjects_train', 'dataset', 'keypoints', 'action_filter', 'cfg.downsample'], {'subset': 'cfg.subset'}), '(subjects_train, dataset, keypoints, action_filter, cfg.downsample,\n subset=cfg.subset)\n', (3695, 3785), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((3921, 4016), 'trainval.fetch', 'fetch', (['subjects_train', 'dataset', 'keypoints', 'action_filter', 'cfg.downsample'], {'subset': 'cfg.subset'}), '(subjects_train, dataset, keypoints, action_filter, cfg.downsample,\n subset=cfg.subset)\n', (3926, 4016), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((6929, 6935), 'time.time', 'time', ([], {}), '()\n', (6933, 6935), False, 'from time import time\n'), ((7043, 7103), 'trainval.train', 'train', (['accelerator', 'model_pos_train', 'train_loader', 'optimizer'], {}), 
'(accelerator, model_pos_train, train_loader, optimizer)\n', (7048, 7103), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((11337, 11407), 'trainval.fetch_actions', 'fetch_actions', (['actions[action_key]', 'keypoints', 'dataset', 'cfg.downsample'], {}), '(actions[action_key], keypoints, dataset, cfg.downsample)\n', (11350, 11407), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((11448, 11677), 'common.dataset_generators.UnchunkedGeneratorDataset', 'UnchunkedGeneratorDataset', (['None', 'poses_act', 'poses_2d_act'], {'pad': 'pad', 'causal_shift': 'causal_shift', 'augment': 'cfg.test_time_augment', 'kps_left': 'kps_left', 'kps_right': 'kps_right', 'joints_left': 'joints_left', 'joints_right': 'joints_right'}), '(None, poses_act, poses_2d_act, pad=pad,\n causal_shift=causal_shift, augment=cfg.test_time_augment, kps_left=\n kps_left, kps_right=kps_right, joints_left=joints_left, joints_right=\n joints_right)\n', (11473, 11677), False, 'from common.dataset_generators import UnchunkedGeneratorDataset, ChunkedGeneratorDataset\n'), ((11790, 11828), 'torch.utils.data.DataLoader', 'DataLoader', (['_dataset', '(1)'], {'shuffle': '(False)'}), '(_dataset, 1, shuffle=False)\n', (11800, 11828), False, 'from torch.utils.data import DataLoader\n'), ((11926, 12081), 'trainval.evaluate', 'evaluate', (['action_loader', 'model_pos'], {'action': 'action_key', 'log': 'log', 'joints_left': 'joints_left', 'joints_right': 'joints_right', 'test_augment': 'cfg.test_time_augment'}), '(action_loader, model_pos, action=action_key, log=log, joints_left=\n joints_left, joints_right=joints_right, test_augment=cfg.test_time_augment)\n', (11934, 12081), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, 
eval, prepare_actions, fetch_actions, evaluate\n'), ((7435, 7500), 'trainval.eval', 'eval', (['model_train_dict', 'model_pos', 'test_loader', 'train_loader_eval'], {}), '(model_train_dict, model_pos, test_loader, train_loader_eval)\n', (7439, 7500), False, 'from trainval import create_model, fetch_ntu, load_dataset, fetch, load_dataset_ntu, load_weight, train, eval, prepare_actions, fetch_actions, evaluate\n'), ((10148, 10160), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10158, 10160), True, 'import matplotlib.pyplot as plt\n'), ((10242, 10298), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_x', 'losses_3d_train[3:]', '"""--"""'], {'color': '"""C0"""'}), "(epoch_x, losses_3d_train[3:], '--', color='C0')\n", (10250, 10298), True, 'import matplotlib.pyplot as plt\n'), ((10315, 10370), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_x', 'losses_3d_train_eval[3:]'], {'color': '"""C0"""'}), "(epoch_x, losses_3d_train_eval[3:], color='C0')\n", (10323, 10370), True, 'import matplotlib.pyplot as plt\n'), ((10387, 10437), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch_x', 'losses_3d_valid[3:]'], {'color': '"""C1"""'}), "(epoch_x, losses_3d_valid[3:], color='C1')\n", (10395, 10437), True, 'import matplotlib.pyplot as plt\n'), ((10454, 10516), 'matplotlib.pyplot.legend', 'plt.legend', (["['3d train', '3d train (eval)', '3d valid (eval)']"], {}), "(['3d train', '3d train (eval)', '3d valid (eval)'])\n", (10464, 10516), True, 'import matplotlib.pyplot as plt\n'), ((10533, 10556), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MPJPE (m)"""'], {}), "('MPJPE (m)')\n", (10543, 10556), True, 'import matplotlib.pyplot as plt\n'), ((10573, 10592), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (10583, 10592), True, 'import matplotlib.pyplot as plt\n'), ((10609, 10629), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(3, epoch)'], {}), '((3, epoch))\n', (10617, 10629), True, 'import matplotlib.pyplot as plt\n'), ((10719, 10735), 
'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10728, 10735), True, 'import matplotlib.pyplot as plt\n'), ((7676, 7682), 'time.time', 'time', ([], {}), '()\n', (7680, 7682), False, 'from time import time\n'), ((8396, 8442), 'os.path.join', 'os.path.join', (['cfg.checkpoint', '"""epoch_best.bin"""'], {}), "(cfg.checkpoint, 'epoch_best.bin')\n", (8408, 8442), False, 'import os\n'), ((10057, 10078), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (10071, 10078), False, 'import matplotlib\n'), ((10658, 10701), 'os.path.join', 'os.path.join', (['cfg.checkpoint', '"""loss_3d.png"""'], {}), "(cfg.checkpoint, 'loss_3d.png')\n", (10670, 10701), False, 'import os\n'), ((12273, 12291), 'numpy.mean', 'np.mean', (['errors_p1'], {}), '(errors_p1)\n', (12280, 12291), True, 'import numpy as np\n'), ((12397, 12415), 'numpy.mean', 'np.mean', (['errors_p2'], {}), '(errors_p2)\n', (12404, 12415), True, 'import numpy as np\n'), ((9191, 9232), 'numpy.log', 'np.log', (['(initial_momentum / final_momentum)'], {}), '(initial_momentum / final_momentum)\n', (9197, 9232), True, 'import numpy as np\n')] |
"""
Unit tests for meta/train/env.py.
"""
from itertools import product
from typing import Dict, List, Any, Tuple
import numpy as np
import torch
from meta.utils.storage import RolloutStorage
from meta.train.env import (
get_env,
get_base_env,
get_metaworld_ml_benchmark_names,
get_metaworld_benchmark_names,
)
from tests.helpers import get_policy, DEFAULT_SETTINGS
# Number of leading observation dimensions to normalize; used below as
# `normalize_first_n`. Presumably the MetaWorld observation size up to the
# goal position — TODO confirm against the MetaWorld observation layout.
METAWORLD_OBS_GOAL_POS = 39
# Environment steps collected per rollout in each test below.
ROLLOUT_LENGTH = 128
# Episode time limit (steps) passed through test settings.
TIME_LIMIT = 4
# NOTE(review): the three constants below are not referenced in this chunk;
# presumably consumed by helpers (e.g. check_metaworld_rollout) elsewhere
# in the file — verify before removing.
PROCESS_EPISODES = 5
TASK_EPISODES = 3
ENOUGH_THRESHOLD = 0.5
# Task name used for the single-task MT1/ML1 benchmarks.
SINGLE_ENV_NAME = "reach-v2"
def test_collect_rollout_MT1_single() -> None:
    """MT1, one process: rollout task indices and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT1_%s" % SINGLE_ENV_NAME,
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": True,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT1_single_normalize() -> None:
    """MT1, one process, observation normalization on: rollout task indices
    and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT1_%s" % SINGLE_ENV_NAME,
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": True,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT1_multi() -> None:
    """MT1, four processes: rollout task indices and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT1_%s" % SINGLE_ENV_NAME,
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": True,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT1_multi_normalize() -> None:
    """MT1, four processes, observation normalization on: rollout task
    indices and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT1_%s" % SINGLE_ENV_NAME,
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": True,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT10_single() -> None:
    """MT10, one process: rollout task indices and task/goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT10",
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": True,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT10_single_normalize() -> None:
    """MT10, one process, observation normalization on: rollout task indices
    and task/goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT10",
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": True,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT10_multi() -> None:
    """MT10, four processes: rollout task indices and task/goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT10",
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": True,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT10_multi_normalize() -> None:
    """MT10, four processes, observation normalization on: rollout task
    indices and task/goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT10",
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": True,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT10_multi_save_memory() -> None:
    """MT10, four processes, `save_memory` enabled: rollout task indices
    and task/goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT10",
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": True,
        "save_memory": True,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT50_multi() -> None:
    """MT50, four processes (longer rollout so all 50 tasks are visited):
    rollout task indices and task/goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT50",
        "num_processes": 4,
        "rollout_length": 8 * ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": True,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_MT50_multi_save_memory() -> None:
    """MT50, four processes, `save_memory` enabled: rollout task indices
    and task/goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "MT50",
        "num_processes": 4,
        "rollout_length": 8 * ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": True,
        "save_memory": True,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML1_train_single() -> None:
    """ML1 train split, one process: rollout task indices and goal
    resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML1_train_%s" % SINGLE_ENV_NAME,
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML1_train_single_normalize() -> None:
    """ML1 train split, one process, observation normalization on: rollout
    task indices and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML1_train_%s" % SINGLE_ENV_NAME,
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML1_train_multi() -> None:
    """ML1 train split, four processes: rollout task indices and goal
    resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML1_train_%s" % SINGLE_ENV_NAME,
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML1_train_multi_normalize() -> None:
    """ML1 train split, four processes, observation normalization on:
    rollout task indices and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML1_train_%s" % SINGLE_ENV_NAME,
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML1_test_single() -> None:
    """ML1 test split, one process: rollout task indices and goal
    resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML1_test_%s" % SINGLE_ENV_NAME,
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML1_test_single_normalize() -> None:
    """ML1 test split, one process, observation normalization on: rollout
    task indices and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML1_test_%s" % SINGLE_ENV_NAME,
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML1_test_multi() -> None:
    """ML1 test split, four processes: rollout task indices and goal
    resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML1_test_%s" % SINGLE_ENV_NAME,
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML1_test_multi_normalize() -> None:
    """ML1 test split, four processes, observation normalization on:
    rollout task indices and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML1_test_%s" % SINGLE_ENV_NAME,
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML10_train_single() -> None:
    """ML10 train split, one process: rollout task indices and goal
    resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML10_train",
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML10_train_single_normalize() -> None:
    """ML10 train split, one process, observation normalization on: rollout
    task indices and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML10_train",
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML10_train_multi() -> None:
    """ML10 train split, four processes: rollout task indices and goal
    resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML10_train",
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML10_train_multi_normalize() -> None:
    """ML10 train split, four processes, observation normalization on:
    rollout task indices and goal resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML10_train",
        "num_processes": 4,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": True,
        "normalize_first_n": METAWORLD_OBS_GOAL_POS,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML10_test_single() -> None:
    """ML10 test split, one process: rollout task indices and goal
    resampling are correct."""
    settings = {
        **DEFAULT_SETTINGS,
        "env_name": "ML10_test",
        "num_processes": 1,
        "rollout_length": ROLLOUT_LENGTH,
        "time_limit": TIME_LIMIT,
        "normalize_transition": False,
        "normalize_first_n": None,
        "same_np_seed": False,
        "save_memory": False,
    }
    check_metaworld_rollout(settings)
def test_collect_rollout_ML10_test_single_normalize() -> None:
    """
    Run a single-process rollout on the MetaWorld ML10_test benchmark with
    observation normalization enabled and verify that the collected
    RolloutStorage reports task indices and goal resampling correctly.
    """
    settings = dict(
        DEFAULT_SETTINGS,
        env_name="ML10_test",
        num_processes=1,
        rollout_length=ROLLOUT_LENGTH,
        time_limit=TIME_LIMIT,
        normalize_transition=True,
        normalize_first_n=METAWORLD_OBS_GOAL_POS,
        same_np_seed=False,
        save_memory=False,
    )
    check_metaworld_rollout(settings)
def test_collect_rollout_ML10_test_multi() -> None:
    """
    Run a 4-process rollout on the MetaWorld ML10_test benchmark and verify
    that the collected RolloutStorage reports task indices correctly and that
    goals are resampled as expected.
    """
    settings = dict(
        DEFAULT_SETTINGS,
        env_name="ML10_test",
        num_processes=4,
        rollout_length=ROLLOUT_LENGTH,
        time_limit=TIME_LIMIT,
        normalize_transition=False,
        normalize_first_n=None,
        same_np_seed=False,
        save_memory=False,
    )
    check_metaworld_rollout(settings)
def test_collect_rollout_ML10_test_multi_normalize() -> None:
    """
    Run a 4-process rollout on the MetaWorld ML10_test benchmark with
    observation normalization enabled and verify that the collected
    RolloutStorage reports task indices and goal resampling correctly.
    """
    settings = dict(
        DEFAULT_SETTINGS,
        env_name="ML10_test",
        num_processes=4,
        rollout_length=ROLLOUT_LENGTH,
        time_limit=TIME_LIMIT,
        normalize_transition=True,
        normalize_first_n=METAWORLD_OBS_GOAL_POS,
        same_np_seed=False,
        save_memory=False,
    )
    check_metaworld_rollout(settings)
def test_collect_rollout_ML45_train_multi() -> None:
    """
    Run a 4-process rollout on the MetaWorld ML45_train benchmark (with a
    longer rollout, since there are more tasks) and verify that the collected
    RolloutStorage reports task indices and goal resampling correctly.
    """
    settings = dict(
        DEFAULT_SETTINGS,
        env_name="ML45_train",
        num_processes=4,
        rollout_length=8 * ROLLOUT_LENGTH,
        time_limit=TIME_LIMIT,
        normalize_transition=False,
        normalize_first_n=None,
        same_np_seed=False,
        save_memory=False,
    )
    check_metaworld_rollout(settings)
def test_collect_rollout_ML45_train_multi_save_memory() -> None:
    """
    Run a 4-process rollout on the MetaWorld ML45_train benchmark with the
    `save_memory` option enabled and verify that the collected RolloutStorage
    reports task indices and goal resampling correctly.
    """
    settings = dict(
        DEFAULT_SETTINGS,
        env_name="ML45_train",
        num_processes=4,
        rollout_length=8 * ROLLOUT_LENGTH,
        time_limit=TIME_LIMIT,
        normalize_transition=False,
        normalize_first_n=None,
        same_np_seed=False,
        save_memory=True,
    )
    check_metaworld_rollout(settings)
def test_collect_rollout_ML45_test_multi() -> None:
    """
    Run a 4-process rollout on the MetaWorld ML45_test benchmark and verify
    that the collected RolloutStorage reports task indices correctly and that
    goals are resampled as expected.
    """
    settings = dict(
        DEFAULT_SETTINGS,
        env_name="ML45_test",
        num_processes=4,
        rollout_length=ROLLOUT_LENGTH,
        time_limit=TIME_LIMIT,
        normalize_transition=False,
        normalize_first_n=None,
        same_np_seed=False,
        save_memory=False,
    )
    check_metaworld_rollout(settings)
def test_collect_rollout_ML45_test_multi_save_memory() -> None:
    """
    Run a 4-process rollout on the MetaWorld ML45_test benchmark with the
    `save_memory` option enabled and verify that the collected RolloutStorage
    reports task indices and goal resampling correctly.
    """
    settings = dict(
        DEFAULT_SETTINGS,
        env_name="ML45_test",
        num_processes=4,
        rollout_length=ROLLOUT_LENGTH,
        time_limit=TIME_LIMIT,
        normalize_transition=False,
        normalize_first_n=None,
        same_np_seed=False,
        save_memory=True,
    )
    check_metaworld_rollout(settings)
def check_metaworld_rollout(settings: Dict[str, Any]) -> None:
    """
    Verify that a rollout on a MetaWorld benchmark satisfies the expected
    invariants:
    - Multi-task benchmarks append a one-hot task index (length = number of
      tasks) after the base observation; the encoded task must change exactly
      at done=True steps, each process must resample tasks between episodes,
      and different processes must see different task sequences.
    - Goals are fixed within an episode and are resampled per episode for meta
      learning benchmarks but fixed across episodes for multi-task learning
      benchmarks; initial object placements follow the same rule.
    - Initial hand positions are identical between episodes of the same task.
    """
    env_name = settings["env_name"]

    # Multi-task benchmarks carry the one-hot task index in the observation.
    multitask = env_name in get_metaworld_benchmark_names()

    # Meta-learning benchmarks (including the ML1_* variants) resample goals.
    resample_goals = (
        env_name in get_metaworld_ml_benchmark_names() or env_name.startswith("ML1_")
    )
    settings["add_observability"] = resample_goals

    # Collect a rollout.
    rollout = get_metaworld_rollout(settings)

    # Check task indices and task resampling, if necessary.
    if multitask:
        task_check(rollout)

    # Check goal resampling and initial observations, unless observations are
    # normalized: varying normalization statistics make the same underlying
    # goal/observation look different on separate transitions.
    if not settings["normalize_transition"]:
        goal_check(rollout, resample_goals, multitask)
        initial_hand_check(rollout, multitask)
def get_metaworld_rollout(settings: Dict[str, Any]) -> RolloutStorage:
    """
    Execute and return a single rollout over a MetaWorld environment using
    configuration in `settings`.

    Fix: the return annotation previously claimed
    ``Tuple[RolloutStorage, np.ndarray]`` even though only the
    ``RolloutStorage`` is returned; the annotation now matches the code.
    """
    # Construct environment and policy.
    env = get_env(
        settings["env_name"],
        num_processes=settings["num_processes"],
        seed=settings["seed"],
        time_limit=settings["time_limit"],
        normalize_transition=settings["normalize_transition"],
        normalize_first_n=settings["normalize_first_n"],
        allow_early_resets=True,
        same_np_seed=settings["same_np_seed"],
        add_observability=settings["add_observability"],
        save_memory=settings["save_memory"],
    )
    policy = get_policy(env, settings)
    rollout = RolloutStorage(
        rollout_length=settings["rollout_length"],
        observation_space=env.observation_space,
        action_space=env.action_space,
        num_processes=settings["num_processes"],
        hidden_state_size=1,
        device=settings["device"],
    )
    rollout.set_initial_obs(env.reset())

    # Collect rollout.
    for rollout_step in range(rollout.rollout_length):
        # Sample actions. No gradients are needed for data collection.
        with torch.no_grad():
            values, actions, action_log_probs, hidden_states = policy.act(
                rollout.obs[rollout_step],
                rollout.hidden_states[rollout_step],
                rollout.dones[rollout_step],
            )

        # Perform step and record in ``rollout``.
        obs, rewards, dones, infos = env.step(actions)
        rollout.add_step(
            obs, actions, dones, action_log_probs, values, rewards, hidden_states
        )
    env.close()

    return rollout
def task_check(rollout: RolloutStorage) -> None:
    """
    Check that the task encoded in each observation of `rollout` is constant
    within an episode, changes only at done=True steps, that every process with
    enough episodes ran more than one distinct task, and that no two processes
    saw the same task sequence.
    """
    num_procs = rollout.num_processes

    # Current task per process, seeded from the initial observations, plus the
    # per-process history of tasks attempted (one entry per episode).
    current = get_task_indices(rollout.obs[0])
    episode_tasks = {p: [current[p]] for p in range(num_procs)}

    # Walk the rollout, updating the current task at episode boundaries and
    # asserting it is unchanged everywhere else.
    for step in range(rollout.rollout_step):
        step_obs = rollout.obs[step]
        step_dones = rollout.dones[step]
        assert len(step_obs) == len(step_dones)
        observed = get_task_indices(step_obs)
        for p in range(len(step_obs)):
            if step_dones[p]:
                current[p] = observed[p]
                episode_tasks[p].append(current[p])
            else:
                assert current[p] == observed[p]

    # Require that enough processes accumulated enough episodes to test with.
    tested = [
        tasks for tasks in episode_tasks.values() if len(tasks) >= PROCESS_EPISODES
    ]
    if len(tested) / len(episode_tasks) < ENOUGH_THRESHOLD:
        raise ValueError(
            "Less than %d episodes ran for more than half of processes, which is the"
            " minimum amount needed for testing. Try increasing rollout length."
            % (PROCESS_EPISODES)
        )

    # Each sufficiently long process history must contain multiple tasks.
    for tasks in tested:
        assert len(set(tasks)) > 1

    # Task sequences must be pairwise distinct across processes.
    for p1 in range(num_procs):
        for p2 in range(num_procs):
            if p1 != p2:
                assert episode_tasks[p1] != episode_tasks[p2]

    print("\nTasks for each process: %s" % episode_tasks)
def goal_check(rollout: RolloutStorage, resample_goals: bool, multitask: bool) -> None:
    """
    Given a rollout, checks that goals and initial object positions are resampled
    correctly within and between processes.

    Goals must stay constant within an episode. Across episodes they must be
    resampled when `resample_goals` (meta-learning benchmarks) and otherwise
    fixed; initial object positions follow the same rule.

    Fix: the ValueError message below was missing a space between its adjacent
    string literals, producing "which is theminimum amount" (compare the
    correctly spaced message in `task_check`).
    """
    # Get initial goals.
    task_indices = (
        get_task_indices(rollout.obs[0]) if multitask else [0] * rollout.num_processes
    )
    goals = get_goals(rollout.obs[0])
    episode_goals = {
        task_indices[process]: [goals[process]]
        for process in range(rollout.num_processes)
    }

    # Get initial object placements.
    object_pos = get_object_pos(rollout.obs[0])
    episode_object_pos = {
        task_indices[process]: [object_pos[process]]
        for process in range(rollout.num_processes)
    }

    # Check if rollout satisfies conditions at each step.
    for step in range(rollout.rollout_step):

        # Get information from step.
        obs = rollout.obs[step]
        dones = rollout.dones[step]
        assert len(obs) == len(dones)
        task_indices = (
            get_task_indices(obs) if multitask else [0] * rollout.num_processes
        )
        new_goals = get_goals(obs)
        new_object_pos = get_object_pos(obs)

        # Make sure that goal is the same if we haven't reached a done or if goal should
        # remain fixed across episodes, otherwise set new goal.
        for process in range(len(obs)):
            done = dones[process]
            if done and (resample_goals or multitask):
                goals[process] = new_goals[process]
            else:
                assert (goals[process] == new_goals[process]).all()

            # Track goals and initial object positions from each task. Note that
            # this runs for every done step, regardless of the branch above.
            if done:
                task = task_indices[process]
                if task not in episode_goals:
                    episode_goals[task] = []
                episode_goals[task].append(goals[process])

                if task not in episode_object_pos:
                    episode_object_pos[task] = []
                episode_object_pos[task].append(new_object_pos[process])

    # Check that each task is resampling goals and initial object positions, if
    # necessary.
    enough_ratio = sum(
        len(task_goals) >= TASK_EPISODES for task_goals in episode_goals.values()
    ) / len(episode_goals)
    if enough_ratio < ENOUGH_THRESHOLD:
        raise ValueError(
            "Less than %d episodes ran for more than half of tasks, which is the"
            " minimum amount needed for testing. Try increasing rollout length."
            % (TASK_EPISODES)
        )
    for task, task_goals in episode_goals.items():
        if len(task_goals) >= TASK_EPISODES:
            goals_arr = np.array([g.numpy() for g in task_goals])
            num_unique_goals = len(np.unique(goals_arr, axis=0))
            if resample_goals:
                assert num_unique_goals > 1
            else:
                assert num_unique_goals == 1
    for task, task_object_pos in episode_object_pos.items():
        if len(task_object_pos) >= TASK_EPISODES:
            object_pos_arr = np.array([p.numpy() for p in task_object_pos])
            num_unique_pos = len(np.unique(object_pos_arr, axis=0))
            if resample_goals:
                assert num_unique_pos > 1
            else:
                assert num_unique_pos == 1

    print("\nGoals for each task: %s" % str(episode_goals))
    print("\nInitial object positions for each task: %s" % str(episode_object_pos))
def initial_hand_check(rollout: RolloutStorage, multitask: bool) -> None:
    """
    Check that the initial hand positions of all episodes of the same task are
    identical (up to rounding at three decimals).
    """
    first_obs = rollout.obs[0]

    # Seed the per-task history with the first episode of every process.
    initial_hand_pos = {}
    for p in range(rollout.num_processes):
        task = get_task_indices(first_obs)[p] if multitask else 0
        initial_hand_pos.setdefault(task, []).append(get_hand_pos(first_obs)[p])

    # Walk the rollout; each done=True observation starts a new episode, so its
    # hand position is recorded under the corresponding task.
    for step in range(1, rollout.rollout_length):
        step_obs = rollout.obs[step]
        step_dones = rollout.dones[step]
        assert len(step_obs) == len(step_dones)
        tasks = (
            get_task_indices(step_obs) if multitask else [0] * rollout.num_processes
        )
        step_hand_pos = get_hand_pos(step_obs)
        for p in range(len(step_obs)):
            if step_dones[p]:
                initial_hand_pos.setdefault(tasks[p], []).append(step_hand_pos[p])

    # Require that enough tasks accumulated enough episodes to test with.
    num_enough = sum(
        1 for positions in initial_hand_pos.values() if len(positions) >= TASK_EPISODES
    )
    if num_enough / len(initial_hand_pos) < ENOUGH_THRESHOLD:
        raise ValueError(
            "Less than %d episodes ran for more than half of task/process pairs, which"
            " is the minimum amount needed for testing. Try increasing rollout length."
            % (TASK_EPISODES)
        )

    # Within a task, all recorded initial hand positions must coincide.
    for positions in initial_hand_pos.values():
        if len(positions) >= TASK_EPISODES:
            rounded = np.array([pos.numpy() for pos in positions]).round(decimals=3)
            assert len(np.unique(rounded, axis=0)) == 1

    print("\nInitial obs for each task: %s" % str(initial_hand_pos))
def get_task_indices(obs: torch.Tensor) -> List[int]:
    """
    Decode the one-hot task vectors stored after position METAWORLD_OBS_GOAL_POS
    in each observation of a batch, returning one task index per observation.
    Asserts that every one-hot vector has exactly one entry, equal to 1.
    """
    one_hots = obs[:, METAWORLD_OBS_GOAL_POS:]

    # Each row must contribute exactly one nonzero position, in row order, and
    # that position must hold the value 1.
    nonzero_pos = one_hots.nonzero()
    assert nonzero_pos[:, 0].tolist() == list(range(obs.shape[0]))
    for pos in nonzero_pos:
        assert one_hots[tuple(pos)].item() == 1.0

    # The column of each nonzero entry is the task index.
    return nonzero_pos[:, 1].tolist()
def get_hand_pos(obs: torch.Tensor) -> List[np.ndarray]:
    """
    Extract the hand position (the first three entries) from every observation
    in a batch. Tied to the current Meta-World observation layout; update this
    if that layout ever changes.
    """
    hand_positions = []
    for observation in obs:
        hand_positions.append(observation[:3])
    return hand_positions
def get_object_pos(obs: torch.Tensor) -> List[np.ndarray]:
    """
    Extract the object positions (entries 3 through 16) from every observation
    in a batch. Tied to the current Meta-World observation layout; update this
    if that layout ever changes.
    """
    object_positions = []
    for observation in obs:
        object_positions.append(observation[3:17])
    return object_positions
def get_goals(obs: torch.Tensor) -> List[np.ndarray]:
    """
    Extract the goal (entries 36 through 38) from every observation in a batch.
    Tied to the current Meta-World observation layout; update this if that
    layout ever changes.
    """
    goals = []
    for observation in obs:
        goals.append(observation[36:39])
    return goals
| [
"tests.helpers.get_policy",
"numpy.unique",
"meta.train.env.get_env",
"meta.train.env.get_metaworld_ml_benchmark_names",
"meta.utils.storage.RolloutStorage",
"meta.train.env.get_metaworld_benchmark_names",
"torch.no_grad"
] | [((25184, 25215), 'meta.train.env.get_metaworld_benchmark_names', 'get_metaworld_benchmark_names', ([], {}), '()\n', (25213, 25215), False, 'from meta.train.env import get_env, get_base_env, get_metaworld_ml_benchmark_names, get_metaworld_benchmark_names\n'), ((25349, 25383), 'meta.train.env.get_metaworld_ml_benchmark_names', 'get_metaworld_ml_benchmark_names', ([], {}), '()\n', (25381, 25383), False, 'from meta.train.env import get_env, get_base_env, get_metaworld_ml_benchmark_names, get_metaworld_benchmark_names\n'), ((26463, 26868), 'meta.train.env.get_env', 'get_env', (["settings['env_name']"], {'num_processes': "settings['num_processes']", 'seed': "settings['seed']", 'time_limit': "settings['time_limit']", 'normalize_transition': "settings['normalize_transition']", 'normalize_first_n': "settings['normalize_first_n']", 'allow_early_resets': '(True)', 'same_np_seed': "settings['same_np_seed']", 'add_observability': "settings['add_observability']", 'save_memory': "settings['save_memory']"}), "(settings['env_name'], num_processes=settings['num_processes'], seed\n =settings['seed'], time_limit=settings['time_limit'],\n normalize_transition=settings['normalize_transition'],\n normalize_first_n=settings['normalize_first_n'], allow_early_resets=\n True, same_np_seed=settings['same_np_seed'], add_observability=settings\n ['add_observability'], save_memory=settings['save_memory'])\n", (26470, 26868), False, 'from meta.train.env import get_env, get_base_env, get_metaworld_ml_benchmark_names, get_metaworld_benchmark_names\n'), ((26946, 26971), 'tests.helpers.get_policy', 'get_policy', (['env', 'settings'], {}), '(env, settings)\n', (26956, 26971), False, 'from tests.helpers import get_policy, DEFAULT_SETTINGS\n'), ((26986, 27214), 'meta.utils.storage.RolloutStorage', 'RolloutStorage', ([], {'rollout_length': "settings['rollout_length']", 'observation_space': 'env.observation_space', 'action_space': 'env.action_space', 'num_processes': "settings['num_processes']", 
'hidden_state_size': '(1)', 'device': "settings['device']"}), "(rollout_length=settings['rollout_length'], observation_space\n =env.observation_space, action_space=env.action_space, num_processes=\n settings['num_processes'], hidden_state_size=1, device=settings['device'])\n", (27000, 27214), False, 'from meta.utils.storage import RolloutStorage\n'), ((27420, 27435), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27433, 27435), False, 'import torch\n'), ((32864, 32892), 'numpy.unique', 'np.unique', (['goals_arr'], {'axis': '(0)'}), '(goals_arr, axis=0)\n', (32873, 32892), True, 'import numpy as np\n'), ((33253, 33286), 'numpy.unique', 'np.unique', (['object_pos_arr'], {'axis': '(0)'}), '(object_pos_arr, axis=0)\n', (33262, 33286), True, 'import numpy as np\n'), ((35706, 35732), 'numpy.unique', 'np.unique', (['obs_arr'], {'axis': '(0)'}), '(obs_arr, axis=0)\n', (35715, 35732), True, 'import numpy as np\n')] |
"""
Go from the RVs <NAME> sent (with delta Pav as the
template) to RVs that can be input to radvel.
"""
import os
import pandas as pd, numpy as np
from astrobase.lcmath import find_lc_timegroups
from numpy import array as nparr
from timmy.paths import DATADIR
rvdir = os.path.join(DATADIR, 'spectra', 'Veloce', 'RVs')
rvpath = os.path.join(rvdir, 'TOI837_rvs_v1.txt')
df = pd.read_csv(rvpath, names=['time','rv','rv_err'], sep=' ')

# Bin the individual RVs into time groups (gaps larger than 0.8 days start a
# new group); report the per-group mean time and RV, with the scatter within
# each group taken as the RV uncertainty.
ngroups, groupinds = find_lc_timegroups(nparr(df.time), mingap=0.8)

times = [df.loc[g].time.mean() for g in groupinds]
rvs = [df.loc[g].rv.mean() for g in groupinds]
rverrs = [df.loc[g].rv.std() for g in groupinds]

# Assemble the Veloce RVs in radvel-ready form, with the mean RV subtracted.
veloce_df = pd.DataFrame({
    'time': times,
    'mnvel': nparr(rvs) - np.nanmean(rvs),
    'errvel': rverrs
})
veloce_df['tel'] = 'Veloce'
veloce_df['Name'] = 'toi837'
veloce_df['Source'] = 'Bergmann'

# Append to the previously cleaned RV table and write the combined file.
old_df = pd.read_csv(
    os.path.join(DATADIR, 'spectra', 'RVs_20200525_clean.csv')
)
new_df = pd.concat((old_df, veloce_df))

outpath = os.path.join(DATADIR, 'spectra', 'RVs_20200624_clean.csv')
new_df.to_csv(outpath, index=False)
print(f'made {outpath}')
| [
"pandas.read_csv",
"os.path.join",
"numpy.array",
"numpy.nanmean",
"pandas.concat"
] | [((271, 320), 'os.path.join', 'os.path.join', (['DATADIR', '"""spectra"""', '"""Veloce"""', '"""RVs"""'], {}), "(DATADIR, 'spectra', 'Veloce', 'RVs')\n", (283, 320), False, 'import os\n'), ((330, 370), 'os.path.join', 'os.path.join', (['rvdir', '"""TOI837_rvs_v1.txt"""'], {}), "(rvdir, 'TOI837_rvs_v1.txt')\n", (342, 370), False, 'import os\n'), ((377, 437), 'pandas.read_csv', 'pd.read_csv', (['rvpath'], {'names': "['time', 'rv', 'rv_err']", 'sep': '""" """'}), "(rvpath, names=['time', 'rv', 'rv_err'], sep=' ')\n", (388, 437), True, 'import pandas as pd, numpy as np\n'), ((980, 1010), 'pandas.concat', 'pd.concat', (['(old_df, veloce_df)'], {}), '((old_df, veloce_df))\n', (989, 1010), True, 'import pandas as pd, numpy as np\n'), ((1022, 1080), 'os.path.join', 'os.path.join', (['DATADIR', '"""spectra"""', '"""RVs_20200624_clean.csv"""'], {}), "(DATADIR, 'spectra', 'RVs_20200624_clean.csv')\n", (1034, 1080), False, 'import os\n'), ((477, 491), 'numpy.array', 'nparr', (['df.time'], {}), '(df.time)\n', (482, 491), True, 'from numpy import array as nparr\n'), ((909, 967), 'os.path.join', 'os.path.join', (['DATADIR', '"""spectra"""', '"""RVs_20200525_clean.csv"""'], {}), "(DATADIR, 'spectra', 'RVs_20200525_clean.csv')\n", (921, 967), False, 'import os\n'), ((738, 748), 'numpy.array', 'nparr', (['rvs'], {}), '(rvs)\n', (743, 748), True, 'from numpy import array as nparr\n'), ((751, 766), 'numpy.nanmean', 'np.nanmean', (['rvs'], {}), '(rvs)\n', (761, 766), True, 'import pandas as pd, numpy as np\n')] |
# -*- coding: utf-8 -*-
""" ISRS GN model implementation
This module implements the function that returns the nonlinear interference
power and coefficient for each WDM channel. This function implements the
ISRS GN model in closed-form published in:
<NAME>, <NAME>, <NAME>, "A Closed-Form Approximation of the
Gaussian Noise Model in the Presence of Inter-Channel Stimulated Raman
Scattering, " J. Lighw. Technol., Early Access, Jan. 2019
Author: <NAME>, <NAME>, <NAME>, <NAME>, Jan 2019.
"""
from numpy import abs,arange,arcsinh,arctan,isfinite,log,log10,mean,newaxis,pi,sum,zeros
def ISRSGNmodel(
        Att, Att_bar, Cr, Pch, fi, Bch, Length, D, S, gamma, RefLambda, coherent=True,
        **P_unused):
    """
    Returns nonlinear interference power and coefficient for each WDM
    channel. This function implements the ISRS GN model in closed-form
    published in:

    <NAME>, <NAME>, <NAME>, "A Closed-Form Approximation of the
    Gaussian Noise Model in the Presence of Inter-Channel Stimulated Raman
    Scattering, " J. Lighw. Technol., vol. xx, no. xx, pp.xxxx-xxxx, Jan. 2019

    Format:
    - channel dependent quantities have the format of a N_ch x n matrix,
      where N_ch is the number of channels slots and n is the number of spans.
    - channel independent quantities have the format of a 1 x n matrix
    - channel and span independent quantities are scalars

    INPUTS:
        Att: attenuation coefficient [Np/m] of channel i of span j,
            format: N_ch x n matrix
        Att_bar: attenuation coefficient (bar) [Np/m] of channel i of span j,
            format: N_ch x n matrix
        Cr[i,j]: the slope of the linear regression of the normalized Raman
            gain spectrum [1/W/m/Hz] of channel i of span j,
            format: N_ch x n matrix
        Pch[i,j]: the launch power [W] of channel i of span j,
            format: N_ch x n matrix
        fi[i,j]: center frequency relative to the reference frequency
            (3e8/RefLambda) [Hz] of channel i of span j,
            format: N_ch x n matrix
        Bch[i,j]: the bandwidth [Hz] of channel i of span j,
            format: N_ch x n matrix
        Length[j]: the span length [m] of span j,
            format: 1 x n vector
        D[j]: the dispersion coefficient [s/m^2] of span j,
            format: 1 x n vector
        S[j]: the dispersion slope [s/m^3] of span j (enters the beta3
            computation below),
            format: 1 x n vector
        gamma[j]: the nonlinearity coefficient [1/W/m] of span j (enters the
            SPM/XPM formulas as gamma**2),
            format: 1 x n vector
        RefLambda: is the reference wavelength (where beta2, beta3 are
            defined) [m],
            format: 1 x 1 vector
        coherent: boolean for coherent or incoherent NLI accumulation across
            multiple fiber spans

    RETURNS:
        NLI: Nonlinear Interference Power [W],
            format: N_ch x 1 vector
        eta_n: Nonlinear Interference coefficient [1/W^2],
            format: N_ch x 1 matrix
    """
    channels,n = fi.shape
    c = 3e8;  # speed of light [m/s]

    # Shorthand aliases matching the notation of the paper.
    a = Att
    a_bar = Att_bar
    L = Length
    P_ij = Pch
    # Total launch power per span.
    Ptot = sum(P_ij,axis=0)
    # GVD coefficient and its slope at the reference wavelength.
    beta2 = -D*RefLambda**2/(2*pi*c)
    beta3 = RefLambda**2/(2*pi*c)**2*(RefLambda**2*S+2*RefLambda*D)

    # Average Coherence Factor
    mean_att_i = mean(a, axis=1) # average attenuation coefficient for channel i
    mean_L = mean(L) # average fiber length
    if coherent:
        # closed-form formula for average coherence factor extended by dispersion slope, cf. Ref. [2, Eq. (22)]
        eps = lambda B_i, f_i, a_i: (3/10)*log(1+(6/a_i)/(mean_L*arcsinh(pi**2/2*abs( mean(beta2) + 2*pi*mean(beta3)*f_i )/a_i*B_i**2)))
    else:
        # Incoherent accumulation: exponent of 0 makes the span factor n**0 = 1.
        eps = lambda B_i, f_i, a_i: 0

    # SPM and XPM Closed-form Formula Definition
    SPM = lambda phi_i, T_i, B_i, a, a_bar, gamma:\
        4/9*gamma**2/B_i**2*pi/(phi_i*a_bar*(2*a+a_bar)) \
        *( (T_i-a**2)/a*arcsinh(phi_i*B_i**2/a/pi) + ((a+a_bar)**2-T_i)/(a+a_bar)*arcsinh(phi_i*B_i**2/(a+a_bar)/pi) )
    # closed-form formula for SPM contribution, see Ref. [1, Eq. (9-10)]

    XPM = lambda Pi, Pk, phi_ik, T_k, B_i, B_k, a, a_bar, gamma:\
        32/27*sum(removenan( (Pk/Pi)**2*gamma**2 / ( B_k*phi_ik*a_bar*(2*a+a_bar) )
        *( (T_k-a**2)/a*arctan(phi_ik*B_i/a)
        +((a+a_bar)**2-T_k)/(a+a_bar)*arctan(phi_ik*B_i/(a+a_bar)) )
        ))
    # closed-form formula for XPM contribution, see Ref. [1, Eq. (11)]

    # NOTE: these two placeholders are immediately overwritten below.
    NLI = []
    eta_n = []
    eta_SPM = zeros([channels,n])
    eta_XPM = zeros([channels,n])
    for j in arange(n):
        """ Calculation of nonlinear interference (NLI) power in fiber span j """
        for i in arange(channels):
            """ Compute the NLI of each COI """
            # Boolean mask selecting every interfering channel (INT) except i.
            not_i = arange(channels)!=i
            a_i = a[i,j] # \alpha of COI in fiber span j
            a_k = a[not_i,j] # \alpha of INT in fiber span j
            a_bar_i = a_bar[i,j] # \bar{\alpha} of COI in fiber span j
            a_bar_k = a_bar[not_i,j] # \bar{\alpha} of INT in fiber span j
            f_i = fi[i,j] # f_i of COI in fiber span j
            f_k = fi[not_i,j] # f_k of INT in fiber span j
            B_i = Bch[i,j] # B_i of COI in fiber span j
            B_k = Bch[not_i,j] # B_k of INT in fiber span j
            Cr_i = Cr[i,j] # Cr of COI in fiber span j
            Cr_k = Cr[not_i,j] # Cr of INT in fiber span j
            P_i = P_ij[i,j] # P_i of COI in fiber span j
            P_k = P_ij[not_i,j] # P_k of INT in fiber span j
            phi_i = 3/2*pi**2 *( beta2[j] + pi*beta3[j]*(f_i + f_i) ) # \phi_i of COI in fiber span j
            phi_ik = 2*pi**2 *( f_k - f_i )*( beta2[j] + pi*beta3[j]*(f_i + f_k) ) # \phi_ik of COI-INT pair in fiber span j
            T_i = (a_i + a_bar_i - f_i*Ptot[j]*Cr_i)**2 # T_i of COI in fiber span j
            T_k = (a_k + a_bar_k - f_k*Ptot[j]*Cr_k)**2 # T_k of INT in fiber span j
            eta_SPM[i,j] = SPM(phi_i, T_i, B_i, a_i, a_bar_i, gamma[j]) *n**eps(B_i, f_i, mean_att_i[i]) # computation of SPM contribution in fiber span j
            eta_XPM[i,j] = XPM(P_i, P_k, phi_ik, T_k, B_i, B_k, a_k, a_bar_k, gamma[j]) # computation of XPM contribution in fiber span j
    eta_n = sum( ( P_ij/P_ij[:,0,newaxis] )**2 * ( eta_SPM + eta_XPM ) , axis=1) # computation of NLI normalized to transmitter power, see Ref. [1, Eq. (5)]
    NLI = P_ij[:,0]**3 *eta_n # Ref. [1, Eq. (1)]
    return NLI, eta_n
def removenan(x):
    """Zero out every non-finite entry (NaN, +/-inf) of *x* in place and return it."""
    bad = ~isfinite(x)
    x[bad] = 0
    return x
def todB(x):
    """Convert a linear power ratio *x* to decibels (10*log10(x))."""
    # Idiom fix: use numpy's log10 directly instead of hand-rolled
    # log(x)/log(10); clearer and avoids the extra division.
    return 10*log10(x)
"numpy.mean",
"numpy.log",
"numpy.sum",
"numpy.zeros",
"numpy.arcsinh",
"numpy.isfinite",
"numpy.arange",
"numpy.arctan"
] | [((3059, 3076), 'numpy.sum', 'sum', (['P_ij'], {'axis': '(0)'}), '(P_ij, axis=0)\n', (3062, 3076), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((3235, 3250), 'numpy.mean', 'mean', (['a'], {'axis': '(1)'}), '(a, axis=1)\n', (3239, 3250), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((3314, 3321), 'numpy.mean', 'mean', (['L'], {}), '(L)\n', (3318, 3321), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((4524, 4544), 'numpy.zeros', 'zeros', (['[channels, n]'], {}), '([channels, n])\n', (4529, 4544), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((4558, 4578), 'numpy.zeros', 'zeros', (['[channels, n]'], {}), '([channels, n])\n', (4563, 4578), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((4591, 4600), 'numpy.arange', 'arange', (['n'], {}), '(n)\n', (4597, 4600), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((7352, 7420), 'numpy.sum', 'sum', (['((P_ij / P_ij[:, 0, newaxis]) ** 2 * (eta_SPM + eta_XPM))'], {'axis': '(1)'}), '((P_ij / P_ij[:, 0, newaxis]) ** 2 * (eta_SPM + eta_XPM), axis=1)\n', (7355, 7420), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((4701, 4717), 'numpy.arange', 'arange', (['channels'], {}), '(channels)\n', (4707, 4717), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((7764, 7771), 'numpy.log', 'log', (['(10)'], {}), '(10)\n', (7767, 7771), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((7699, 7710), 'numpy.isfinite', 'isfinite', (['x'], {}), '(x)\n', (7707, 7710), False, 'from numpy import abs, arange, arcsinh, 
arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((7757, 7763), 'numpy.log', 'log', (['x'], {}), '(x)\n', (7760, 7763), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((4789, 4805), 'numpy.arange', 'arange', (['channels'], {}), '(channels)\n', (4795, 4805), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((3864, 3898), 'numpy.arcsinh', 'arcsinh', (['(phi_i * B_i ** 2 / a / pi)'], {}), '(phi_i * B_i ** 2 / a / pi)\n', (3871, 3898), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((3922, 3966), 'numpy.arcsinh', 'arcsinh', (['(phi_i * B_i ** 2 / (a + a_bar) / pi)'], {}), '(phi_i * B_i ** 2 / (a + a_bar) / pi)\n', (3929, 3966), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((4241, 4265), 'numpy.arctan', 'arctan', (['(phi_ik * B_i / a)'], {}), '(phi_ik * B_i / a)\n', (4247, 4265), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((4329, 4363), 'numpy.arctan', 'arctan', (['(phi_ik * B_i / (a + a_bar))'], {}), '(phi_ik * B_i / (a + a_bar))\n', (4335, 4363), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((3561, 3572), 'numpy.mean', 'mean', (['beta2'], {}), '(beta2)\n', (3565, 3572), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n'), ((3580, 3591), 'numpy.mean', 'mean', (['beta3'], {}), '(beta3)\n', (3584, 3591), False, 'from numpy import abs, arange, arcsinh, arctan, isfinite, log, mean, newaxis, pi, sum, zeros\n')] |
import random
import paddle
import numpy as np
def setup_seed(seed=42):
    """Seed the Python, NumPy and Paddle RNGs with the same value for reproducible runs."""
    for seeder in (random.seed, np.random.seed, paddle.seed):
        seeder(seed)
"paddle.seed",
"numpy.random.seed",
"random.seed"
] | [((77, 94), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (88, 94), False, 'import random\n'), ((99, 119), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (113, 119), True, 'import numpy as np\n'), ((124, 141), 'paddle.seed', 'paddle.seed', (['seed'], {}), '(seed)\n', (135, 141), False, 'import paddle\n')] |
#!/usr/bin/env python3
"""
Welcome to CARLA manual control.
Use ARROWS or WASD keys for control.
W : throttle
S : brake
A/D : steer left/right
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
M : toggle manual transmission
,/. : gear up/down
CTRL + W : toggle constant velocity mode at 60 km/h
L : toggle next light type
SHIFT + L : toggle high beam
Z/X : toggle right/left blinker
I : toggle interior light
TAB : change sensor position
C : change weather (Shift+C reverse)
Backspace : change vehicle
V : Select next map layer (Shift+V reverse)
B : Load current selected map layer (Shift+B to unload)
R : toggle recording images to disk
T : restart fake sensors
CTRL + R : toggle recording of simulation (replacing any previous)
CTRL + P : start replaying last recorded simulation
CTRL + + : increments the start time of the replay by 1 second (+SHIFT = 10 seconds)
CTRL + - : decrements the start time of the replay by 1 second (+SHIFT = 10 seconds)
F1 : toggle HUD
H/? : toggle help
ESC : quit
"""
# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================
import os
import sys
try:
# Assuming .egg at the same path
this_script_path = os.path.dirname(os.path.realpath(__file__))
egg_file_path = 'carla-0.9.11-py3.7-linux-x86_64.egg'
sys.path.append(os.path.join(this_script_path, egg_file_path))
except IndexError:
pass
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
from functools import partial
from threading import Thread
import rclpy
from rclpy.node import Node
from object_model_msgs.msg import ObjectModel, Object, Track, Dimensions
from fusion_layer.srv import RegisterSensor, RemoveSensor
import carla
from carla import ColorConverter as cc
import argparse
import collections
import datetime
import logging
import math
import random
import re
import weakref
from copy import deepcopy
try:
import pygame
from pygame.locals import KMOD_CTRL
from pygame.locals import KMOD_SHIFT
from pygame.locals import K_0
from pygame.locals import K_9
from pygame.locals import K_BACKQUOTE
from pygame.locals import K_BACKSPACE
from pygame.locals import K_COMMA
from pygame.locals import K_DOWN
from pygame.locals import K_ESCAPE
from pygame.locals import K_F1
from pygame.locals import K_LEFT
from pygame.locals import K_PERIOD
from pygame.locals import K_RIGHT
from pygame.locals import K_SLASH
from pygame.locals import K_SPACE
from pygame.locals import K_TAB
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_b
from pygame.locals import K_c
from pygame.locals import K_d
from pygame.locals import K_g
from pygame.locals import K_h
from pygame.locals import K_i
from pygame.locals import K_l
from pygame.locals import K_m
from pygame.locals import K_n
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_r
from pygame.locals import K_s
from pygame.locals import K_t
from pygame.locals import K_v
from pygame.locals import K_w
from pygame.locals import K_x
from pygame.locals import K_z
from pygame.locals import K_MINUS
from pygame.locals import K_EQUALS
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
# ==============================================================================
# -- Global functions ----------------------------------------------------------
# ==============================================================================
# Simple time reference from since when this script is running
STARTED_TIME = None
SURROUND_SENSOR_RANGE = 50
def find_weather_presets():
    """Return (preset, display name) pairs for every CARLA weather preset."""
    # Splits a CamelCase identifier into its component words.
    word_splitter = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')

    def pretty(identifier):
        return ' '.join(m.group(0) for m in word_splitter.finditer(identifier))

    preset_names = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]
    return [(getattr(carla.WeatherParameters, x), pretty(x)) for x in preset_names]
def get_actor_display_name(actor, truncate=250):
    """Build a human-readable name from an actor's type_id, truncated with an ellipsis."""
    words = actor.type_id.replace('_', '.').title().split('.')[1:]
    display = ' '.join(words)
    if len(display) > truncate:
        return display[:truncate - 1] + u'\u2026'
    return display
def wrap_angle(theta):
    """Normalize an angle in radians into the interval [-pi, pi)."""
    full_turn = 2 * np.pi
    return (theta + np.pi) % full_turn - np.pi
def get_relative_obj(reference_obj: Object, obj: Object) -> Object:
    """Express obj's track state in the coordinate frame of reference_obj.

    The reference state is first rotated by its own yaw, then a combined
    rotation/translation is applied to obj's 8-element state vector
    (x, y, vx, vy, ax, ay, yaw, yaw rate) via homogeneous coordinates.
    Returns a deep copy of obj with the transformed state; inputs are not
    mutated. The resulting yaw is re-wrapped into [-pi, pi).
    """
    angle = reference_obj.track.state[Track.STATE_YAW_IDX]
    cos_angle = np.cos(angle)
    sin_angle = np.sin(angle)
    # Block-diagonal rotation applied pairwise to (x, y), (vx, vy), (ax, ay);
    # yaw and yaw-rate components pass through unchanged.
    rotation_reference = np.array([
        [cos_angle, -sin_angle, 0, 0, 0, 0, 0, 0],
        [sin_angle, cos_angle, 0, 0, 0, 0, 0, 0],
        [0, 0, cos_angle, -sin_angle, 0, 0, 0, 0],
        [0, 0, sin_angle, cos_angle, 0, 0, 0, 0],
        [0, 0, 0, 0, cos_angle, -sin_angle, 0, 0],
        [0, 0, 0, 0, sin_angle, cos_angle, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, 0],
        [0, 0, 0, 0, 0, 0, 0, 1],
    ], dtype='float64')
    rotated_reference = rotation_reference @ reference_obj.track.state
    # Homogeneous transform: rotate obj's state and subtract the rotated
    # reference offsets (last column). NOTE(review): the paired rows use
    # [cos, -sin] / [-sin, -cos] rather than a pure rotation, and the y
    # offsets are added while the x offsets are subtracted — this looks like
    # a deliberate y-axis flip (CARLA's frame vs. the fusion layer's);
    # confirm against the fusion layer's frame convention.
    transformation = np.array([
        [cos_angle, -sin_angle, 0, 0, 0, 0, 0, 0, -rotated_reference[Track.STATE_X_IDX]],
        [-sin_angle, -cos_angle, 0, 0, 0, 0, 0, 0, rotated_reference[Track.STATE_Y_IDX]],
        [0, 0, cos_angle, -sin_angle, 0, 0, 0, 0, -rotated_reference[Track.STATE_VELOCITY_X_IDX]],
        [0, 0, -sin_angle, -cos_angle, 0, 0, 0, 0, rotated_reference[Track.STATE_VELOCITY_Y_IDX]],
        [0, 0, 0, 0, cos_angle, -sin_angle, 0, 0, -rotated_reference[Track.STATE_ACCELERATION_X_IDX]],
        [0, 0, 0, 0, -sin_angle, -cos_angle, 0, 0, rotated_reference[Track.STATE_ACCELERATION_Y_IDX]],
        [0, 0, 0, 0, 0, 0, 1, 0, -rotated_reference[Track.STATE_YAW_IDX]],
        [0, 0, 0, 0, 0, 0, 0, 1, -rotated_reference[Track.STATE_YAW_RATE_IDX]],
        [0, 0, 0, 0, 0, 0, 0, 0, 1],
    ], dtype='float64')
    # Append a 1 to form homogeneous coordinates, apply, then drop it again.
    to_align = np.hstack((obj.track.state, 1)).T
    product = transformation @ to_align
    obj_state_aligned = np.delete(product, -1, 0).astype('float32')
    result = deepcopy(obj)
    result.track.state = obj_state_aligned
    result.track.state[Track.STATE_YAW_IDX] = wrap_angle(result.track.state[Track.STATE_YAW_IDX])
    # print('========>', list(np.round(result.track.state, 5)))
    return result
def actor_to_object_model(actor: carla.Actor) -> Object:
    """Convert a CARLA actor's pose, kinematics and bounding box into an Object message."""
    location = actor.get_location()
    rotation = actor.get_transform().rotation
    velocity = actor.get_velocity()
    angular_velocity = actor.get_angular_velocity()
    acceleration = actor.get_acceleration()
    bounding_box = actor.bounding_box

    obj = Object()
    state = obj.track.state
    state[Track.STATE_X_IDX] = location.x
    state[Track.STATE_Y_IDX] = location.y
    state[Track.STATE_VELOCITY_X_IDX] = velocity.x
    state[Track.STATE_VELOCITY_Y_IDX] = velocity.y
    state[Track.STATE_ACCELERATION_X_IDX] = acceleration.x
    state[Track.STATE_ACCELERATION_Y_IDX] = acceleration.y
    # CARLA reports yaw/angular velocity in degrees; converted here and
    # negated — presumably to flip handedness between frames, confirm.
    state[Track.STATE_YAW_IDX] = -math.radians(rotation.yaw)
    state[Track.STATE_YAW_RATE_IDX] = -math.radians(angular_velocity.z)

    dims = obj.dimensions.values
    dims[Dimensions.DIMENSIONS_WIDTH_IDX] = bounding_box.extent.y * 2
    dims[Dimensions.DIMENSIONS_LENGHT_IDX] = bounding_box.extent.x * 2
    return obj
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class World:
    """Owns the CARLA world, the ego vehicle, its fake sensors and the CSV logs."""

    def __init__(self, carla_world, hud, ros_node, args):
        self._csv = None
        self._extra_objs_csv = None  # Extra information about the detected objects
        self.open_csv()
        self.open_extra_objs_csv()
        self.world = carla_world
        self.actor_role_name = args.rolename
        self.ros_node = ros_node
        try:
            self.map = self.world.get_map()
        except RuntimeError as error:
            print('RuntimeError: {}'.format(error))
            print(' The server could not send the OpenDRIVE (.xodr) file:')
            print(' Make sure it exists, has the same name of your town, and is correct.')
            sys.exit(1)
        self.hud = hud
        self.player = None
        self.surround_sensor = None
        self.surround_sensor2 = None
        self.imu_sensor = None
        self.camera_manager = None
        self._weather_presets = find_weather_presets()
        self._weather_index = 0
        self._actor_filter = args.filter
        self._gamma = args.gamma
        self.restart()
        self.world.on_tick(hud.on_world_tick)
        self.recording_enabled = False
        self.recording_start = 0
        self.constant_velocity_enabled = False
        self.current_map_layer = 0
        self.map_layer_names = [
            carla.MapLayer.NONE,
            carla.MapLayer.Buildings,
            carla.MapLayer.Decals,
            carla.MapLayer.Foliage,
            carla.MapLayer.Ground,
            carla.MapLayer.ParkedVehicles,
            carla.MapLayer.Particles,
            carla.MapLayer.Props,
            carla.MapLayer.StreetLights,
            carla.MapLayer.Walls,
            carla.MapLayer.All
        ]

    def open_csv(self, name="sensor_layer.csv"):
        """Open (truncating) the per-measurement CSV log."""
        self._csv = open(name, "w")

    def open_extra_objs_csv(self, name="sensor_layer_extra_objs.csv"):
        """Open (truncating) the CSV with ground-truth info about nearby actors."""
        self._extra_objs_csv = open(name, "w")

    def close_csv(self):
        self._csv.close()

    def close_extra_objs_csv(self):
        self._extra_objs_csv.close()

    def add_info_csv(self, surround_sensor, ego_obj, msg):
        """Append one row per published measurement: time, sensor name, object count, ego state."""
        time_ = self.ros_node.get_clock().now().from_msg(msg.header.stamp)
        list_ = [time_.nanoseconds, surround_sensor.name, len(msg.object_model)]
        list_.extend(list(np.round(ego_obj.track.state, 5)))
        self._csv.write(','.join(map(str, list_)) + '\n')
        self._csv.flush()

    def add_info_extra_objs_csv(self):
        """Log ground truth (id, type, distance, size, position) of every actor in sensor range."""
        actors = (list(self.world.get_actors().filter('vehicle.*'))
                  + list(self.world.get_actors().filter('walker.pedestrian.*')))
        player_location = self.player.get_location()
        time_ = self.ros_node.get_clock().now()

        def get_distance(a):
            # Planar (x, y) distance from the ego vehicle.
            location = a.get_location()
            return math.hypot(location.x - player_location.x, location.y - player_location.y)

        for actor in actors:
            distance = get_distance(actor)
            if distance <= SURROUND_SENSOR_RANGE and actor.id != self.player.id:
                location = actor.get_location()
                bb = actor.bounding_box
                attrs = [time_.nanoseconds, actor.id, actor.type_id, distance, bb.extent.x*2, bb.extent.y*2, location.x, location.y]
                line = ','.join(map(str, attrs)) + '\n'
                self._extra_objs_csv.write(line)
        self._extra_objs_csv.flush()

    def restart(self):
        """(Re)spawn the ego vehicle plus its fake surround sensors, IMU and camera."""
        self.player_max_speed = 1.589
        self.player_max_speed_fast = 3.713
        # Keep same camera config if the camera manager exists.
        cam_index = self.camera_manager.index if self.camera_manager is not None else 0
        cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0
        # Get a random blueprint.
        blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))
        blueprint.set_attribute('role_name', self.actor_role_name)
        if blueprint.has_attribute('color'):
            color = random.choice(blueprint.get_attribute('color').recommended_values)
            blueprint.set_attribute('color', color)
        if blueprint.has_attribute('driver_id'):
            driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
            blueprint.set_attribute('driver_id', driver_id)
        if blueprint.has_attribute('is_invincible'):
            blueprint.set_attribute('is_invincible', 'true')
        # set the max speed
        if blueprint.has_attribute('speed'):
            self.player_max_speed = float(blueprint.get_attribute('speed').recommended_values[1])
            self.player_max_speed_fast = float(blueprint.get_attribute('speed').recommended_values[2])
        else:
            print("No recommended values for 'speed' attribute")
        # Spawn the player.
        if self.player is not None:
            # Re-spawn slightly above the current pose with roll/pitch zeroed.
            spawn_point = self.player.get_transform()
            spawn_point.location.z += 2.0
            spawn_point.rotation.roll = 0.0
            spawn_point.rotation.pitch = 0.0
            self.destroy()
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
            self.modify_vehicle_physics(self.player)
        while self.player is None:
            if not self.map.get_spawn_points():
                print('There are no spawn points available in your map/town.')
                print('Please add some Vehicle Spawn Point to your UE4 scene.')
                sys.exit(1)
            spawn_points = self.map.get_spawn_points()
            spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
            # NOTE(review): the randomly chosen spawn_point above is immediately
            # overwritten by this fixed pose, making the random choice dead code.
            spawn_point = carla.Transform(
                carla.Location(-149, -80, 2),
                rotation=carla.Rotation(yaw=90)
            )
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
            self.modify_vehicle_physics(self.player)
        # Two fake surround sensors mounted at fixed offsets/orientations.
        self.surround_sensor = SurroundSensor(self, 1.0, -2.0, np.pi/4, 'sensor1')
        self.surround_sensor2 = SurroundSensor(self, -1.0, 0.5, -np.pi/3, 'sensor2')
        self.imu_sensor = IMUSensor(self.player)
        self.camera_manager = CameraManager(self.player, self.hud, self._gamma)
        self.camera_manager.transform_index = cam_pos_index
        self.camera_manager.set_sensor(cam_index, notify=False)
        actor_type = get_actor_display_name(self.player)
        self.hud.notification(actor_type)

    def next_weather(self, reverse=False):
        """Cycle to the next (or previous) weather preset and apply it."""
        self._weather_index += -1 if reverse else 1
        self._weather_index %= len(self._weather_presets)
        preset = self._weather_presets[self._weather_index]
        self.hud.notification('Weather: %s' % preset[1])
        self.player.get_world().set_weather(preset[0])

    def next_map_layer(self, reverse=False):
        """Select the next (or previous) map layer; loading happens in load_map_layer."""
        self.current_map_layer += -1 if reverse else 1
        self.current_map_layer %= len(self.map_layer_names)
        selected = self.map_layer_names[self.current_map_layer]
        self.hud.notification('LayerMap selected: %s' % selected)

    def load_map_layer(self, unload=False):
        """Load (or, with unload=True, unload) the currently selected map layer."""
        selected = self.map_layer_names[self.current_map_layer]
        if unload:
            self.hud.notification('Unloading map layer: %s' % selected)
            self.world.unload_map_layer(selected)
        else:
            self.hud.notification('Loading map layer: %s' % selected)
            self.world.load_map_layer(selected)

    def modify_vehicle_physics(self, vehicle):
        """Enable sweep wheel collision on the vehicle's physics."""
        physics_control = vehicle.get_physics_control()
        physics_control.use_sweep_wheel_collision = True
        vehicle.apply_physics_control(physics_control)

    def tick(self, clock):
        self.hud.tick(self, clock)

    def render(self, display):
        self.camera_manager.render(display)
        self.hud.render(display)

    def destroy_sensors(self):
        """Destroy only the camera sensor (used when restarting the replayer)."""
        self.camera_manager.sensor.destroy()
        self.camera_manager.sensor = None
        self.camera_manager.index = None

    def destroy(self):
        """Tear down sensors, the ego actor and close the CSV logs."""
        sensors = [self.camera_manager.sensor, self.imu_sensor.sensor]
        for sensor in sensors:
            if sensor is not None:
                sensor.stop()
                sensor.destroy()
        if self.player is not None:
            self.player.destroy()
        self.close_csv()
        self.close_extra_objs_csv()
# ==============================================================================
# -- KeyboardControl -----------------------------------------------------------
# ==============================================================================
class KeyboardControl:
    """Class that handles keyboard input."""

    def __init__(self, world, start_in_autopilot):
        self._autopilot_enabled = start_in_autopilot
        if isinstance(world.player, carla.Vehicle):
            self._control = carla.VehicleControl()
            self._lights = carla.VehicleLightState.NONE
            world.player.set_autopilot(self._autopilot_enabled)
            world.player.set_light_state(self._lights)
        elif isinstance(world.player, carla.Walker):
            self._control = carla.WalkerControl()
            # Walkers have no autopilot; force it off.
            self._autopilot_enabled = False
            self._rotation = world.player.get_transform().rotation
        else:
            raise NotImplementedError("Actor type not supported")
        self._steer_cache = 0.0
        world.hud.notification("Press 'H' or '?' for help.", seconds=4.0)

    def parse_events(self, client, world, clock):
        """Process pygame events; returns True when the app should quit.

        Handles one-shot KEYUP shortcuts (restart, weather, map layers,
        recorder/replayer, lights, fake-sensor restart) first, then — when
        autopilot is off — continuous driving keys via _parse_vehicle_keys /
        _parse_walker_keys and applies the resulting control to the player.
        """
        if isinstance(self._control, carla.VehicleControl):
            current_lights = self._lights
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return True
            elif event.type == pygame.KEYUP:
                if self._is_quit_shortcut(event.key):
                    return True
                elif event.key == K_BACKSPACE:
                    if self._autopilot_enabled:
                        world.player.set_autopilot(False)
                        world.restart()
                        world.player.set_autopilot(True)
                    else:
                        world.restart()
                elif event.key == K_F1:
                    world.hud.toggle_info()
                elif event.key == K_v and pygame.key.get_mods() & KMOD_SHIFT:
                    world.next_map_layer(reverse=True)
                elif event.key == K_v:
                    world.next_map_layer()
                elif event.key == K_b and pygame.key.get_mods() & KMOD_SHIFT:
                    world.load_map_layer(unload=True)
                elif event.key == K_b:
                    world.load_map_layer()
                elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):
                    world.hud.help.toggle()
                elif event.key == K_TAB:
                    world.camera_manager.toggle_camera()
                elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:
                    world.next_weather(reverse=True)
                elif event.key == K_c:
                    world.next_weather()
                elif event.key == K_BACKQUOTE:
                    world.camera_manager.next_sensor()
                elif event.key == K_n:
                    world.camera_manager.next_sensor()
                elif event.key == K_w and (pygame.key.get_mods() & KMOD_CTRL):
                    if world.constant_velocity_enabled:
                        world.player.disable_constant_velocity()
                        world.constant_velocity_enabled = False
                        world.hud.notification("Disabled Constant Velocity Mode")
                    else:
                        # 17 m/s forward ~= 60 km/h.
                        world.player.enable_constant_velocity(carla.Vector3D(17, 0, 0))
                        world.constant_velocity_enabled = True
                        world.hud.notification("Enabled Constant Velocity Mode at 60 km/h")
                elif event.key > K_0 and event.key <= K_9:
                    world.camera_manager.set_sensor(event.key - 1 - K_0)
                elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL):
                    world.camera_manager.toggle_recording()
                elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL):
                    if (world.recording_enabled):
                        client.stop_recorder()
                        world.recording_enabled = False
                        world.hud.notification("Recorder is OFF")
                    else:
                        client.start_recorder("manual_recording.rec", True)
                        world.recording_enabled = True
                        world.hud.notification("Recorder is ON")
                elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL):
                    # stop recorder
                    client.stop_recorder()
                    world.recording_enabled = False
                    # work around to fix camera at start of replaying
                    current_index = world.camera_manager.index
                    world.destroy_sensors()
                    # disable autopilot
                    self._autopilot_enabled = False
                    world.player.set_autopilot(self._autopilot_enabled)
                    world.hud.notification("Replaying file 'manual_recording.rec'")
                    # replayer
                    client.replay_file("manual_recording.rec", world.recording_start, 0, 0)
                    world.camera_manager.set_sensor(current_index)
                elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL):
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        world.recording_start -= 10
                    else:
                        world.recording_start -= 1
                    world.hud.notification("Recording start time is %d" % (world.recording_start))
                elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL):
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        world.recording_start += 10
                    else:
                        world.recording_start += 1
                    world.hud.notification("Recording start time is %d" % (world.recording_start))
                # Vehicle-only shortcuts (gears, autopilot, lights, fake sensors).
                if isinstance(self._control, carla.VehicleControl):
                    if event.key == K_q:
                        self._control.gear = 1 if self._control.reverse else -1
                    elif event.key == K_m:
                        self._control.manual_gear_shift = not self._control.manual_gear_shift
                        self._control.gear = world.player.get_control().gear
                        world.hud.notification('%s Transmission' %
                                               ('Manual' if self._control.manual_gear_shift else 'Automatic'))
                    elif self._control.manual_gear_shift and event.key == K_COMMA:
                        self._control.gear = max(-1, self._control.gear - 1)
                    elif self._control.manual_gear_shift and event.key == K_PERIOD:
                        self._control.gear = self._control.gear + 1
                    elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:
                        self._autopilot_enabled = not self._autopilot_enabled
                        world.player.set_autopilot(self._autopilot_enabled)
                        world.hud.notification(
                            'Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
                    elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:
                        current_lights ^= carla.VehicleLightState.Special1
                    elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:
                        current_lights ^= carla.VehicleLightState.HighBeam
                    elif event.key == K_l:
                        # Use 'L' key to switch between lights:
                        # closed -> position -> low beam -> fog
                        if not self._lights & carla.VehicleLightState.Position:
                            world.hud.notification("Position lights")
                            current_lights |= carla.VehicleLightState.Position
                        else:
                            world.hud.notification("Low beam lights")
                            current_lights |= carla.VehicleLightState.LowBeam
                        if self._lights & carla.VehicleLightState.LowBeam:
                            world.hud.notification("Fog lights")
                            current_lights |= carla.VehicleLightState.Fog
                        if self._lights & carla.VehicleLightState.Fog:
                            world.hud.notification("Lights off")
                            current_lights ^= carla.VehicleLightState.Position
                            current_lights ^= carla.VehicleLightState.LowBeam
                            current_lights ^= carla.VehicleLightState.Fog
                    elif event.key == K_i:
                        current_lights ^= carla.VehicleLightState.Interior
                    elif event.key == K_z:
                        current_lights ^= carla.VehicleLightState.LeftBlinker
                    elif event.key == K_x:
                        current_lights ^= carla.VehicleLightState.RightBlinker
                    elif event.key == K_t:
                        # Tear down and re-create both fake surround sensors,
                        # preserving their mount offsets/orientation and names.
                        world.hud.notification("Restarting Fake Sensors")
                        world.close_csv()
                        world.open_csv()
                        world.surround_sensor.destroy()
                        world.surround_sensor2.destroy()
                        world.surround_sensor = SurroundSensor(
                            world,
                            world.surround_sensor.relative_x,
                            world.surround_sensor.relative_y,
                            world.surround_sensor.relative_angle,
                            world.surround_sensor.name
                        )
                        world.surround_sensor2 = SurroundSensor(
                            world,
                            world.surround_sensor2.relative_x,
                            world.surround_sensor2.relative_y,
                            world.surround_sensor2.relative_angle,
                            world.surround_sensor2.name
                        )
        if not self._autopilot_enabled:
            if isinstance(self._control, carla.VehicleControl):
                self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
                self._control.reverse = self._control.gear < 0
                # Set automatic control-related vehicle lights
                if self._control.brake:
                    current_lights |= carla.VehicleLightState.Brake
                else:  # Remove the Brake flag
                    current_lights &= ~carla.VehicleLightState.Brake
                if self._control.reverse:
                    current_lights |= carla.VehicleLightState.Reverse
                else:  # Remove the Reverse flag
                    current_lights &= ~carla.VehicleLightState.Reverse
                if current_lights != self._lights:  # Change the light state only if necessary
                    self._lights = current_lights
                    world.player.set_light_state(carla.VehicleLightState(self._lights))
            elif isinstance(self._control, carla.WalkerControl):
                self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time(), world)
            world.player.apply_control(self._control)

    def _parse_vehicle_keys(self, keys, milliseconds):
        """Map held-down keys to throttle/brake/steer; steering ramps with frame time."""
        if keys[K_UP] or keys[K_w]:
            self._control.throttle = min(self._control.throttle + 0.01, 1)
        else:
            self._control.throttle = 0.0
        if keys[K_DOWN] or keys[K_s]:
            self._control.brake = min(self._control.brake + 0.2, 1)
        else:
            self._control.brake = 0
        steer_increment = 5e-4 * milliseconds
        if keys[K_LEFT] or keys[K_a]:
            if self._steer_cache > 0:
                self._steer_cache = 0
            else:
                self._steer_cache -= steer_increment
        elif keys[K_RIGHT] or keys[K_d]:
            if self._steer_cache < 0:
                self._steer_cache = 0
            else:
                self._steer_cache += steer_increment
        else:
            self._steer_cache = 0.0
        # Clamp to [-0.7, 0.7] and quantize to one decimal.
        self._steer_cache = min(0.7, max(-0.7, self._steer_cache))
        self._control.steer = round(self._steer_cache, 1)
        self._control.hand_brake = keys[K_SPACE]

    @staticmethod
    def _is_quit_shortcut(key):
        """True for ESC or Ctrl+Q."""
        return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)
# ==============================================================================
# -- HUD -----------------------------------------------------------------------
# ==============================================================================
class HUD:
    """Heads-up display: renders telemetry and the ego's object-model state."""

    def __init__(self, width, height):
        self.dim = (width, height)
        font = pygame.font.Font(pygame.font.get_default_font(), 20)
        font_name = 'courier' if os.name == 'nt' else 'mono'
        fonts = [x for x in pygame.font.get_fonts() if font_name in x]
        default_font = 'ubuntumono'
        mono = default_font if default_font in fonts else fonts[0]
        mono = pygame.font.match_font(mono)
        self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
        self._notifications = FadingText(font, (width, 40), (0, height - 40))
        self.help = HelpText(pygame.font.Font(mono, 16), width, height)
        self.server_fps = 0
        self.frame = 0
        self.simulation_time = 0
        self._show_info = True
        self._info_text = []
        self._server_clock = pygame.time.Clock()

    def on_world_tick(self, timestamp):
        """Server tick callback: track server FPS, frame and simulation time."""
        self._server_clock.tick()
        self.server_fps = self._server_clock.get_fps()
        self.frame = timestamp.frame
        self.simulation_time = timestamp.elapsed_seconds

    def tick(self, world, clock):
        """Rebuild the info-panel text lines from the current world state."""
        if world is None:
            return
        self._notifications.tick(world, clock)
        if not self._show_info:
            return
        t = world.player.get_transform()
        v = world.player.get_velocity()
        compass = world.imu_sensor.compass
        heading = 'N' if compass > 270.5 or compass < 89.5 else ''
        heading += 'S' if 90.5 < compass < 269.5 else ''
        heading += 'E' if 0.5 < compass < 179.5 else ''
        heading += 'W' if 180.5 < compass < 359.5 else ''
        ego_obj = actor_to_object_model(world.player)
        self._info_text = [
            'Server: % 16.0f FPS' % self.server_fps,
            'Client: % 16.0f FPS' % clock.get_fps(),
            '',
            'Vehicle: % 20s' % get_actor_display_name(world.player, truncate=20),
            '',
            'Speed: % 15.0f km/h' % (3.6 * math.hypot(v.x, v.y)),
            'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),
            u'Compass:% 17.5f\N{DEGREE SIGN} % 2s' % (compass, heading),
            '',
            'Object Model main attributes:',
            ' X: % 7.5f m' % (ego_obj.track.state[Track.STATE_X_IDX]),
            ' Y: % 7.5f m' % (ego_obj.track.state[Track.STATE_Y_IDX]),
            ' Vx: % 7.5f m/s' % (ego_obj.track.state[Track.STATE_VELOCITY_X_IDX]),
            ' Vy: % 7.5f m/s' % (ego_obj.track.state[Track.STATE_VELOCITY_Y_IDX]),
            ' Ax: % 7.5f m/s^2' % (ego_obj.track.state[Track.STATE_ACCELERATION_X_IDX]),
            ' Ay: % 7.5f m/s^2' % (ego_obj.track.state[Track.STATE_ACCELERATION_Y_IDX]),
            ' yaw: % 7.5f rad' % (ego_obj.track.state[Track.STATE_YAW_IDX]),
            ' yaw_rate: % 7.5f rad/s' % (ego_obj.track.state[Track.STATE_YAW_RATE_IDX]),
            ' length: % 7.5f m' % (ego_obj.dimensions.values[Dimensions.DIMENSIONS_LENGHT_IDX]),
            ' width: % 7.5f m' % (ego_obj.dimensions.values[Dimensions.DIMENSIONS_WIDTH_IDX]),
            ''
        ]
        vehicles = world.world.get_actors().filter('vehicle.*')
        self._info_text += [
            '',
            'Number of vehicles: % 8d' % len(vehicles)
        ]
        # nearby_vehicles = world.surround_sensor.read(vehicles)
        actors = list(world.world.get_actors().filter('walker.pedestrian.*'))
        actors += list(vehicles)
        nearby_actors = world.surround_sensor.read(actors)
        if nearby_actors:
            self._info_text += ['', 'Surrounding actors:']
            for distance, actor in nearby_actors:
                actor_type = get_actor_display_name(actor, truncate=22)
                self._info_text.append(' % 4dm %s' % (distance, actor_type))

    def toggle_info(self):
        self._show_info = not self._show_info

    def notification(self, text, seconds=2.0):
        self._notifications.set_text(text, seconds=seconds)

    def error(self, text):
        self._notifications.set_text('Error: %s' % text, (255, 0, 0))

    def render(self, display):
        """Draw the translucent info column, then notifications and help overlay.

        _info_text items are rendered by type: lists become graphs, tuples
        become checkboxes/progress bars, strings are drawn as text lines.
        """
        if self._show_info:
            info_surface = pygame.Surface((250, self.dim[1]))
            info_surface.set_alpha(150)
            display.blit(info_surface, (0, 0))
            v_offset = 4
            bar_h_offset = 100
            bar_width = 106
            for item in self._info_text:
                if v_offset + 18 > self.dim[1]:
                    break
                if isinstance(item, list):
                    if len(item) > 1:
                        points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
                        pygame.draw.lines(display, (255, 136, 0), False, points, 2)
                    item = None
                    v_offset += 18
                elif isinstance(item, tuple):
                    if isinstance(item[1], bool):
                        rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
                    else:
                        rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
                        f = (item[1] - item[2]) / (item[3] - item[2])
                        if item[2] < 0.0:
                            rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))
                        else:
                            rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect)
                    item = item[0]
                if item:  # At this point has to be a str.
                    surface = self._font_mono.render(item, True, (255, 255, 255))
                    display.blit(surface, (8, v_offset))
                v_offset += 18
        self._notifications.render(display)
        self.help.render(display)
# ==============================================================================
# -- FadingText ----------------------------------------------------------------
# ==============================================================================
class FadingText:
    """Short-lived notification text that fades out over time."""

    def __init__(self, font, dim, pos):
        self.font = font
        self.dim = dim
        self.pos = pos
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)

    def set_text(self, text, color=(255, 255, 255), seconds=2.0):
        """Render *text* onto a fresh surface and restart the fade timer."""
        rendered = self.font.render(text, True, color)
        fresh_surface = pygame.Surface(self.dim)
        fresh_surface.fill((0, 0, 0, 0))
        fresh_surface.blit(rendered, (10, 11))
        self.surface = fresh_surface
        self.seconds_left = seconds

    def tick(self, _, clock):
        """Consume elapsed frame time and dim the surface proportionally."""
        elapsed = 1e-3 * clock.get_time()
        self.seconds_left = max(0.0, self.seconds_left - elapsed)
        self.surface.set_alpha(500.0 * self.seconds_left)

    def render(self, display):
        """Blit the (possibly faded) text onto *display*."""
        display.blit(self.surface, self.pos)
# ==============================================================================
# -- HelpText ------------------------------------------------------------------
# ==============================================================================
class HelpText:
    """Helper class to handle text output using pygame"""

    def __init__(self, font, width, height):
        help_lines = __doc__.split('\n')  # module docstring is the help text
        self.font = font
        self.line_space = 18
        self.dim = (780, len(help_lines) * self.line_space + 12)
        # Center the panel on the screen.
        self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)
        self.surface.fill((0, 0, 0, 0))
        for row, text in enumerate(help_lines):
            rendered = self.font.render(text, True, (255, 255, 255))
            self.surface.blit(rendered, (22, row * self.line_space))
        self._render = False
        self.surface.set_alpha(220)

    def toggle(self):
        """Flip help-panel visibility."""
        self._render = not self._render

    def render(self, display):
        """Draw the help panel when it is visible."""
        if self._render:
            display.blit(self.surface, self.pos)
# ==============================================================================
# -- SurroundSensor ----------------------------------------------------------------
# ==============================================================================
class SurroundSensor:
    def __init__(self, world, x=0., y=0., angle=0., name="surround_sensor"):
        """Create a fake surround sensor mounted on the ego vehicle.

        Registers itself with the fusion layer, creates the ROS publisher,
        and hooks a CARLA tick callback that publishes nearby actors.
        x/y/angle are the mount offsets relative to the ego vehicle.
        """
        self.name = name
        self.world = world
        self.node = world.ros_node
        self.time_last_measurement = None
        self.last_measurement = None
        self.relative_x = x
        self.relative_y = y
        self.relative_angle = angle
        # Capable of measuring every component of the track state.
        self.capable = [True] * Track.STATE_SIZE
        # self.measurement_noise_matrix = np.diag(
        #     [1.5**2, 1.5**2, 1**2, 0.5**2, 0.02**2, 0.1**2
        # ]).astype('float32')
        # Zero noise: this fake sensor reports ground truth.
        self.measurement_noise_matrix = np.diag(
            [0, 0, 0, 0, 0, 0]
        ).astype('float32')
        self.publisher = self.node.create_publisher(
            ObjectModel,
            'objectlevel_fusion/fusion_layer/fusion/submit',
            10
        )
        self.sensor_registration_client = self.node.create_client(
            RegisterSensor, 'fusion_layer/register_sensor'
        )
        # Block until the fusion layer services are reachable.
        while not self.sensor_registration_client.wait_for_service(timeout_sec=5.0):
            self.node.get_logger().error(
                'Failed to connect for sensor registration, trying again...'
            )
        self.sensor_remover_client = self.node.create_client(
            RemoveSensor, 'fusion_layer/remove_sensor'
        )
        while not self.sensor_remover_client.wait_for_service(timeout_sec=5.0):
            self.node.get_logger().error(
                'Failed to connect for sensor removing, trying again...'
            )
        self._register()

        # Nested to use 'self' from the context
        def _callback(_):
            # Not using WorldSnapshot because it doesn't have type_id information
            # and ActorSnapshot doesn't have get_location()
            global STARTED_TIME
            actors = list(world.world.get_actors().filter('walker.pedestrian.*'))
            actors += list(self.world.world.get_actors().filter('vehicle.*'))
            nearby_actors = [v for d, v in self.read(actors)]
            if len(nearby_actors) == 0:
                return
            # Discard first seconds of simulation, they're noisy
            if STARTED_TIME is None:
                STARTED_TIME = datetime.datetime.now()
                return
            if datetime.datetime.now() - STARTED_TIME < datetime.timedelta(seconds=2):
                return
            self.publish(nearby_actors)

        self._callback_id = self.world.world.on_tick(_callback)
    @property
    def x(self):
        """World-frame x position of the sensor (ego x plus mount offset)."""
        return self.world.player.get_location().x + self.relative_x
@property
def y(self):
return self.world.player.get_location().y + self.relative_y
@property
def angle(self):
return self.world.player.get_transform().rotation.yaw + self.relative_angle
def _register(self):
request = RegisterSensor.Request()
request.name = self.name
request.x = self.relative_x
request.y = self.relative_y
request.angle = self.relative_angle
request.capable = self.capable
request.measurement_noise_matrix = self.measurement_noise_matrix.reshape(-1)
self.sensor_registration_client.call_async(request)
self.node.get_logger().info(f"Sensor {self.name} registered successfully!")
def destroy(self):
self.world.world.remove_on_tick(self._callback_id)
self._unregister()
def _unregister(self):
request = RemoveSensor.Request()
request.name = self.name
return self.sensor_remover_client.call_async(request)
def read(self, actors=None, distance=SURROUND_SENSOR_RANGE):
if self.world is None:
return []
def get_distance(location):
return math.hypot(location.x - self.x, location.y - self.y)
actors = actors or (list(self.world.world.get_actors().filter('vehicle.*'))
+ list(self.world.world.get_actors().filter('walker.pedestrian.*')))
distances = ((get_distance(a.get_location()), a)
for a in actors if a.id != self.world.player.id)
with_distances = sorted([(d, a) for d, a in distances if d <= distance])
self.time_last_measurement = self.node.get_clock().now()
return with_distances
def publish(self, measurement):
ego_obj = actor_to_object_model(self.world.player)
msg = self._measurement_to_msg(measurement, ego_obj)
self.publisher.publish(msg)
self.world.add_info_csv(self, ego_obj, msg)
self.world.add_info_extra_objs_csv()
def get_relative(self, obj: Object) -> Object:
angle = self.relative_angle
cos_angle = np.cos(angle)
sin_angle = np.sin(angle)
transformation = np.array([
[cos_angle, -sin_angle, 0, 0, 0, 0, 0, 0, self.relative_x],
[sin_angle, cos_angle, 0, 0, 0, 0, 0, 0, self.relative_y],
[0, 0, cos_angle, -sin_angle, 0, 0, 0, 0, 0],
[0, 0, sin_angle, cos_angle, 0, 0, 0, 0, 0],
[0, 0, 0, 0, cos_angle, -sin_angle, 0, 0, 0],
[0, 0, 0, 0, sin_angle, cos_angle, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, self.relative_angle],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1],
], dtype='float32')
result = deepcopy(obj)
to_align = np.hstack((obj.track.state, 1)).T
transformation = np.linalg.inv(transformation)
product = transformation @ to_align
result.track.state = np.delete(product, -1, 0).astype('float32')
result.track.state[Track.STATE_YAW_IDX] = wrap_angle(result.track.state[Track.STATE_YAW_IDX])
return result
def _measurement_to_msg(self, measurement, ego_obj):
msg = ObjectModel()
msg.header.frame_id = self.name
msg.header.stamp = self.time_last_measurement.to_msg()
relative_to_ego = partial(get_relative_obj, ego_obj)
object_model = map(actor_to_object_model, measurement)
object_model = map(relative_to_ego, object_model)
object_model = map(self.get_relative, object_model)
msg.object_model = list(object_model)
return msg
# ==============================================================================
# -- CameraManager -------------------------------------------------------------
# ==============================================================================
class CameraManager:
    """Manages the spectator camera attached to the ego vehicle.

    Holds a list of candidate camera poses and sensor blueprints, spawns
    the active sensor, converts each incoming frame to a pygame surface
    for the HUD, and optionally records frames to disk.
    """

    def __init__(self, parent_actor, hud, gamma_correction):
        self.sensor = None
        self.surface = None
        self._parent = parent_actor
        self.hud = hud
        self.recording = False
        bound_y = 0.5 + self._parent.bounding_box.extent.y
        Attachment = carla.AttachmentType
        # Candidate (transform, attachment) poses the user can cycle through.
        self._camera_transforms = [
            (carla.Transform(carla.Location(x=-5.5, z=2.5), carla.Rotation(pitch=8.0)), Attachment.SpringArm),
            (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid),
            (carla.Transform(carla.Location(x=5.5, y=1.5, z=1.5)), Attachment.SpringArm),
            (carla.Transform(carla.Location(x=-8.0, z=6.0), carla.Rotation(pitch=6.0)), Attachment.SpringArm),
            (carla.Transform(carla.Location(x=-1, y=-bound_y, z=0.5)), Attachment.Rigid)]
        self.transform_index = 1
        world = self._parent.get_world()
        bp_library = world.get_blueprint_library()
        # Each entry: [blueprint id, color converter, display name, attributes];
        # the resolved blueprint is appended as the last element below.
        self.sensors = [['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}]]
        for item in self.sensors:
            bp = bp_library.find(item[0])
            if item[0].startswith('sensor.camera'):
                bp.set_attribute('image_size_x', str(hud.dim[0]))
                bp.set_attribute('image_size_y', str(hud.dim[1]))
                if bp.has_attribute('gamma'):
                    bp.set_attribute('gamma', str(gamma_correction))
                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)
            item.append(bp)
        self.index = None

    def toggle_camera(self):
        """Cycle to the next camera pose, respawning the sensor there."""
        self.transform_index = (self.transform_index + 1) % len(self._camera_transforms)
        self.set_sensor(self.index, notify=False, force_respawn=True)

    def set_sensor(self, index, notify=True, force_respawn=False):
        """Activate sensor `index`, respawning the actor only when needed."""
        index = index % len(self.sensors)
        needs_respawn = True if self.index is None else \
            (force_respawn or (self.sensors[index][2] != self.sensors[self.index][2]))
        if needs_respawn:
            if self.sensor is not None:
                self.sensor.destroy()
                self.surface = None
            self.sensor = self._parent.get_world().spawn_actor(
                self.sensors[index][-1],
                self._camera_transforms[self.transform_index][0],
                attach_to=self._parent,
                attachment_type=self._camera_transforms[self.transform_index][1])
            # We need to pass the lambda a weak reference to self to avoid
            # circular reference.
            weak_self = weakref.ref(self)
            self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image))
        if notify:
            self.hud.notification(self.sensors[index][2])
        self.index = index

    def next_sensor(self):
        """Switch to the next sensor blueprint in the list."""
        self.set_sensor(self.index + 1)

    def toggle_recording(self):
        """Toggle saving of every received frame to disk."""
        self.recording = not self.recording
        self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))

    def render(self, display):
        """Blit the latest camera frame (if any) onto the pygame display."""
        if self.surface is not None:
            display.blit(self.surface, (0, 0))

    @staticmethod
    def _parse_image(weak_self, image):
        """Sensor callback: convert the raw frame into a pygame surface."""
        self = weak_self()
        if not self:
            return
        if self.sensors[self.index][0].startswith('sensor.camera.dvs'):
            # Example of converting the raw_data from a carla.DVSEventArray
            # sensor into a NumPy array and using it as an image.
            # FIX: 'np.bool' was removed in NumPy 1.24; use 'np.bool_' instead.
            dvs_events = np.frombuffer(image.raw_data, dtype=np.dtype([
                ('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.bool_)]))
            dvs_img = np.zeros((image.height, image.width, 3), dtype=np.uint8)
            # Blue is positive, red is negative
            dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255
            self.surface = pygame.surfarray.make_surface(dvs_img.swapaxes(0, 1))
        else:
            image.convert(self.sensors[self.index][1])
            array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (image.height, image.width, 4))
            # Drop the alpha channel and reverse BGRA channel order for pygame.
            array = array[:, :, :3]
            array = array[:, :, ::-1]
            self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        if self.recording:
            image.save_to_disk('_out/%08d' % image.frame)
# ==============================================================================
# -- IMUSensor -----------------------------------------------------------------
# ==============================================================================
class IMUSensor:
    """Wraps a CARLA IMU sensor attached to a parent actor.

    Exposes the latest clamped accelerometer reading (m/s^2), gyroscope
    reading (deg/s) and compass heading (degrees) as plain attributes.
    """

    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        self.accelerometer = (0.0, 0.0, 0.0)
        self.gyroscope = (0.0, 0.0, 0.0)
        self.compass = 0.0
        sim_world = self._parent.get_world()
        imu_bp = sim_world.get_blueprint_library().find('sensor.other.imu')
        self.sensor = sim_world.spawn_actor(
            imu_bp, carla.Transform(), attach_to=self._parent)
        # A weak reference avoids a circular reference between the sensor
        # callback and this wrapper.
        weak_self = weakref.ref(self)
        self.sensor.listen(
            lambda sensor_data: IMUSensor._IMU_callback(weak_self, sensor_data))

    @staticmethod
    def _IMU_callback(weak_self, sensor_data):
        """Store the latest IMU reading, clamping each value to +/-99.9."""
        self = weak_self()
        if not self:
            return
        lo, hi = -99.9, 99.9

        def clamp(value):
            # Keep the displayed value within the HUD-friendly range.
            return max(lo, min(hi, value))

        accel = sensor_data.accelerometer
        self.accelerometer = (clamp(accel.x), clamp(accel.y), clamp(accel.z))
        gyro = sensor_data.gyroscope
        self.gyroscope = (
            clamp(math.degrees(gyro.x)),
            clamp(math.degrees(gyro.y)),
            clamp(math.degrees(gyro.z)))
        self.compass = math.degrees(sensor_data.compass)
# ==============================================================================
# -- game_loop() ---------------------------------------------------------------
# ==============================================================================
def game_loop(args):
    """Run the pygame/CARLA main loop until the user quits.

    Initialises ROS 2 and pygame, spins a ROS executor on a background
    thread, builds the World/HUD/controller, and ticks the simulation at
    up to 60 FPS.  All resources are torn down in the finally block.

    :param args: parsed command-line namespace (host, port, width, height,
        autopilot, ...).
    """
    rclpy.init()
    pygame.init()
    pygame.font.init()
    world = None
    ros_node = Node('manual_control_node')
    ros_executor = rclpy.executors.MultiThreadedExecutor()
    ros_executor.add_node(ros_node)
    # ROS callbacks are serviced on a dedicated thread so the render loop
    # below can block without starving the executor.
    ros_thread = Thread(target=ros_executor.spin)
    ros_thread.start()
    try:
        client = carla.Client(args.host, args.port)
        client.set_timeout(2.0)
        display = pygame.display.set_mode(
            (args.width, args.height),
            pygame.HWSURFACE | pygame.DOUBLEBUF)
        display.fill((0,0,0))
        pygame.display.flip()
        hud = HUD(args.width, args.height)
        world = World(client.get_world(), hud, ros_node, args)
        controller = KeyboardControl(world, args.autopilot)
        clock = pygame.time.Clock()
        while True:
            clock.tick_busy_loop(60)
            # parse_events() returns truthy when the user asked to quit.
            if controller.parse_events(client, world, clock):
                return
            world.tick(clock)
            world.render(display)
            pygame.display.flip()
    finally:
        print("Destroying things...")
        if (world and world.recording_enabled):
            client.stop_recorder()
        if world is not None:
            # Sensors unregister through the ROS node, so they are destroyed
            # before the node itself is torn down.
            world.surround_sensor.destroy()
            world.surround_sensor2.destroy()
            ros_node.destroy_node()
            rclpy.shutdown()
            world.destroy()
            world = None
        else:
            ros_node.destroy_node()
            rclpy.shutdown()
        pygame.quit()
        ros_thread.join()
# ==============================================================================
# -- main() --------------------------------------------------------------------
# ==============================================================================
def main():
    """Parse command-line arguments, configure logging, and launch the loop.

    Exits cleanly on Ctrl-C (KeyboardInterrupt).
    """
    argparser = argparse.ArgumentParser(
        description='CARLA Manual Control Client')
    argparser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='debug',
        help='print debug information')
    argparser.add_argument(
        '--host',
        metavar='H',
        default='127.0.0.1',
        help='IP of the host server (default: 127.0.0.1)')
    argparser.add_argument(
        '-p', '--port',
        metavar='P',
        default=2000,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '-a', '--autopilot',
        action='store_true',
        help='enable autopilot')
    argparser.add_argument(
        '--res',
        metavar='WIDTHxHEIGHT',
        default='1280x720',
        help='window resolution (default: 1280x720)')
    # FIX: help text previously claimed the default was "vehicle.*", which
    # did not match the actual default value below.
    argparser.add_argument(
        '--filter',
        metavar='PATTERN',
        default='vehicle.tesla.model3',
        help='actor filter (default: "vehicle.tesla.model3")')
    argparser.add_argument(
        '--rolename',
        metavar='NAME',
        default='ego_vehicle',
        help='actor role name (default: "ego_vehicle")')
    argparser.add_argument(
        '--gamma',
        default=2.2,
        type=float,
        help='Gamma correction of the camera (default: 2.2)')
    args = argparser.parse_args()
    # Split "WIDTHxHEIGHT" into two integers.
    args.width, args.height = [int(x) for x in args.res.split('x')]
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    logging.info('listening to server %s:%s', args.host, args.port)
    print(__doc__)
    try:
        game_loop(args)
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
# Script entry point.
if __name__ == '__main__':
    main()
| [
"rclpy.executors.MultiThreadedExecutor",
"pygame.init",
"pygame.quit",
"re.compile",
"numpy.hstack",
"pygame.key.get_mods",
"object_model_msgs.msg.ObjectModel",
"numpy.array",
"copy.deepcopy",
"numpy.sin",
"rclpy.init",
"pygame.font.Font",
"math.hypot",
"logging.info",
"pygame.font.get_d... | [((4593, 4660), 're.compile', 're.compile', (['""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)"""'], {}), "('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')\n", (4603, 4660), False, 'import re\n'), ((5310, 5323), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5316, 5323), True, 'import numpy as np\n'), ((5340, 5353), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5346, 5353), True, 'import numpy as np\n'), ((5380, 5732), 'numpy.array', 'np.array', (['[[cos_angle, -sin_angle, 0, 0, 0, 0, 0, 0], [sin_angle, cos_angle, 0, 0, 0,\n 0, 0, 0], [0, 0, cos_angle, -sin_angle, 0, 0, 0, 0], [0, 0, sin_angle,\n cos_angle, 0, 0, 0, 0], [0, 0, 0, 0, cos_angle, -sin_angle, 0, 0], [0, \n 0, 0, 0, sin_angle, cos_angle, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0], [0, 0, \n 0, 0, 0, 0, 0, 1]]'], {'dtype': '"""float64"""'}), "([[cos_angle, -sin_angle, 0, 0, 0, 0, 0, 0], [sin_angle, cos_angle,\n 0, 0, 0, 0, 0, 0], [0, 0, cos_angle, -sin_angle, 0, 0, 0, 0], [0, 0,\n sin_angle, cos_angle, 0, 0, 0, 0], [0, 0, 0, 0, cos_angle, -sin_angle, \n 0, 0], [0, 0, 0, 0, sin_angle, cos_angle, 0, 0], [0, 0, 0, 0, 0, 0, 1, \n 0], [0, 0, 0, 0, 0, 0, 0, 1]], dtype='float64')\n", (5388, 5732), True, 'import numpy as np\n'), ((5917, 6696), 'numpy.array', 'np.array', (['[[cos_angle, -sin_angle, 0, 0, 0, 0, 0, 0, -rotated_reference[Track.\n STATE_X_IDX]], [-sin_angle, -cos_angle, 0, 0, 0, 0, 0, 0,\n rotated_reference[Track.STATE_Y_IDX]], [0, 0, cos_angle, -sin_angle, 0,\n 0, 0, 0, -rotated_reference[Track.STATE_VELOCITY_X_IDX]], [0, 0, -\n sin_angle, -cos_angle, 0, 0, 0, 0, rotated_reference[Track.\n STATE_VELOCITY_Y_IDX]], [0, 0, 0, 0, cos_angle, -sin_angle, 0, 0, -\n rotated_reference[Track.STATE_ACCELERATION_X_IDX]], [0, 0, 0, 0, -\n sin_angle, -cos_angle, 0, 0, rotated_reference[Track.\n STATE_ACCELERATION_Y_IDX]], [0, 0, 0, 0, 0, 0, 1, 0, -rotated_reference\n [Track.STATE_YAW_IDX]], [0, 0, 0, 0, 0, 0, 0, 1, -rotated_reference[\n Track.STATE_YAW_RATE_IDX]], [0, 
0, 0, 0, 0, 0, 0, 0, 1]]'], {'dtype': '"""float64"""'}), "([[cos_angle, -sin_angle, 0, 0, 0, 0, 0, 0, -rotated_reference[\n Track.STATE_X_IDX]], [-sin_angle, -cos_angle, 0, 0, 0, 0, 0, 0,\n rotated_reference[Track.STATE_Y_IDX]], [0, 0, cos_angle, -sin_angle, 0,\n 0, 0, 0, -rotated_reference[Track.STATE_VELOCITY_X_IDX]], [0, 0, -\n sin_angle, -cos_angle, 0, 0, 0, 0, rotated_reference[Track.\n STATE_VELOCITY_Y_IDX]], [0, 0, 0, 0, cos_angle, -sin_angle, 0, 0, -\n rotated_reference[Track.STATE_ACCELERATION_X_IDX]], [0, 0, 0, 0, -\n sin_angle, -cos_angle, 0, 0, rotated_reference[Track.\n STATE_ACCELERATION_Y_IDX]], [0, 0, 0, 0, 0, 0, 1, 0, -rotated_reference\n [Track.STATE_YAW_IDX]], [0, 0, 0, 0, 0, 0, 0, 1, -rotated_reference[\n Track.STATE_YAW_RATE_IDX]], [0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype='float64')\n", (5925, 6696), True, 'import numpy as np\n'), ((6953, 6966), 'copy.deepcopy', 'deepcopy', (['obj'], {}), '(obj)\n', (6961, 6966), False, 'from copy import deepcopy\n'), ((7471, 7479), 'object_model_msgs.msg.Object', 'Object', ([], {}), '()\n', (7477, 7479), False, 'from object_model_msgs.msg import ObjectModel, Object, Track, Dimensions\n'), ((50958, 50970), 'rclpy.init', 'rclpy.init', ([], {}), '()\n', (50968, 50970), False, 'import rclpy\n'), ((50976, 50989), 'pygame.init', 'pygame.init', ([], {}), '()\n', (50987, 50989), False, 'import pygame\n'), ((50994, 51012), 'pygame.font.init', 'pygame.font.init', ([], {}), '()\n', (51010, 51012), False, 'import pygame\n'), ((51046, 51073), 'rclpy.node.Node', 'Node', (['"""manual_control_node"""'], {}), "('manual_control_node')\n", (51050, 51073), False, 'from rclpy.node import Node\n'), ((51093, 51132), 'rclpy.executors.MultiThreadedExecutor', 'rclpy.executors.MultiThreadedExecutor', ([], {}), '()\n', (51130, 51132), False, 'import rclpy\n'), ((51187, 51219), 'threading.Thread', 'Thread', ([], {'target': 'ros_executor.spin'}), '(target=ros_executor.spin)\n', (51193, 51219), False, 'from threading import Thread\n'), ((52753, 
52819), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CARLA Manual Control Client"""'}), "(description='CARLA Manual Control Client')\n", (52776, 52819), False, 'import argparse\n'), ((54224, 54297), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'log_level'}), "(format='%(levelname)s: %(message)s', level=log_level)\n", (54243, 54297), False, 'import logging\n'), ((54303, 54366), 'logging.info', 'logging.info', (['"""listening to server %s:%s"""', 'args.host', 'args.port'], {}), "('listening to server %s:%s', args.host, args.port)\n", (54315, 54366), False, 'import logging\n'), ((1707, 1733), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1723, 1733), False, 'import os\n'), ((1815, 1860), 'os.path.join', 'os.path.join', (['this_script_path', 'egg_file_path'], {}), '(this_script_path, egg_file_path)\n', (1827, 1860), False, 'import os\n'), ((6797, 6828), 'numpy.hstack', 'np.hstack', (['(obj.track.state, 1)'], {}), '((obj.track.state, 1))\n', (6806, 6828), True, 'import numpy as np\n'), ((7851, 7872), 'math.radians', 'math.radians', (['rot.yaw'], {}), '(rot.yaw)\n', (7863, 7872), False, 'import math\n'), ((7922, 7945), 'math.radians', 'math.radians', (['ang_vel.z'], {}), '(ang_vel.z)\n', (7934, 7945), False, 'import math\n'), ((17956, 17974), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (17972, 17974), False, 'import pygame\n'), ((29826, 29854), 'pygame.font.match_font', 'pygame.font.match_font', (['mono'], {}), '(mono)\n', (29848, 29854), False, 'import pygame\n'), ((29881, 29934), 'pygame.font.Font', 'pygame.font.Font', (['mono', "(12 if os.name == 'nt' else 14)"], {}), "(mono, 12 if os.name == 'nt' else 14)\n", (29897, 29934), False, 'import pygame\n'), ((30258, 30277), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (30275, 30277), False, 'import pygame\n'), ((35952, 35976), 'pygame.Surface', 'pygame.Surface', 
(['self.dim'], {}), '(self.dim)\n', (35966, 35976), False, 'import pygame\n'), ((36126, 36150), 'pygame.Surface', 'pygame.Surface', (['self.dim'], {}), '(self.dim)\n', (36140, 36150), False, 'import pygame\n'), ((37219, 37243), 'pygame.Surface', 'pygame.Surface', (['self.dim'], {}), '(self.dim)\n', (37233, 37243), False, 'import pygame\n'), ((40803, 40827), 'fusion_layer.srv.RegisterSensor.Request', 'RegisterSensor.Request', ([], {}), '()\n', (40825, 40827), False, 'from fusion_layer.srv import RegisterSensor, RemoveSensor\n'), ((41403, 41425), 'fusion_layer.srv.RemoveSensor.Request', 'RemoveSensor.Request', ([], {}), '()\n', (41423, 41425), False, 'from fusion_layer.srv import RegisterSensor, RemoveSensor\n'), ((42640, 42653), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (42646, 42653), True, 'import numpy as np\n'), ((42674, 42687), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (42680, 42687), True, 'import numpy as np\n'), ((42714, 43175), 'numpy.array', 'np.array', (['[[cos_angle, -sin_angle, 0, 0, 0, 0, 0, 0, self.relative_x], [sin_angle,\n cos_angle, 0, 0, 0, 0, 0, 0, self.relative_y], [0, 0, cos_angle, -\n sin_angle, 0, 0, 0, 0, 0], [0, 0, sin_angle, cos_angle, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, cos_angle, -sin_angle, 0, 0, 0], [0, 0, 0, 0, sin_angle,\n cos_angle, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, self.relative_angle], [0,\n 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]]'], {'dtype': '"""float32"""'}), "([[cos_angle, -sin_angle, 0, 0, 0, 0, 0, 0, self.relative_x], [\n sin_angle, cos_angle, 0, 0, 0, 0, 0, 0, self.relative_y], [0, 0,\n cos_angle, -sin_angle, 0, 0, 0, 0, 0], [0, 0, sin_angle, cos_angle, 0, \n 0, 0, 0, 0], [0, 0, 0, 0, cos_angle, -sin_angle, 0, 0, 0], [0, 0, 0, 0,\n sin_angle, cos_angle, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, self.\n relative_angle], [0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, \n 1]], dtype='float32')\n", (42722, 43175), True, 'import numpy as np\n'), ((43473, 43486), 'copy.deepcopy', 'deepcopy', 
(['obj'], {}), '(obj)\n', (43481, 43486), False, 'from copy import deepcopy\n'), ((43566, 43595), 'numpy.linalg.inv', 'np.linalg.inv', (['transformation'], {}), '(transformation)\n', (43579, 43595), True, 'import numpy as np\n'), ((43911, 43924), 'object_model_msgs.msg.ObjectModel', 'ObjectModel', ([], {}), '()\n', (43922, 43924), False, 'from object_model_msgs.msg import ObjectModel, Object, Track, Dimensions\n'), ((44055, 44089), 'functools.partial', 'partial', (['get_relative_obj', 'ego_obj'], {}), '(get_relative_obj, ego_obj)\n', (44062, 44089), False, 'from functools import partial\n'), ((49813, 49830), 'weakref.ref', 'weakref.ref', (['self'], {}), '(self)\n', (49824, 49830), False, 'import weakref\n'), ((50653, 50686), 'math.degrees', 'math.degrees', (['sensor_data.compass'], {}), '(sensor_data.compass)\n', (50665, 50686), False, 'import math\n'), ((51270, 51304), 'carla.Client', 'carla.Client', (['args.host', 'args.port'], {}), '(args.host, args.port)\n', (51282, 51304), False, 'import carla\n'), ((51356, 51447), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(args.width, args.height)', '(pygame.HWSURFACE | pygame.DOUBLEBUF)'], {}), '((args.width, args.height), pygame.HWSURFACE |\n pygame.DOUBLEBUF)\n', (51379, 51447), False, 'import pygame\n'), ((51507, 51528), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (51526, 51528), False, 'import pygame\n'), ((51713, 51732), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (51730, 51732), False, 'import pygame\n'), ((52437, 52450), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (52448, 52450), False, 'import pygame\n'), ((4786, 4808), 're.match', 're.match', (['"""[A-Z].+"""', 'x'], {}), "('[A-Z].+', x)\n", (4794, 4808), False, 'import re\n'), ((6895, 6920), 'numpy.delete', 'np.delete', (['product', '(-1)', '(0)'], {}), '(product, -1, 0)\n', (6904, 6920), True, 'import numpy as np\n'), ((11143, 11217), 'math.hypot', 'math.hypot', (['(location.x - player_location.x)', 
'(location.y - player_location.y)'], {}), '(location.x - player_location.x, location.y - player_location.y)\n', (11153, 11217), False, 'import math\n'), ((17184, 17206), 'carla.VehicleControl', 'carla.VehicleControl', ([], {}), '()\n', (17204, 17206), False, 'import carla\n'), ((29540, 29570), 'pygame.font.get_default_font', 'pygame.font.get_default_font', ([], {}), '()\n', (29568, 29570), False, 'import pygame\n'), ((30042, 30068), 'pygame.font.Font', 'pygame.font.Font', (['mono', '(16)'], {}), '(mono, 16)\n', (30058, 30068), False, 'import pygame\n'), ((33631, 33665), 'pygame.Surface', 'pygame.Surface', (['(250, self.dim[1])'], {}), '((250, self.dim[1]))\n', (33645, 33665), False, 'import pygame\n'), ((41698, 41750), 'math.hypot', 'math.hypot', (['(location.x - self.x)', '(location.y - self.y)'], {}), '(location.x - self.x, location.y - self.y)\n', (41708, 41750), False, 'import math\n'), ((43507, 43538), 'numpy.hstack', 'np.hstack', (['(obj.track.state, 1)'], {}), '((obj.track.state, 1))\n', (43516, 43538), True, 'import numpy as np\n'), ((47195, 47212), 'weakref.ref', 'weakref.ref', (['self'], {}), '(self)\n', (47206, 47212), False, 'import weakref\n'), ((48275, 48331), 'numpy.zeros', 'np.zeros', (['(image.height, image.width, 3)'], {'dtype': 'np.uint8'}), '((image.height, image.width, 3), dtype=np.uint8)\n', (48283, 48331), True, 'import numpy as np\n'), ((48717, 48766), 'numpy.reshape', 'np.reshape', (['array', '(image.height, image.width, 4)'], {}), '(array, (image.height, image.width, 4))\n', (48727, 48766), True, 'import numpy as np\n'), ((49649, 49666), 'carla.Transform', 'carla.Transform', ([], {}), '()\n', (49664, 49666), False, 'import carla\n'), ((51951, 51972), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (51970, 51972), False, 'import pygame\n'), ((52278, 52294), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (52292, 52294), False, 'import rclpy\n'), ((52411, 52427), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', 
(52425, 52427), False, 'import rclpy\n'), ((9042, 9053), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9050, 9053), False, 'import sys\n'), ((10643, 10675), 'numpy.round', 'np.round', (['ego_obj.track.state', '(5)'], {}), '(ego_obj.track.state, 5)\n', (10651, 10675), True, 'import numpy as np\n'), ((13815, 13826), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13823, 13826), False, 'import sys\n'), ((13908, 13935), 'random.choice', 'random.choice', (['spawn_points'], {}), '(spawn_points)\n', (13921, 13935), False, 'import random\n'), ((13957, 13974), 'carla.Transform', 'carla.Transform', ([], {}), '()\n', (13972, 13974), False, 'import carla\n'), ((14035, 14063), 'carla.Location', 'carla.Location', (['(-149)', '(-80)', '(2)'], {}), '(-149, -80, 2)\n', (14049, 14063), False, 'import carla\n'), ((17463, 17484), 'carla.WalkerControl', 'carla.WalkerControl', ([], {}), '()\n', (17482, 17484), False, 'import carla\n'), ((29665, 29688), 'pygame.font.get_fonts', 'pygame.font.get_fonts', ([], {}), '()\n', (29686, 29688), False, 'import pygame\n'), ((38551, 38578), 'numpy.diag', 'np.diag', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (38558, 38578), True, 'import numpy as np\n'), ((40176, 40199), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (40197, 40199), False, 'import datetime\n'), ((40279, 40308), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(2)'}), '(seconds=2)\n', (40297, 40308), False, 'import datetime\n'), ((43670, 43695), 'numpy.delete', 'np.delete', (['product', '(-1)', '(0)'], {}), '(product, -1, 0)\n', (43679, 43695), True, 'import numpy as np\n'), ((14090, 14112), 'carla.Rotation', 'carla.Rotation', ([], {'yaw': '(90)'}), '(yaw=90)\n', (14104, 14112), False, 'import carla\n'), ((26949, 26973), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (26971, 26973), False, 'import pygame\n'), ((29141, 29162), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (29160, 29162), False, 
'import pygame\n'), ((31396, 31416), 'math.hypot', 'math.hypot', (['v.x', 'v.y'], {}), '(v.x, v.y)\n', (31406, 31416), False, 'import math\n'), ((40238, 40261), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (40259, 40261), False, 'import datetime\n'), ((44979, 45008), 'carla.Location', 'carla.Location', ([], {'x': '(-5.5)', 'z': '(2.5)'}), '(x=-5.5, z=2.5)\n', (44993, 45008), False, 'import carla\n'), ((45010, 45035), 'carla.Rotation', 'carla.Rotation', ([], {'pitch': '(8.0)'}), '(pitch=8.0)\n', (45024, 45035), False, 'import carla\n'), ((45090, 45118), 'carla.Location', 'carla.Location', ([], {'x': '(1.6)', 'z': '(1.7)'}), '(x=1.6, z=1.7)\n', (45104, 45118), False, 'import carla\n'), ((45169, 45204), 'carla.Location', 'carla.Location', ([], {'x': '(5.5)', 'y': '(1.5)', 'z': '(1.5)'}), '(x=5.5, y=1.5, z=1.5)\n', (45183, 45204), False, 'import carla\n'), ((45259, 45288), 'carla.Location', 'carla.Location', ([], {'x': '(-8.0)', 'z': '(6.0)'}), '(x=-8.0, z=6.0)\n', (45273, 45288), False, 'import carla\n'), ((45290, 45315), 'carla.Rotation', 'carla.Rotation', ([], {'pitch': '(6.0)'}), '(pitch=6.0)\n', (45304, 45315), False, 'import carla\n'), ((45370, 45409), 'carla.Location', 'carla.Location', ([], {'x': '(-1)', 'y': '(-bound_y)', 'z': '(0.5)'}), '(x=-1, y=-bound_y, z=0.5)\n', (45384, 45409), False, 'import carla\n'), ((48153, 48239), 'numpy.dtype', 'np.dtype', (["[('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.bool)]"], {}), "([('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.\n bool)])\n", (48161, 48239), True, 'import numpy as np\n'), ((48678, 48695), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (48686, 48695), True, 'import numpy as np\n'), ((50423, 50460), 'math.degrees', 'math.degrees', (['sensor_data.gyroscope.x'], {}), '(sensor_data.gyroscope.x)\n', (50435, 50460), False, 'import math\n'), ((50506, 50543), 'math.degrees', 'math.degrees', (['sensor_data.gyroscope.y'], {}), 
'(sensor_data.gyroscope.y)\n', (50518, 50543), False, 'import math\n'), ((50589, 50626), 'math.degrees', 'math.degrees', (['sensor_data.gyroscope.z'], {}), '(sensor_data.gyroscope.z)\n', (50601, 50626), False, 'import math\n'), ((27766, 27803), 'carla.VehicleLightState', 'carla.VehicleLightState', (['self._lights'], {}), '(self._lights)\n', (27789, 27803), False, 'import carla\n'), ((27910, 27934), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (27932, 27934), False, 'import pygame\n'), ((34159, 34218), 'pygame.draw.lines', 'pygame.draw.lines', (['display', '(255, 136, 0)', '(False)', 'points', '(2)'], {}), '(display, (255, 136, 0), False, points, 2)\n', (34176, 34218), False, 'import pygame\n'), ((34413, 34462), 'pygame.Rect', 'pygame.Rect', (['(bar_h_offset, v_offset + 8)', '(6, 6)'], {}), '((bar_h_offset, v_offset + 8), (6, 6))\n', (34424, 34462), False, 'import pygame\n'), ((34487, 34556), 'pygame.draw.rect', 'pygame.draw.rect', (['display', '(255, 255, 255)', 'rect', '(0 if item[1] else 1)'], {}), '(display, (255, 255, 255), rect, 0 if item[1] else 1)\n', (34503, 34556), False, 'import pygame\n'), ((34621, 34678), 'pygame.Rect', 'pygame.Rect', (['(bar_h_offset, v_offset + 8)', '(bar_width, 6)'], {}), '((bar_h_offset, v_offset + 8), (bar_width, 6))\n', (34632, 34678), False, 'import pygame\n'), ((34703, 34761), 'pygame.draw.rect', 'pygame.draw.rect', (['display', '(255, 255, 255)', 'rect_border', '(1)'], {}), '(display, (255, 255, 255), rect_border, 1)\n', (34719, 34761), False, 'import pygame\n'), ((35132, 35180), 'pygame.draw.rect', 'pygame.draw.rect', (['display', '(255, 255, 255)', 'rect'], {}), '(display, (255, 255, 255), rect)\n', (35148, 35180), False, 'import pygame\n'), ((34909, 34980), 'pygame.Rect', 'pygame.Rect', (['(bar_h_offset + f * (bar_width - 6), v_offset + 8)', '(6, 6)'], {}), '((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))\n', (34920, 34980), False, 'import pygame\n'), ((35046, 35107), 'pygame.Rect', 
'pygame.Rect', (['(bar_h_offset, v_offset + 8)', '(f * bar_width, 6)'], {}), '((bar_h_offset, v_offset + 8), (f * bar_width, 6))\n', (35057, 35107), False, 'import pygame\n'), ((18619, 18640), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (18638, 18640), False, 'import pygame\n'), ((18834, 18855), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (18853, 18855), False, 'import pygame\n'), ((23579, 23600), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (23598, 23600), False, 'import pygame\n'), ((23953, 23974), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (23972, 23974), False, 'import pygame\n'), ((24109, 24130), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (24128, 24130), False, 'import pygame\n'), ((19073, 19094), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (19092, 19094), False, 'import pygame\n'), ((19294, 19315), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (19313, 19315), False, 'import pygame\n'), ((19702, 19723), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (19721, 19723), False, 'import pygame\n'), ((20093, 20117), 'carla.Vector3D', 'carla.Vector3D', (['(17)', '(0)', '(0)'], {}), '(17, 0, 0)\n', (20107, 20117), False, 'import carla\n'), ((20453, 20474), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (20472, 20474), False, 'import pygame\n'), ((20592, 20613), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (20611, 20613), False, 'import pygame\n'), ((21112, 21133), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (21131, 21133), False, 'import pygame\n'), ((21941, 21962), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (21960, 21962), False, 'import pygame\n'), ((22000, 22021), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (22019, 22021), False, 'import pygame\n'), ((22312, 22333), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], 
{}), '()\n', (22331, 22333), False, 'import pygame\n'), ((22371, 22392), 'pygame.key.get_mods', 'pygame.key.get_mods', ([], {}), '()\n', (22390, 22392), False, 'import pygame\n')] |
'''
Description:
Author: HCQ
Company(School): UCAS
Email: <EMAIL>
Date: 2021-05-23 18:53:02
LastEditTime: 2021-05-23 21:25:10
FilePath: /sklearn/PointCloud_Classification_using_ML-master/Training/train_randomforest.py
'''
# encoding=utf-8
#######################
# train random forest #
#######################
from sklearn.ensemble import RandomForestClassifier
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import fbeta_score, make_scorer
# load data for training
feature_matrix = np.loadtxt('/home/hcq/data/KITTI_Detection/cutObject/train.txt')
print('the shape of the loaded feature matrix is ', feature_matrix.shape)
data = feature_matrix[:, :-1]
target = feature_matrix[:, -1]
# grid search for tuning hyperparameters
# coarse tune
# params = {
# 'max_depth':[6, 8, 10, 12, 15, 18, 20],
# 'n_estimators':[10, 20, 30, 40, 50, 60, 70],
# 'max_features':[15, 20, 25, 30, 35, 40]
# }
# fine tune
params = {
'max_depth':[10, 11, 12, 13, 14, 15], # optimal 12
'n_estimators':[54, 55, 56, 57, 58], # optimal 56
'max_features':[18, 19, 20, 21, 22] # optimal 20
}
rfc = RandomForestClassifier(
# max_depth=10,
random_state=0,
# n_estimators=10,
# max_features=30,
oob_score=True,
bootstrap=True,
class_weight='balanced'
)
fone_scorer = make_scorer(fbeta_score, beta=1, average='weighted')
clf = GridSearchCV (
rfc,
params,
scoring=fone_scorer,
n_jobs=4,
cv=5,
iid=True,
refit=True
)
clf.fit(data, target) # 训练数据
# print important info
# print(rfc.feature_importances_)
# print(rfc.oob_score_)
print('clf.cv_results_', clf.cv_results_)
print('clf.best_params_', clf.best_params_)
print('clf.best_estimator_', clf.best_estimator_)
print('clf.grid_scores_', clf.grid_scores_)
print('best score', clf.grid_scores_[clf.best_index_])
# save the trained model
from sklearn.externals import joblib
joblib.dump(clf, '/home/hcq/python/sklearn/PointCloud_Classification_using_ML-master/Training/rf20210523.pkl') # 保存模型参数 | [
"sklearn.model_selection.GridSearchCV",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.metrics.make_scorer",
"sklearn.externals.joblib.dump",
"numpy.loadtxt"
] | [((531, 595), 'numpy.loadtxt', 'np.loadtxt', (['"""/home/hcq/data/KITTI_Detection/cutObject/train.txt"""'], {}), "('/home/hcq/data/KITTI_Detection/cutObject/train.txt')\n", (541, 595), True, 'import numpy as np\n'), ((1209, 1308), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(0)', 'oob_score': '(True)', 'bootstrap': '(True)', 'class_weight': '"""balanced"""'}), "(random_state=0, oob_score=True, bootstrap=True,\n class_weight='balanced')\n", (1231, 1308), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1633, 1685), 'sklearn.metrics.make_scorer', 'make_scorer', (['fbeta_score'], {'beta': '(1)', 'average': '"""weighted"""'}), "(fbeta_score, beta=1, average='weighted')\n", (1644, 1685), False, 'from sklearn.metrics import fbeta_score, make_scorer\n'), ((1692, 1780), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['rfc', 'params'], {'scoring': 'fone_scorer', 'n_jobs': '(4)', 'cv': '(5)', 'iid': '(True)', 'refit': '(True)'}), '(rfc, params, scoring=fone_scorer, n_jobs=4, cv=5, iid=True,\n refit=True)\n', (1704, 1780), False, 'from sklearn.model_selection import GridSearchCV\n'), ((2378, 2497), 'sklearn.externals.joblib.dump', 'joblib.dump', (['clf', '"""/home/hcq/python/sklearn/PointCloud_Classification_using_ML-master/Training/rf20210523.pkl"""'], {}), "(clf,\n '/home/hcq/python/sklearn/PointCloud_Classification_using_ML-master/Training/rf20210523.pkl'\n )\n", (2389, 2497), False, 'from sklearn.externals import joblib\n')] |
"""
Geographic coordinate conversion.
"""
import numpy as np
from . import get_ellipsoid
def geodetic_to_spherical(longitude, latitude, height):
"""
Convert from geodetic to geocentric spherical coordinates.
The geodetic datum is defined by the default :class:`harmonica.ReferenceEllipsoid`
set by the :func:`harmonica.set_ellipsoid` function.
The coordinates are converted following [Vermeille2002]_.
Parameters
----------
longitude : array
Longitude coordinates on geodetic coordinate system in degrees.
latitude : array
Latitude coordinates on geodetic coordinate system in degrees.
height : array
Ellipsoidal heights in meters.
Returns
-------
longitude : array
Longitude coordinates on geocentric spherical coordinate system in degrees.
The longitude coordinates are not modified during this conversion.
spherical_latitude : array
Converted latitude coordinates on geocentric spherical coordinate system in
degrees.
radius : array
Converted spherical radius coordinates in meters.
See also
--------
spherical_to_geodetic : Convert from geocentric spherical to geodetic coordinates.
Examples
--------
In the poles, the radius should be the reference ellipsoid's semi-minor axis:
>>> import harmonica as hm
>>> spherical = hm.geodetic_to_spherical(longitude=0, latitude=90, height=0)
>>> print(", ".join("{:.4f}".format(i) for i in spherical))
0.0000, 90.0000, 6356752.3142
>>> print("{:.4f}".format(hm.get_ellipsoid().semiminor_axis))
6356752.3142
In the equator, it should be the semi-major axis:
>>> spherical = hm.geodetic_to_spherical(longitude=0, latitude=0, height=0)
>>> print(", ".join("{:.4f}".format(i) for i in spherical))
0.0000, 0.0000, 6378137.0000
>>> print("{:.4f}".format(hm.get_ellipsoid().semimajor_axis))
6378137.0000
"""
# Get ellipsoid
ellipsoid = get_ellipsoid()
# Convert latitude to radians
latitude_rad = np.radians(latitude)
prime_vertical_radius = ellipsoid.semimajor_axis / np.sqrt(
1 - ellipsoid.first_eccentricity ** 2 * np.sin(latitude_rad) ** 2
)
# Instead of computing X and Y, we only comupute the projection on the XY plane:
# xy_projection = sqrt( X**2 + Y**2 )
xy_projection = (height + prime_vertical_radius) * np.cos(latitude_rad)
z_cartesian = (
height + (1 - ellipsoid.first_eccentricity ** 2) * prime_vertical_radius
) * np.sin(latitude_rad)
radius = np.sqrt(xy_projection ** 2 + z_cartesian ** 2)
spherical_latitude = np.degrees(np.arcsin(z_cartesian / radius))
return longitude, spherical_latitude, radius
def spherical_to_geodetic(longitude, spherical_latitude, radius):
"""
Convert from geocentric spherical to geodetic coordinates.
The geodetic datum is defined by the default :class:`harmonica.ReferenceEllipsoid`
set by the :func:`harmonica.set_ellipsoid` function.
The coordinates are converted following [Vermeille2002]_.
Parameters
----------
longitude : array
Longitude coordinates on geocentric spherical coordinate system in degrees.
spherical_latitude : array
Latitude coordinates on geocentric spherical coordinate system in degrees.
radius : array
Spherical radius coordinates in meters.
Returns
-------
longitude : array
Longitude coordinates on geodetic coordinate system in degrees.
The longitude coordinates are not modified during this conversion.
latitude : array
Converted latitude coordinates on geodetic coordinate system in degrees.
height : array
Converted ellipsoidal height coordinates in meters.
See also
--------
geodetic_to_spherical : Convert from geodetic to geocentric spherical coordinates.
Examples
--------
In the poles and equator, using the semi-minor or semi-major axis of the ellipsoid
as the radius should yield 0 height:
>>> import harmonica as hm
>>> geodetic = hm.spherical_to_geodetic(
... longitude=0, spherical_latitude=90, radius=hm.get_ellipsoid().semiminor_axis
... )
>>> print(", ".join("{:.1f}".format(i) for i in geodetic))
0.0, 90.0, 0.0
>>> geodetic = hm.spherical_to_geodetic(
... longitude=0, spherical_latitude=0, radius=hm.get_ellipsoid().semimajor_axis
... )
>>> print(", ".join("{:.1f}".format(i) for i in geodetic))
0.0, 0.0, 0.0
>>> geodetic = hm.spherical_to_geodetic(
... longitude=0,
... spherical_latitude=-90,
... radius=hm.get_ellipsoid().semiminor_axis + 2
... )
>>> print(", ".join("{:.1f}".format(i) for i in geodetic))
0.0, -90.0, 2.0
"""
# Get ellipsoid
ellipsoid = get_ellipsoid()
k, big_d, big_z = _spherical_to_geodetic_parameters(spherical_latitude, radius)
latitude = np.degrees(
2 * np.arctan(big_z / (big_d + np.sqrt(big_d ** 2 + big_z ** 2)))
)
height = (
(k + ellipsoid.first_eccentricity ** 2 - 1)
/ k
* np.sqrt(big_d ** 2 + big_z ** 2)
)
return longitude, latitude, height
def _spherical_to_geodetic_parameters(spherical_latitude, radius):
"Compute parameters for spherical to geodetic coordinates conversion"
# Get ellipsoid
ellipsoid = get_ellipsoid()
# Convert latitude to radians
spherical_latitude_rad = np.radians(spherical_latitude)
big_z = radius * np.sin(spherical_latitude_rad)
p_0 = (
radius ** 2
* np.cos(spherical_latitude_rad) ** 2
/ ellipsoid.semimajor_axis ** 2
)
q_0 = (
(1 - ellipsoid.first_eccentricity ** 2)
/ ellipsoid.semimajor_axis ** 2
* big_z ** 2
)
r_0 = (p_0 + q_0 - ellipsoid.first_eccentricity ** 4) / 6
s_0 = ellipsoid.first_eccentricity ** 4 * p_0 * q_0 / 4 / r_0 ** 3
t_0 = np.cbrt(1 + s_0 + np.sqrt(2 * s_0 + s_0 ** 2))
u_0 = r_0 * (1 + t_0 + 1 / t_0)
v_0 = np.sqrt(u_0 ** 2 + q_0 * ellipsoid.first_eccentricity ** 4)
w_0 = ellipsoid.first_eccentricity ** 2 * (u_0 + v_0 - q_0) / 2 / v_0
k = np.sqrt(u_0 + v_0 + w_0 ** 2) - w_0
big_d = (
k
* radius
* np.cos(spherical_latitude_rad)
/ (k + ellipsoid.first_eccentricity ** 2)
)
return k, big_d, big_z
| [
"numpy.radians",
"numpy.sqrt",
"numpy.arcsin",
"numpy.cos",
"numpy.sin"
] | [((2065, 2085), 'numpy.radians', 'np.radians', (['latitude'], {}), '(latitude)\n', (2075, 2085), True, 'import numpy as np\n'), ((2576, 2622), 'numpy.sqrt', 'np.sqrt', (['(xy_projection ** 2 + z_cartesian ** 2)'], {}), '(xy_projection ** 2 + z_cartesian ** 2)\n', (2583, 2622), True, 'import numpy as np\n'), ((5465, 5495), 'numpy.radians', 'np.radians', (['spherical_latitude'], {}), '(spherical_latitude)\n', (5475, 5495), True, 'import numpy as np\n'), ((6035, 6094), 'numpy.sqrt', 'np.sqrt', (['(u_0 ** 2 + q_0 * ellipsoid.first_eccentricity ** 4)'], {}), '(u_0 ** 2 + q_0 * ellipsoid.first_eccentricity ** 4)\n', (6042, 6094), True, 'import numpy as np\n'), ((2412, 2432), 'numpy.cos', 'np.cos', (['latitude_rad'], {}), '(latitude_rad)\n', (2418, 2432), True, 'import numpy as np\n'), ((2542, 2562), 'numpy.sin', 'np.sin', (['latitude_rad'], {}), '(latitude_rad)\n', (2548, 2562), True, 'import numpy as np\n'), ((2659, 2690), 'numpy.arcsin', 'np.arcsin', (['(z_cartesian / radius)'], {}), '(z_cartesian / radius)\n', (2668, 2690), True, 'import numpy as np\n'), ((5129, 5161), 'numpy.sqrt', 'np.sqrt', (['(big_d ** 2 + big_z ** 2)'], {}), '(big_d ** 2 + big_z ** 2)\n', (5136, 5161), True, 'import numpy as np\n'), ((5517, 5547), 'numpy.sin', 'np.sin', (['spherical_latitude_rad'], {}), '(spherical_latitude_rad)\n', (5523, 5547), True, 'import numpy as np\n'), ((6177, 6206), 'numpy.sqrt', 'np.sqrt', (['(u_0 + v_0 + w_0 ** 2)'], {}), '(u_0 + v_0 + w_0 ** 2)\n', (6184, 6206), True, 'import numpy as np\n'), ((5960, 5987), 'numpy.sqrt', 'np.sqrt', (['(2 * s_0 + s_0 ** 2)'], {}), '(2 * s_0 + s_0 ** 2)\n', (5967, 5987), True, 'import numpy as np\n'), ((6264, 6294), 'numpy.cos', 'np.cos', (['spherical_latitude_rad'], {}), '(spherical_latitude_rad)\n', (6270, 6294), True, 'import numpy as np\n'), ((5590, 5620), 'numpy.cos', 'np.cos', (['spherical_latitude_rad'], {}), '(spherical_latitude_rad)\n', (5596, 5620), True, 'import numpy as np\n'), ((2198, 2218), 'numpy.sin', 'np.sin', 
(['latitude_rad'], {}), '(latitude_rad)\n', (2204, 2218), True, 'import numpy as np\n'), ((4999, 5031), 'numpy.sqrt', 'np.sqrt', (['(big_d ** 2 + big_z ** 2)'], {}), '(big_d ** 2 + big_z ** 2)\n', (5006, 5031), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
:File: constants.py
:Purpose: File containing the constants that will be used in the other files. This will
allow to avoid "magic numbers" in the code and also to easily change these
constants if we later need them more or less precises
:Author: <NAME> 2018
"""
# # Imports
import numpy as np
#: General
#: How many days per year
days_per_year = 365 # [days/years]
#: How many radians per micro arcsec
rad_per_mas = 2*np.pi/(1000*360*3600) # [radiants/mas] radiants per milli-arcsec
rad_per_arcsec = 2*np.pi/(360*3600) # [radiants/arcsec] radiants per arcsec
rad_per_deg = (2*np.pi)/360 # [radiants/degrees]
pc_per_km = 3.24078e-14 # [km/pc] kilometers per parsec
sec_per_day = 3600*24 # [sec/day] seconds per day
km_per_Au = 149598000 # number of kilometers in one Au
AU_per_pc = 4.8481705933824e-6 # [au/pc] austronomical unit per parsec
Au_per_km = 1/km_per_Au
c = 299.792458e6 # [m/s]
Au_per_Au = 1 # useless, just to make computations explicit?
# # Proper to Gaia
# constant specific to gaia that have been chosen. (see e.g. )
epsilon = 23 + 26/60 + 21.448/3600 # [deg] obiquity of equator chosen to be 23º 26' 21.448''
Gamma_c = np.radians(106.5) # [rad] basic angle, Gamma_c = arccos(f_p' f_F)
xi = 55 # [deg] angle between the z-axis and s (s being the nominal sun direction)
S = 4.035 # [deg/day] for a xi of 55°. S=|dz/dlambda|
w_z = 60 # [arcsec/s] z component of the inertial spin vector w (small omega)
#: Epoch time
#: The reference epoch is J2000 but it is taken into account in how we count time thus t_ep is 0
t_ep = 0
# temporary
sat_angle = np.radians(45) # when simulating the attitude, the rotated angle
def useless_function():
"""
This function does nothing. Here only for testing purpose.
"""
pass
| [
"numpy.radians"
] | [((1183, 1200), 'numpy.radians', 'np.radians', (['(106.5)'], {}), '(106.5)\n', (1193, 1200), True, 'import numpy as np\n'), ((1613, 1627), 'numpy.radians', 'np.radians', (['(45)'], {}), '(45)\n', (1623, 1627), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
from lifelines.fitters import UnivariateFitter
from lifelines.fitters.nelson_aalen_fitter import NelsonAalenFitter
from lifelines.utils import median_survival_times
class BreslowFlemingHarringtonFitter(UnivariateFitter):
"""
Class for fitting the Breslow-Fleming-Harrington estimate for the survival function. This estimator
is a biased estimator of the survival function but is more stable when the popualtion is small and
there are too few early truncation times, it may happen that is the number of patients at risk and
the number of deaths is the same.
Mathematically, the NAF estimator is the negative logarithm of the BFH estimator.
BreslowFlemingHarringtonFitter(alpha=0.95)
alpha: The alpha value associated with the confidence intervals.
"""
def fit(self, durations, event_observed=None, timeline=None, entry=None,
label='BFH_estimate', alpha=None, ci_labels=None):
"""
Parameters:
duration: an array, or pd.Series, of length n -- duration subject was observed for
timeline: return the best estimate at the values in timelines (postively increasing)
event_observed: an array, or pd.Series, of length n -- True if the the death was observed, False if the event
was lost (right-censored). Defaults all True if event_observed==None
entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
useful for left-truncated observations, i.e the birth event was not observed.
If None, defaults to all 0 (all birth events observed.)
label: a string to name the column of the estimate.
alpha: the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
ci_labels: add custom column names to the generated confidence intervals
as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
Returns:
self, with new properties like 'survival_function_'.
"""
self._label = label
alpha = alpha if alpha is not None else self.alpha
naf = NelsonAalenFitter(alpha)
naf.fit(durations, event_observed=event_observed, timeline=timeline, label=label, entry=entry, ci_labels=ci_labels)
self.durations, self.event_observed, self.timeline, self.entry, self.event_table = \
naf.durations, naf.event_observed, naf.timeline, naf.entry, naf.event_table
# estimation
self.survival_function_ = np.exp(-naf.cumulative_hazard_)
self.confidence_interval_ = np.exp(-naf.confidence_interval_)
self.median_ = median_survival_times(self.survival_function_)
# estimation methods
self._estimation_method = "survival_function_"
self._estimate_name = "survival_function_"
self._predict_label = label
self._update_docstrings()
# plotting functions
self.plot_survival_function = self.plot
return self
| [
"numpy.exp",
"lifelines.fitters.nelson_aalen_fitter.NelsonAalenFitter",
"lifelines.utils.median_survival_times"
] | [((2297, 2321), 'lifelines.fitters.nelson_aalen_fitter.NelsonAalenFitter', 'NelsonAalenFitter', (['alpha'], {}), '(alpha)\n', (2314, 2321), False, 'from lifelines.fitters.nelson_aalen_fitter import NelsonAalenFitter\n'), ((2683, 2714), 'numpy.exp', 'np.exp', (['(-naf.cumulative_hazard_)'], {}), '(-naf.cumulative_hazard_)\n', (2689, 2714), True, 'import numpy as np\n'), ((2751, 2784), 'numpy.exp', 'np.exp', (['(-naf.confidence_interval_)'], {}), '(-naf.confidence_interval_)\n', (2757, 2784), True, 'import numpy as np\n'), ((2808, 2854), 'lifelines.utils.median_survival_times', 'median_survival_times', (['self.survival_function_'], {}), '(self.survival_function_)\n', (2829, 2854), False, 'from lifelines.utils import median_survival_times\n')] |
import pickle
import unittest
import numpy as np
from softlearning.replay_pools.trajectory_replay_pool import (
TrajectoryReplayPool)
def create_pool(max_size=100):
return TrajectoryReplayPool(
observation_space=None,
action_space=None,
max_size=max_size,
)
def verify_pools_match(pool1, pool2):
for key in pool2.__dict__:
if key == '_trajectories':
pool1_trajectories = pool1.__dict__[key]
pool2_trajectories = pool2.__dict__[key]
for pool1_trajectory, pool2_trajectory in (
zip(pool1_trajectories, pool2_trajectories)):
assert pool1_trajectory.keys() == pool2_trajectory.keys()
for field_name in pool1_trajectory.keys():
np.testing.assert_array_equal(
pool1_trajectory[field_name],
pool2_trajectory[field_name],
f"key '{key}', field_name '{field_name}' doesn't match"
)
else:
np.testing.assert_array_equal(
pool1.__dict__[key],
pool2.__dict__[key],
f"key '{key}' doesn't match")
class TrajectoryReplayPoolTest(unittest.TestCase):
def setUp(self):
self.pool = create_pool(10)
def test_save_load_latest_experience(self):
self.assertEqual(self.pool._trajectories_since_save, 0)
num_trajectories_per_save = self.pool._max_size // 2
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories_per_save)
]
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.num_trajectories, num_trajectories_per_save)
self.assertEqual(self.pool.size,
num_trajectories_per_save * trajectory_length)
self.assertEqual(self.pool._trajectories_since_save,
num_trajectories_per_save)
self.pool.save_latest_experience('./tmp/pool_1.pkl')
self.assertEqual(self.pool._trajectories_since_save, 0)
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.size,
self.pool._max_size * trajectory_length)
self.assertEqual(self.pool._trajectories_since_save,
num_trajectories_per_save)
self.pool.save_latest_experience('./tmp/pool_2.pkl')
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.size,
self.pool._max_size * trajectory_length)
self.assertEqual(self.pool._trajectories_since_save,
num_trajectories_per_save)
self.pool.save_latest_experience('./tmp/pool_3.pkl')
pool = create_pool(self.pool._max_size)
self.assertEqual(pool.size, 0)
pool.load_experience('./tmp/pool_1.pkl')
self.assertEqual(pool.num_trajectories, self.pool._max_size // 2)
self.assertEqual(pool.size,
(self.pool._max_size // 2) * trajectory_length)
pool.load_experience('./tmp/pool_2.pkl')
self.assertEqual(pool.num_trajectories, self.pool._max_size)
self.assertEqual(pool.size,
(self.pool._max_size) * trajectory_length)
self.assertEqual(pool.size, self.pool.size)
pool.load_experience('./tmp/pool_3.pkl')
self.assertEqual(pool.size, self.pool.size)
self.assertEqual(pool.size,
(self.pool._max_size) * trajectory_length)
for trajectory1, trajectory2 in zip(
pool._trajectories, self.pool._trajectories):
self.assertEqual(trajectory1.keys(), trajectory2.keys())
for key in trajectory1:
np.testing.assert_array_equal(trajectory1[key], trajectory2[key])
def test_save_load_latest_experience_empty_pool(self):
self.assertEqual(self.pool._trajectories_since_save, 0)
self.pool.save_latest_experience('./tmp/pool_1.pkl')
pool = create_pool(self.pool._max_size)
pool.load_experience('./tmp/pool_1.pkl')
self.assertEqual(pool.size, 0)
def test_save_latest_experience_with_overflown_pool(self):
self.assertEqual(self.pool._trajectories_since_save, 0)
num_trajectories = self.pool._max_size + 2
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.num_trajectories, self.pool._max_size)
self.assertEqual(self.pool._trajectories_since_save,
self.pool._max_size + 2)
self.pool.save_latest_experience('./tmp/pool_1.pkl')
pool = create_pool(self.pool._max_size)
self.assertEqual(pool.size, 0)
import gzip
with gzip.open('./tmp/pool_1.pkl', 'rb') as f:
latest_trajectories = pickle.load(f)
self.assertEqual(len(latest_trajectories), self.pool._max_size)
pool.load_experience('./tmp/pool_1.pkl')
self.assertEqual(pool.size,
self.pool._max_size * trajectory_length)
for trajectory1, trajectory2 in zip(
trajectories, self.pool._trajectories):
self.assertEqual(trajectory1.keys(), trajectory2.keys())
for field_name in trajectory1:
np.testing.assert_array_equal(
trajectory1[field_name], trajectory2[field_name])
def test_serialize_deserialize_full(self):
# Fill fields with random data
num_trajectories = self.pool._max_size + 2
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.num_trajectories, self.pool._max_size)
self.assertEqual(self.pool.size,
trajectory_length * self.pool._max_size)
serialized = pickle.dumps(self.pool)
deserialized = pickle.loads(serialized)
verify_pools_match(self.pool, deserialized)
self.assertNotEqual(id(self.pool), id(deserialized))
self.assertEqual(deserialized.num_trajectories, self.pool._max_size)
self.assertEqual(deserialized.size,
trajectory_length * self.pool._max_size)
def test_serialize_deserialize_not_full(self):
# Fill fields with random data
num_trajectories = self.pool._max_size - 2
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
self.assertEqual(self.pool.num_trajectories, num_trajectories)
self.assertEqual(self.pool.size,
num_trajectories * trajectory_length)
serialized = pickle.dumps(self.pool)
deserialized = pickle.loads(serialized)
verify_pools_match(self.pool, deserialized)
self.assertNotEqual(id(self.pool), id(deserialized))
self.assertEqual(deserialized.num_trajectories, num_trajectories)
self.assertEqual(deserialized.size,
num_trajectories * trajectory_length)
def test_serialize_deserialize_empty(self):
# Fill fields with random data
self.assertEqual(self.pool.num_trajectories, 0)
self.assertEqual(self.pool.size, 0)
serialized = pickle.dumps(self.pool)
deserialized = pickle.loads(serialized)
verify_pools_match(self.pool, deserialized)
self.assertNotEqual(id(self.pool), id(deserialized))
self.assertEqual(deserialized.num_trajectories, 0)
self.assertEqual(deserialized.size, 0)
def test_add_path(self):
for value in range(self.pool._max_size):
path = {
'field1': np.array([value]),
'field2': np.array([-value*2]),
}
self.pool.add_path(path)
self.assertEqual(len(self.pool._trajectories), self.pool._max_size)
for i, trajectory in enumerate(self.pool._trajectories):
np.testing.assert_array_equal(trajectory['field1'], [i])
np.testing.assert_array_equal(trajectory['field2'], [-i * 2])
def test_add_paths(self):
num_trajectories = 4
path_length = 10
paths = [
{
'field1': np.arange(path_length)[:, None],
'field2': -np.arange(path_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(paths)
self.assertEqual(self.pool.num_trajectories, num_trajectories)
self.assertEqual(self.pool.size, num_trajectories * path_length)
for trajectory in self.pool._trajectories:
np.testing.assert_array_equal(
trajectory['field1'],
np.arange(path_length)[:, None])
np.testing.assert_array_equal(
trajectory['field2'],
-np.arange(path_length)[:, None] * 2)
def test_random_batch(self):
empty_pool_batch = self.pool.random_batch(4)
self.assertFalse(empty_pool_batch)
num_trajectories = 4
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
full_pool_batch = self.pool.random_batch(4)
for key, values in full_pool_batch.items():
self.assertEqual(values.shape, (4, 1))
self.assertTrue(np.all(full_pool_batch['field1'] >= 0))
self.assertTrue(np.all(full_pool_batch['field2'] % 2 == 0))
self.assertTrue(np.all(full_pool_batch['field2'] <= 0))
def test_random_batch_with_variable_length_trajectories(self):
batch_size = 256
num_trajectories = 20
trajectories = [
{
'field1': np.arange(np.random.randint(50, 1000))[:, None],
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
batch = self.pool.random_batch(batch_size)
for key, values in batch.items():
self.assertEqual(values.shape, (batch_size, 1))
def test_last_n_batch(self):
empty_pool_batch = self.pool.last_n_batch(4)
self.assertFalse(empty_pool_batch)
num_trajectories = 4
trajectory_length = 10
trajectories = [
{
'field1': i * np.arange(trajectory_length)[:, None],
'field2': -i * np.arange(trajectory_length)[:, None] * 2,
}
for i in range(num_trajectories)
]
self.pool.add_paths(trajectories)
full_pool_batch = self.pool.last_n_batch(int(trajectory_length * 2.5))
for key, values in full_pool_batch.items():
expected = np.concatenate((
trajectories[-3][key][trajectory_length // 2:],
trajectories[-2][key],
trajectories[-1][key]
))
np.testing.assert_array_equal(values, expected)
self.assertEqual(values.shape, (2.5 * trajectory_length, 1))
self.pool.add_paths(trajectories)
full_pool_batch = self.pool.last_n_batch(int(trajectory_length * 2))
for key, values in full_pool_batch.items():
expected = np.concatenate((
trajectories[-2][key],
trajectories[-1][key]
))
np.testing.assert_array_equal(values, expected)
self.assertEqual(values.shape, (2 * trajectory_length, 1))
def test_last_n_batch_with_overflown_pool(self):
num_trajectories = self.pool._max_size + 2
trajectory_length = 10
trajectories = [
{
'field1': i * np.arange(trajectory_length)[:, None],
'field2': -i * np.arange(trajectory_length)[:, None] * 2,
}
for i in range(num_trajectories)
]
self.pool.add_paths(trajectories)
full_pool_batch = self.pool.last_n_batch(int(trajectory_length * 2.5))
for key, values in full_pool_batch.items():
expected = np.concatenate((
trajectories[-3][key][trajectory_length // 2:],
trajectories[-2][key],
trajectories[-1][key]
))
np.testing.assert_array_equal(values, expected)
self.assertEqual(values.shape, (2.5 * trajectory_length, 1))
def test_batch_by_indices(self):
with self.assertRaises(TypeError):
self.pool.batch_by_indices(np.array([-1, 2, 4]))
num_trajectories = 4
trajectory_length = 10
trajectories = [
{
'field1': np.arange(trajectory_length)[:, None],
'field2': -np.arange(trajectory_length)[:, None] * 2,
}
for _ in range(num_trajectories)
]
self.pool.add_paths(trajectories)
batch = self.pool.batch_by_indices(
np.repeat(np.arange(num_trajectories), trajectory_length),
np.tile(np.flip(np.arange(trajectory_length)), num_trajectories))
for field_name, values in batch.items():
field_expected = np.concatenate([
np.flip(trajectory[field_name]) for trajectory in trajectories])
np.testing.assert_array_equal(
batch[field_name],
field_expected)
if __name__ == '__main__':
unittest.main()
| [
"numpy.flip",
"softlearning.replay_pools.trajectory_replay_pool.TrajectoryReplayPool",
"gzip.open",
"pickle.dumps",
"pickle.loads",
"numpy.arange",
"pickle.load",
"numpy.array",
"numpy.random.randint",
"numpy.concatenate",
"unittest.main",
"numpy.all",
"numpy.testing.assert_array_equal"
] | [((183, 270), 'softlearning.replay_pools.trajectory_replay_pool.TrajectoryReplayPool', 'TrajectoryReplayPool', ([], {'observation_space': 'None', 'action_space': 'None', 'max_size': 'max_size'}), '(observation_space=None, action_space=None, max_size=\n max_size)\n', (203, 270), False, 'from softlearning.replay_pools.trajectory_replay_pool import TrajectoryReplayPool\n'), ((14336, 14351), 'unittest.main', 'unittest.main', ([], {}), '()\n', (14349, 14351), False, 'import unittest\n'), ((6489, 6512), 'pickle.dumps', 'pickle.dumps', (['self.pool'], {}), '(self.pool)\n', (6501, 6512), False, 'import pickle\n'), ((6536, 6560), 'pickle.loads', 'pickle.loads', (['serialized'], {}), '(serialized)\n', (6548, 6560), False, 'import pickle\n'), ((7518, 7541), 'pickle.dumps', 'pickle.dumps', (['self.pool'], {}), '(self.pool)\n', (7530, 7541), False, 'import pickle\n'), ((7565, 7589), 'pickle.loads', 'pickle.loads', (['serialized'], {}), '(serialized)\n', (7577, 7589), False, 'import pickle\n'), ((8096, 8119), 'pickle.dumps', 'pickle.dumps', (['self.pool'], {}), '(self.pool)\n', (8108, 8119), False, 'import pickle\n'), ((8143, 8167), 'pickle.loads', 'pickle.loads', (['serialized'], {}), '(serialized)\n', (8155, 8167), False, 'import pickle\n'), ((1051, 1156), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['pool1.__dict__[key]', 'pool2.__dict__[key]', 'f"""key \'{key}\' doesn\'t match"""'], {}), '(pool1.__dict__[key], pool2.__dict__[key],\n f"key \'{key}\' doesn\'t match")\n', (1080, 1156), True, 'import numpy as np\n'), ((5180, 5215), 'gzip.open', 'gzip.open', (['"""./tmp/pool_1.pkl"""', '"""rb"""'], {}), "('./tmp/pool_1.pkl', 'rb')\n", (5189, 5215), False, 'import gzip\n'), ((5256, 5270), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5267, 5270), False, 'import pickle\n'), ((8787, 8843), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["trajectory['field1']", '[i]'], {}), "(trajectory['field1'], [i])\n", (8816, 8843), 
True, 'import numpy as np\n'), ((8856, 8917), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["trajectory['field2']", '[-i * 2]'], {}), "(trajectory['field2'], [-i * 2])\n", (8885, 8917), True, 'import numpy as np\n'), ((10384, 10422), 'numpy.all', 'np.all', (["(full_pool_batch['field1'] >= 0)"], {}), "(full_pool_batch['field1'] >= 0)\n", (10390, 10422), True, 'import numpy as np\n'), ((10449, 10491), 'numpy.all', 'np.all', (["(full_pool_batch['field2'] % 2 == 0)"], {}), "(full_pool_batch['field2'] % 2 == 0)\n", (10455, 10491), True, 'import numpy as np\n'), ((10517, 10555), 'numpy.all', 'np.all', (["(full_pool_batch['field2'] <= 0)"], {}), "(full_pool_batch['field2'] <= 0)\n", (10523, 10555), True, 'import numpy as np\n'), ((11700, 11814), 'numpy.concatenate', 'np.concatenate', (['(trajectories[-3][key][trajectory_length // 2:], trajectories[-2][key],\n trajectories[-1][key])'], {}), '((trajectories[-3][key][trajectory_length // 2:],\n trajectories[-2][key], trajectories[-1][key]))\n', (11714, 11814), True, 'import numpy as np\n'), ((11885, 11932), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['values', 'expected'], {}), '(values, expected)\n', (11914, 11932), True, 'import numpy as np\n'), ((12202, 12264), 'numpy.concatenate', 'np.concatenate', (['(trajectories[-2][key], trajectories[-1][key])'], {}), '((trajectories[-2][key], trajectories[-1][key]))\n', (12216, 12264), True, 'import numpy as np\n'), ((12323, 12370), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['values', 'expected'], {}), '(values, expected)\n', (12352, 12370), True, 'import numpy as np\n'), ((13027, 13141), 'numpy.concatenate', 'np.concatenate', (['(trajectories[-3][key][trajectory_length // 2:], trajectories[-2][key],\n trajectories[-1][key])'], {}), '((trajectories[-3][key][trajectory_length // 2:],\n trajectories[-2][key], trajectories[-1][key]))\n', (13041, 13141), True, 'import numpy as np\n'), ((13212, 13259), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['values', 'expected'], {}), '(values, expected)\n', (13241, 13259), True, 'import numpy as np\n'), ((14205, 14269), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['batch[field_name]', 'field_expected'], {}), '(batch[field_name], field_expected)\n', (14234, 14269), True, 'import numpy as np\n'), ((3928, 3993), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['trajectory1[key]', 'trajectory2[key]'], {}), '(trajectory1[key], trajectory2[key])\n', (3957, 3993), True, 'import numpy as np\n'), ((5729, 5808), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['trajectory1[field_name]', 'trajectory2[field_name]'], {}), '(trajectory1[field_name], trajectory2[field_name])\n', (5758, 5808), True, 'import numpy as np\n'), ((8514, 8531), 'numpy.array', 'np.array', (['[value]'], {}), '([value])\n', (8522, 8531), True, 'import numpy as np\n'), ((8559, 8581), 'numpy.array', 'np.array', (['[-value * 2]'], {}), '([-value * 2])\n', (8567, 8581), True, 'import numpy as np\n'), ((13453, 13473), 'numpy.array', 'np.array', (['[-1, 2, 4]'], {}), '([-1, 2, 4])\n', (13461, 13473), True, 'import numpy as np\n'), ((13889, 13916), 'numpy.arange', 'np.arange', (['num_trajectories'], {}), '(num_trajectories)\n', (13898, 13916), True, 'import numpy as np\n'), ((784, 938), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['pool1_trajectory[field_name]', 'pool2_trajectory[field_name]', 'f"""key \'{key}\', field_name \'{field_name}\' doesn\'t match"""'], {}), '(pool1_trajectory[field_name],\n pool2_trajectory[field_name],\n f"key \'{key}\', field_name \'{field_name}\' doesn\'t match")\n', (813, 938), True, 'import numpy as np\n'), ((1583, 1611), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (1592, 1611), True, 'import numpy as np\n'), ((4591, 4619), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), 
'(trajectory_length)\n', (4600, 4619), True, 'import numpy as np\n'), ((6064, 6092), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (6073, 6092), True, 'import numpy as np\n'), ((7100, 7128), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (7109, 7128), True, 'import numpy as np\n'), ((9061, 9083), 'numpy.arange', 'np.arange', (['path_length'], {}), '(path_length)\n', (9070, 9083), True, 'import numpy as np\n'), ((9557, 9579), 'numpy.arange', 'np.arange', (['path_length'], {}), '(path_length)\n', (9566, 9579), True, 'import numpy as np\n'), ((9981, 10009), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (9990, 10009), True, 'import numpy as np\n'), ((13601, 13629), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (13610, 13629), True, 'import numpy as np\n'), ((13966, 13994), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (13975, 13994), True, 'import numpy as np\n'), ((14128, 14159), 'numpy.flip', 'np.flip', (['trajectory[field_name]'], {}), '(trajectory[field_name])\n', (14135, 14159), True, 'import numpy as np\n'), ((10755, 10782), 'numpy.random.randint', 'np.random.randint', (['(50)', '(1000)'], {}), '(50, 1000)\n', (10772, 10782), True, 'import numpy as np\n'), ((11320, 11348), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (11329, 11348), True, 'import numpy as np\n'), ((12647, 12675), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (12656, 12675), True, 'import numpy as np\n'), ((1649, 1677), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (1658, 1677), True, 'import numpy as np\n'), ((4657, 4685), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (4666, 4685), True, 'import numpy as np\n'), ((6130, 6158), 'numpy.arange', 'np.arange', 
(['trajectory_length'], {}), '(trajectory_length)\n', (6139, 6158), True, 'import numpy as np\n'), ((7166, 7194), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (7175, 7194), True, 'import numpy as np\n'), ((9121, 9143), 'numpy.arange', 'np.arange', (['path_length'], {}), '(path_length)\n', (9130, 9143), True, 'import numpy as np\n'), ((9688, 9710), 'numpy.arange', 'np.arange', (['path_length'], {}), '(path_length)\n', (9697, 9710), True, 'import numpy as np\n'), ((10047, 10075), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (10056, 10075), True, 'import numpy as np\n'), ((11390, 11418), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (11399, 11418), True, 'import numpy as np\n'), ((12717, 12745), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (12726, 12745), True, 'import numpy as np\n'), ((13667, 13695), 'numpy.arange', 'np.arange', (['trajectory_length'], {}), '(trajectory_length)\n', (13676, 13695), True, 'import numpy as np\n')] |
import os
import math
import numpy as np
import datetime as dt
from numpy import newaxis
from core.utils import Timer
from keras.layers import Dense, Activation, Dropout, LSTM
from keras.models import Sequential, load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.regularizers import l1,l2,l1_l2
import tensorflow as tf
import tempfile
import tensorflow_model_optimization as tfmot
from tensorflow.keras.models import load_model
import sys
class Model():
    """A class for building, training and inferencing an LSTM model."""

    def __init__(self):
        self.model = Sequential()

    def load_model(self, filepath):
        """Load a previously saved Keras model from *filepath*."""
        print('[Model] Loading model from file %s' % filepath)
        self.model = load_model(filepath)

    def build_model(self, configs):
        """Build and compile the network described by configs['model'].

        Each entry of configs['model']['layers'] is a dict that may hold:
        type ('dense' | 'lstm' | 'dropout'), neurons, rate, activation,
        return_seq, input_timesteps, input_dim and optional L1/L2
        regularisation factors.
        """
        timer = Timer()
        timer.start()

        for layer in configs['model']['layers']:
            neurons = layer.get('neurons')
            dropout_rate = layer.get('rate')
            activation = layer.get('activation')
            return_seq = layer.get('return_seq')
            input_timesteps = layer.get('input_timesteps')
            input_dim = layer.get('input_dim')
            L1 = layer.get('L1')
            # Bug fix: the original read 'L2' only when the key 'L1' existed.
            L2 = layer.get('L2')

            reg = None
            if L1 is not None and L2 is not None:
                reg = l1_l2(l1=L1, l2=L2)
            elif L1 is not None:
                reg = l1(l1=L1)
            elif L2 is not None:
                reg = l2(l2=L2)

            if layer['type'] == 'dense':
                self.model.add(Dense(neurons, activation=activation, kernel_regularizer=reg))
            if layer['type'] == 'lstm':
                self.model.add(LSTM(neurons, input_shape=(input_timesteps, input_dim),
                                    return_sequences=return_seq, kernel_regularizer=reg))
            if layer['type'] == 'dropout':
                self.model.add(Dropout(dropout_rate))

        self.model.compile(loss=configs['model']['loss'], optimizer=configs['model']['optimizer'])
        print('[Model] Model Compiled')
        timer.stop()

    def train(self, x, y, epochs, batch_size, save_dir):
        """Train on in-memory data with early stopping and checkpointing.

        Reads 'config.json' from the working directory; the optional key
        configs['data']['trained_model_name'] selects a sub-directory for
        the checkpoint file.
        """
        import json  # bug fix: `json` was used here but never imported at module level

        with open('config.json', 'r') as config_file:  # bug fix: the handle was never closed
            configs = json.load(config_file)

        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size' % (epochs, batch_size))

        stamp = dt.datetime.now().strftime('%d%m%Y-%H%M%S')
        if 'trained_model_name' in configs['data']:
            save_fname = os.path.join(save_dir, configs['data']['trained_model_name'],
                                      'trained_at_%s-e%s.h5' % (stamp, str(epochs)))
        else:
            save_fname = os.path.join(save_dir, '%s-e%s.h5' % (stamp, str(epochs)))

        callbacks = [
            EarlyStopping(monitor='val_loss', patience=2),
            ModelCheckpoint(filepath=save_fname, monitor='val_loss', save_best_only=True)
        ]
        self.model.fit(
            x,
            y,
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks
        )
        self.model.save(save_fname)
        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()

    def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch, save_dir):
        """Train from a generator (memory-friendly) with checkpointing."""
        timer = Timer()
        timer.start()
        print('[Model] Training Started')
        print('[Model] %s epochs, %s batch size, %s batches per epoch' % (epochs, batch_size, steps_per_epoch))

        save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
        callbacks = [
            ModelCheckpoint(filepath=save_fname, monitor='loss', save_best_only=True)
        ]
        self.model.fit_generator(
            data_gen,
            steps_per_epoch=steps_per_epoch,
            epochs=epochs,
            callbacks=callbacks,
            workers=1
        )
        print('[Model] Training Completed. Model saved as %s' % save_fname)
        timer.stop()

    def predict_point_by_point(self, data):
        """Predict one step ahead for every input window, returning a 1-D array."""
        print('[Model] Predicting Point-by-Point...')
        predicted = self.model.predict(data)
        predicted = np.reshape(predicted, (predicted.size,))
        return predicted

    def predict_sequences_multiple(self, data, window_size, prediction_len):
        """Predict `prediction_len` steps, then shift the run forward and repeat.

        Each predicted point is appended to the sliding window, so the model
        consumes its own output for the remainder of each sequence.
        """
        print('[Model] Predicting Sequences Multiple...')
        prediction_seqs = []
        for i in range(int(len(data) / prediction_len)):
            curr_frame = data[i * prediction_len]
            predicted = []
            for _ in range(prediction_len):
                predicted.append(self.model.predict(curr_frame[newaxis, :, :])[0, 0])
                curr_frame = curr_frame[1:]
                curr_frame = np.insert(curr_frame, [window_size - 2], predicted[-1], axis=0)
            prediction_seqs.append(predicted)
        return prediction_seqs

    def predict_sequence_full(self, data, window_size):
        """Shift the window by one new prediction each step across all of *data*."""
        print('[Model] Predicting Sequences Full...')
        curr_frame = data[0]
        predicted = []
        for _ in range(len(data)):
            predicted.append(self.model.predict(curr_frame[newaxis, :, :])[0, 0])
            curr_frame = curr_frame[1:]
            curr_frame = np.insert(curr_frame, [window_size - 2], predicted[-1], axis=0)
        return predicted
def get_gzipped_model_size(file):
    """Return the on-disk size in bytes of *file* after DEFLATE compression."""
    import os
    import zipfile

    _, archive_path = tempfile.mkstemp('.zip')
    with zipfile.ZipFile(archive_path, 'w', compression=zipfile.ZIP_DEFLATED) as archive:
        archive.write(file)
    return os.path.getsize(archive_path)
def sparsity_pruning(configs, data, model, save_dir):
    """Apply magnitude-based weight pruning to *model* and save the result.

    Trains the wrapped model on *data* for the configured number of epochs,
    then saves it to a timestamped file under *save_dir* (falling back to
    saving into the directory itself if the file write fails).
    Returns the intended checkpoint filename.
    """
    prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
    save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), "pruned"))

    batch_size = configs["training"]["batch_size"]
    epochs = configs["training"]["epochs"]
    validation_split = configs["training"]["validation_split"]
    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # End the polynomial sparsity schedule at the last training step.
    num_data_points = x.shape[0] * (1 - validation_split)
    end_step = np.ceil(num_data_points / batch_size).astype(np.int32) * epochs

    pruning_params = {
        'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=configs["pruning_parameters"]["initial_sparsity"],
            final_sparsity=configs["pruning_parameters"]["final_sparsity"],
            begin_step=0,
            end_step=end_step)
    }
    model_for_pruning = prune_low_magnitude(model, **pruning_params)

    # `prune_low_magnitude` requires a recompile.
    # NOTE(review): SparseCategoricalCrossentropy looks unusual for an LSTM
    # time-series model -- confirm against how `y` is encoded upstream.
    model_for_pruning.compile(optimizer=configs["model"]["optimizer"],
                              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                              metrics=['accuracy'])
    model_for_pruning.summary()

    logdir = tempfile.mkdtemp()
    callbacks = [
        tfmot.sparsity.keras.UpdatePruningStep(),
        tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
    ]
    model_for_pruning.fit(x, y,
                          batch_size=batch_size, epochs=epochs,
                          validation_split=validation_split,
                          callbacks=callbacks)

    try:
        model_for_pruning.save(save_fname)
    except Exception:  # bug fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt
        model_for_pruning.save(save_dir)
    return save_fname
def small_model(configs, data, model, save_dir):
    """Prune *model*, strip the pruning wrappers and export a TFLite file.

    Returns the path of the pruned .tflite model written to a temp file.
    """
    # Wrapper factory that inserts pruning masks into the supported layers.
    magnitude_pruner = tfmot.sparsity.keras.prune_low_magnitude
    timestamp = dt.datetime.now().strftime('%d%m%Y-%H%M%S')
    save_fname = os.path.join(save_dir, '%s-e%s.h5' % (timestamp, "pruned"))

    training_cfg = configs["training"]
    batch_size = training_cfg["batch_size"]
    epochs = training_cfg["epochs"]
    validation_split = training_cfg["validation_split"]

    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise']
    )

    # The sparsity schedule runs over the whole training session.
    num_data_points = x.shape[0] * (1 - validation_split)
    end_step = np.ceil(num_data_points / batch_size).astype(np.int32) * epochs

    sparsity_cfg = configs["pruning_parameters"]
    schedule = tfmot.sparsity.keras.PolynomialDecay(
        initial_sparsity=sparsity_cfg["initial_sparsity"],
        final_sparsity=sparsity_cfg["final_sparsity"],
        begin_step=0,
        end_step=end_step)
    model_for_pruning = magnitude_pruner(model, pruning_schedule=schedule)

    # `prune_low_magnitude` requires a recompile.
    model_for_pruning.compile(
        optimizer=configs["model"]["optimizer"],
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    model_for_pruning.summary()

    logdir = tempfile.mkdtemp()
    pruning_callbacks = [
        tfmot.sparsity.keras.UpdatePruningStep(),
        tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
    ]
    model_for_pruning.fit(x, y,
                          batch_size=batch_size, epochs=epochs,
                          validation_split=validation_split,
                          callbacks=pruning_callbacks)

    # Remove the pruning wrappers before export.
    model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
    _, pruned_keras_file = tempfile.mkstemp('.h5')
    tf.keras.models.save_model(model_for_export, pruned_keras_file, include_optimizer=False)
    print('Saved pruned Keras model to:', pruned_keras_file)

    converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)
    pruned_tflite_model = converter.convert()
    _, pruned_tflite_file = tempfile.mkstemp('.tflite')
    with open(pruned_tflite_file, 'wb') as out:
        out.write(pruned_tflite_model)
    print('Saved pruned TFLite model to:', pruned_tflite_file)
    return pruned_tflite_file
def very_small_model(configs, data, model, save_dir):
    """Prune *model* and export both a pruned and a pruned+quantized TFLite file.

    All console output is redirected to 'pruning-engine.txt' for the duration
    of the call; stdout is restored and the log file closed even on failure.
    """
    old_stdout = sys.stdout
    log_file = open("pruning-engine.txt", 'w')
    sys.stdout = log_file
    try:
        timer = Timer()
        timer.start()
        prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
        save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), "pruned"))

        batch_size = configs["training"]["batch_size"]
        epochs = configs["training"]["epochs"]
        validation_split = configs["training"]["validation_split"]
        x, y = data.get_train_data(
            seq_len=configs['data']['sequence_length'],
            normalise=configs['data']['normalise']
        )

        # End the sparsity schedule at the last training step.
        num_data_points = x.shape[0] * (1 - validation_split)
        end_step = np.ceil(num_data_points / batch_size).astype(np.int32) * epochs

        pruning_params = {
            'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(
                initial_sparsity=configs["pruning_parameters"]["initial_sparsity"],
                final_sparsity=configs["pruning_parameters"]["final_sparsity"],
                begin_step=0,
                end_step=end_step)
        }
        model_for_pruning = prune_low_magnitude(model, **pruning_params)

        # `prune_low_magnitude` requires a recompile.
        model_for_pruning.compile(optimizer=configs["model"]["optimizer"],
                                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                                  metrics=['accuracy'])
        model_for_pruning.summary()

        logdir = tempfile.mkdtemp()
        callbacks = [
            tfmot.sparsity.keras.UpdatePruningStep(),
            tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
        ]
        model_for_pruning.fit(x, y,
                              batch_size=batch_size, epochs=epochs,
                              validation_split=validation_split,
                              callbacks=callbacks)

        model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
        _, pruned_keras_file = tempfile.mkstemp('.h5')
        tf.keras.models.save_model(model_for_export, pruned_keras_file, include_optimizer=False)
        print('Saved pruned Keras model to:', pruned_keras_file)

        converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)
        pruned_tflite_model = converter.convert()
        _, pruned_tflite_file = tempfile.mkstemp('.tflite')
        with open(pruned_tflite_file, 'wb') as f:
            f.write(pruned_tflite_model)
        print('Saved pruned TFLite model to:', pruned_tflite_file)

        # Second conversion adds post-training quantization on top of pruning.
        converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        quantized_and_pruned_tflite_model = converter.convert()
        _, quantized_and_pruned_tflite_file = tempfile.mkstemp('.tflite')
        with open(quantized_and_pruned_tflite_file, 'wb') as f:
            f.write(quantized_and_pruned_tflite_model)
        print('Saved quantized and pruned TFLite model to:', quantized_and_pruned_tflite_file)

        print("Size of gzipped pruned and quantized TFlite model: %.2f bytes" % (get_gzipped_model_size(quantized_and_pruned_tflite_file)))
        timer.stop()
    finally:
        # Bug fix: the original restored stdout only on success, so any
        # exception left stdout redirected and the log file handle leaked.
        sys.stdout = old_stdout
        log_file.close()
"tensorflow_model_optimization.sparsity.keras.PolynomialDecay",
"zipfile.ZipFile",
"tensorflow_model_optimization.sparsity.keras.PruningSummaries",
"tensorflow.keras.models.load_model",
"keras.layers.Dense",
"tensorflow.keras.models.save_model",
"tensorflow_model_optimization.sparsity.keras.UpdatePrunin... | [((5317, 5341), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".zip"""'], {}), "('.zip')\n", (5333, 5341), False, 'import tempfile\n'), ((5455, 5483), 'os.path.getsize', 'os.path.getsize', (['zipped_file'], {}), '(zipped_file)\n', (5470, 5483), False, 'import os\n'), ((7033, 7051), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (7049, 7051), False, 'import tempfile\n'), ((9183, 9201), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9199, 9201), False, 'import tempfile\n'), ((9513, 9566), 'tensorflow_model_optimization.sparsity.keras.strip_pruning', 'tfmot.sparsity.keras.strip_pruning', (['model_for_pruning'], {}), '(model_for_pruning)\n', (9547, 9566), True, 'import tensorflow_model_optimization as tfmot\n'), ((9594, 9617), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (9610, 9617), False, 'import tempfile\n'), ((9620, 9712), 'tensorflow.keras.models.save_model', 'tf.keras.models.save_model', (['model_for_export', 'pruned_keras_file'], {'include_optimizer': '(False)'}), '(model_for_export, pruned_keras_file,\n include_optimizer=False)\n', (9646, 9712), True, 'import tensorflow as tf\n'), ((9784, 9842), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model_for_export'], {}), '(model_for_export)\n', (9824, 9842), True, 'import tensorflow as tf\n'), ((9916, 9943), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".tflite"""'], {}), "('.tflite')\n", (9932, 9943), False, 'import tempfile\n'), ((10236, 10243), 'core.utils.Timer', 'Timer', ([], {}), '()\n', (10241, 10243), False, 'from core.utils import Timer\n'), ((11755, 11773), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (11771, 11773), False, 'import tempfile\n'), ((12085, 12138), 'tensorflow_model_optimization.sparsity.keras.strip_pruning', 'tfmot.sparsity.keras.strip_pruning', (['model_for_pruning'], {}), '(model_for_pruning)\n', (12119, 12138), 
True, 'import tensorflow_model_optimization as tfmot\n'), ((12166, 12189), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (12182, 12189), False, 'import tempfile\n'), ((12192, 12284), 'tensorflow.keras.models.save_model', 'tf.keras.models.save_model', (['model_for_export', 'pruned_keras_file'], {'include_optimizer': '(False)'}), '(model_for_export, pruned_keras_file,\n include_optimizer=False)\n', (12218, 12284), True, 'import tensorflow as tf\n'), ((12356, 12414), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model_for_export'], {}), '(model_for_export)\n', (12396, 12414), True, 'import tensorflow as tf\n'), ((12488, 12515), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".tflite"""'], {}), "('.tflite')\n", (12504, 12515), False, 'import tempfile\n'), ((12669, 12727), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model_for_export'], {}), '(model_for_export)\n', (12709, 12727), True, 'import tensorflow as tf\n'), ((12883, 12910), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".tflite"""'], {}), "('.tflite')\n", (12899, 12910), False, 'import tempfile\n'), ((13361, 13379), 'sys.stdout.close', 'sys.stdout.close', ([], {}), '()\n', (13377, 13379), False, 'import sys\n'), ((602, 614), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (612, 614), False, 'from keras.models import Sequential, load_model\n'), ((725, 745), 'tensorflow.keras.models.load_model', 'load_model', (['filepath'], {}), '(filepath)\n', (735, 745), False, 'from tensorflow.keras.models import load_model\n'), ((793, 800), 'core.utils.Timer', 'Timer', ([], {}), '()\n', (798, 800), False, 'from core.utils import Timer\n'), ((2192, 2199), 'core.utils.Timer', 'Timer', ([], {}), '()\n', (2197, 2199), False, 'from core.utils import Timer\n'), ((3140, 3147), 'core.utils.Timer', 'Timer', ([], {}), '()\n', (3145, 3147), False, 'from core.utils import Timer\n'), ((4021, 4061), 
'numpy.reshape', 'np.reshape', (['predicted', '(predicted.size,)'], {}), '(predicted, (predicted.size,))\n', (4031, 4061), True, 'import numpy as np\n'), ((5350, 5417), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipped_file', '"""w"""'], {'compression': 'zipfile.ZIP_DEFLATED'}), "(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED)\n", (5365, 5417), False, 'import zipfile\n'), ((6278, 6489), 'tensorflow_model_optimization.sparsity.keras.PolynomialDecay', 'tfmot.sparsity.keras.PolynomialDecay', ([], {'initial_sparsity': "configs['pruning_parameters']['initial_sparsity']", 'final_sparsity': "configs['pruning_parameters']['final_sparsity']", 'begin_step': '(0)', 'end_step': 'end_step'}), "(initial_sparsity=configs[\n 'pruning_parameters']['initial_sparsity'], final_sparsity=configs[\n 'pruning_parameters']['final_sparsity'], begin_step=0, end_step=end_step)\n", (6314, 6489), True, 'import tensorflow_model_optimization as tfmot\n'), ((7073, 7113), 'tensorflow_model_optimization.sparsity.keras.UpdatePruningStep', 'tfmot.sparsity.keras.UpdatePruningStep', ([], {}), '()\n', (7111, 7113), True, 'import tensorflow_model_optimization as tfmot\n'), ((7118, 7171), 'tensorflow_model_optimization.sparsity.keras.PruningSummaries', 'tfmot.sparsity.keras.PruningSummaries', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (7155, 7171), True, 'import tensorflow_model_optimization as tfmot\n'), ((8428, 8639), 'tensorflow_model_optimization.sparsity.keras.PolynomialDecay', 'tfmot.sparsity.keras.PolynomialDecay', ([], {'initial_sparsity': "configs['pruning_parameters']['initial_sparsity']", 'final_sparsity': "configs['pruning_parameters']['final_sparsity']", 'begin_step': '(0)', 'end_step': 'end_step'}), "(initial_sparsity=configs[\n 'pruning_parameters']['initial_sparsity'], final_sparsity=configs[\n 'pruning_parameters']['final_sparsity'], begin_step=0, end_step=end_step)\n", (8464, 8639), True, 'import tensorflow_model_optimization as tfmot\n'), ((9223, 9263), 
'tensorflow_model_optimization.sparsity.keras.UpdatePruningStep', 'tfmot.sparsity.keras.UpdatePruningStep', ([], {}), '()\n', (9261, 9263), True, 'import tensorflow_model_optimization as tfmot\n'), ((9268, 9321), 'tensorflow_model_optimization.sparsity.keras.PruningSummaries', 'tfmot.sparsity.keras.PruningSummaries', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (9305, 9321), True, 'import tensorflow_model_optimization as tfmot\n'), ((11000, 11211), 'tensorflow_model_optimization.sparsity.keras.PolynomialDecay', 'tfmot.sparsity.keras.PolynomialDecay', ([], {'initial_sparsity': "configs['pruning_parameters']['initial_sparsity']", 'final_sparsity': "configs['pruning_parameters']['final_sparsity']", 'begin_step': '(0)', 'end_step': 'end_step'}), "(initial_sparsity=configs[\n 'pruning_parameters']['initial_sparsity'], final_sparsity=configs[\n 'pruning_parameters']['final_sparsity'], begin_step=0, end_step=end_step)\n", (11036, 11211), True, 'import tensorflow_model_optimization as tfmot\n'), ((11795, 11835), 'tensorflow_model_optimization.sparsity.keras.UpdatePruningStep', 'tfmot.sparsity.keras.UpdatePruningStep', ([], {}), '()\n', (11833, 11835), True, 'import tensorflow_model_optimization as tfmot\n'), ((11840, 11893), 'tensorflow_model_optimization.sparsity.keras.PruningSummaries', 'tfmot.sparsity.keras.PruningSummaries', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (11877, 11893), True, 'import tensorflow_model_optimization as tfmot\n'), ((2679, 2724), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(2)'}), "(monitor='val_loss', patience=2)\n", (2692, 2724), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((2730, 2807), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'save_fname', 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(filepath=save_fname, monitor='val_loss', save_best_only=True)\n", (2745, 2807), False, 'from keras.callbacks import 
EarlyStopping, ModelCheckpoint\n'), ((3447, 3520), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'save_fname', 'monitor': '"""loss"""', 'save_best_only': '(True)'}), "(filepath=save_fname, monitor='loss', save_best_only=True)\n", (3462, 3520), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint\n'), ((5096, 5159), 'numpy.insert', 'np.insert', (['curr_frame', '[window_size - 2]', 'predicted[-1]'], {'axis': '(0)'}), '(curr_frame, [window_size - 2], predicted[-1], axis=0)\n', (5105, 5159), True, 'import numpy as np\n'), ((6888, 6951), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (6933, 6951), True, 'import tensorflow as tf\n'), ((9038, 9101), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (9083, 9101), True, 'import tensorflow as tf\n'), ((11610, 11673), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (11655, 11673), True, 'import tensorflow as tf\n'), ((1461, 1480), 'keras.regularizers.l1_l2', 'l1_l2', ([], {'l1': 'L1', 'l2': 'L2'}), '(l1=L1, l2=L2)\n', (1466, 1480), False, 'from keras.regularizers import l1, l2, l1_l2\n'), ((4588, 4651), 'numpy.insert', 'np.insert', (['curr_frame', '[window_size - 2]', 'predicted[-1]'], {'axis': '(0)'}), '(curr_frame, [window_size - 2], predicted[-1], axis=0)\n', (4597, 4651), True, 'import numpy as np\n'), ((6134, 6171), 'numpy.ceil', 'np.ceil', (['(num_data_points / batch_size)'], {}), '(num_data_points / batch_size)\n', (6141, 6171), True, 'import numpy as np\n'), ((8284, 8321), 'numpy.ceil', 'np.ceil', (['(num_data_points / batch_size)'], {}), '(num_data_points / batch_size)\n', (8291, 8321), True, 'import numpy as np\n'), ((10856, 10893), 
'numpy.ceil', 'np.ceil', (['(num_data_points / batch_size)'], {}), '(num_data_points / batch_size)\n', (10863, 10893), True, 'import numpy as np\n'), ((1512, 1521), 'keras.regularizers.l1', 'l1', ([], {'l1': 'L1'}), '(l1=L1)\n', (1514, 1521), False, 'from keras.regularizers import l1, l2, l1_l2\n'), ((1619, 1680), 'keras.layers.Dense', 'Dense', (['neurons'], {'activation': 'activation', 'kernel_regularizer': 'reg'}), '(neurons, activation=activation, kernel_regularizer=reg)\n', (1624, 1680), False, 'from keras.layers import Dense, Activation, Dropout, LSTM\n'), ((1733, 1846), 'keras.layers.LSTM', 'LSTM', (['neurons'], {'input_shape': '(input_timesteps, input_dim)', 'return_sequences': 'return_seq', 'kernel_regularizer': 'reg'}), '(neurons, input_shape=(input_timesteps, input_dim), return_sequences=\n return_seq, kernel_regularizer=reg)\n', (1737, 1846), False, 'from keras.layers import Dense, Activation, Dropout, LSTM\n'), ((1897, 1918), 'keras.layers.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (1904, 1918), False, 'from keras.layers import Dense, Activation, Dropout, LSTM\n'), ((1554, 1563), 'keras.regularizers.l2', 'l2', ([], {'l2': 'L2'}), '(l2=L2)\n', (1556, 1563), False, 'from keras.regularizers import l1, l2, l1_l2\n'), ((5661, 5678), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5676, 5678), True, 'import datetime as dt\n'), ((7811, 7828), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (7826, 7828), True, 'import datetime as dt\n'), ((10383, 10400), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (10398, 10400), True, 'import datetime as dt\n'), ((3367, 3384), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (3382, 3384), True, 'import datetime as dt\n'), ((2473, 2490), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2488, 2490), True, 'import datetime as dt\n'), ((2596, 2613), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (2611, 2613), True, 
'import datetime as dt\n')] |
import matplotlib.pyplot as plt
import numpy as np
# Evaluation run selector: metric logs are read from
# ./eval/seed_<_seed>_eps_<_eps>/<run name>.npy (see result.__init__).
_seed = 9
_eps = 200  # number of test episodes stored per run
#####################################################################
# Definition of class
class result():
    """Aggregate per-episode exploration metrics loaded from a .npy log.

    Collects, per episode: GRER (repetitive exploration rate minus one),
    CE (explored area per command) and PE (explored area per unit of
    travelled distance), plus a count of episodes where the cube was
    never found.
    """

    def __init__(self, _fn):
        self.path = f"./eval/seed_{_seed}_eps_{_eps}/{_fn}.npy"
        self.file = np.load(self.path, allow_pickle=True).squeeze()
        self.name = _fn
        self.eps = _eps
        self.pass_eps = _eps

        self.rer = 0
        self.ce = 0
        self.pe = 0
        self.fail = 0
        self.rer_lst = []
        self.ce_lst = []
        self.pe_lst = []

        for i in range(_eps):
            episode = self.file[i]
            final = episode[-1]          # last record holds the episode summary
            n_commands = len(episode)

            rer_i = final['repetitive_exploration_rate'] - 1
            ce_i = float(final['explored_area']) / n_commands
            self.rer += rer_i
            self.ce += ce_i
            self.rer_lst.append(rer_i)
            self.ce_lst.append(ce_i)

            # PE is only meaningful once the agent has actually travelled.
            if final['cumulative_distance'] >= 1:
                pe_i = float(final['explored_area']) / final['cumulative_distance']
                self.pe += pe_i
                self.pe_lst.append(pe_i)
            else:
                self.pe_lst.append(0.0)

            if final['cube_found'] == False:
                self.fail += 1

        self.np_rer_lst = np.asarray(self.rer_lst)
        self.np_ce_lst = np.asarray(self.ce_lst)
        self.np_pe_lst = np.asarray(self.pe_lst)

    def print_stats(self):
        """Print mean and standard deviation of each metric and the failure count."""
        print(self.name)
        print(' rer:', self.rer / self.pass_eps, ' std:', np.std(self.np_rer_lst))
        print(' ce:', self.ce / self.pass_eps, ' std:', np.std(self.np_ce_lst))
        print(' pe:', self.pe / len(self.pe_lst), ' std:', np.std(self.np_pe_lst))
        print(' not_found:', self.fail)
#####################################################################
# Build one result per evaluated policy and dump its summary statistics.
result_list = [result(run_name) for run_name in
               ("SAM", "ST-COM", "SAM-VFM (A)", "SAM-VFM (B)", "RAND")]
for res in result_list:
    res.print_stats()
#####################################################################
# Plot GRER / CE / PE per episode, one subplot each.
color_list = ['b-', 'g-', 'r-', 'y-', 'co', 'mo']
x_axis = range(_eps)
fig, axs = plt.subplots(3, 1)
# Top subplot: GRER (legend shown only here to avoid clutter).
for res, line_style in zip(result_list, color_list):
    axs[0].plot(x_axis, res.np_rer_lst, line_style, label=res.name)
axs[0].set(ylabel='GRER')
axs[0].set_title('The GRERs of SAM-VFM, SAM, and ST-COM over 200 Testing Episodes')
axs[0].legend()
# Middle subplot: CE.
for res, line_style in zip(result_list, color_list):
    axs[1].plot(x_axis, res.np_ce_lst, line_style, label=res.name)
axs[1].set(ylabel='CE')
axs[1].set_title('The GEs of SAM-VFM, SAM, and ST-COM over 200 Testing Episodes')
# Bottom subplot: PE.
for res, line_style in zip(result_list, color_list):
    axs[2].plot(x_axis, res.np_pe_lst, line_style, label=res.name)
axs[2].set(xlabel='episodes', ylabel='PE')
axs[2].set_title('The PEs of SAM-VFM, SAM, and ST-COM over 200 Testing Episodes')
plt.show()
| [
"numpy.asarray",
"numpy.std",
"numpy.load",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((2536, 2554), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (2548, 2554), True, 'import matplotlib.pyplot as plt\n'), ((3386, 3396), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3394, 3396), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1531), 'numpy.asarray', 'np.asarray', (['self.rer_lst'], {}), '(self.rer_lst)\n', (1517, 1531), True, 'import numpy as np\n'), ((1561, 1584), 'numpy.asarray', 'np.asarray', (['self.ce_lst'], {}), '(self.ce_lst)\n', (1571, 1584), True, 'import numpy as np\n'), ((1614, 1637), 'numpy.asarray', 'np.asarray', (['self.pe_lst'], {}), '(self.pe_lst)\n', (1624, 1637), True, 'import numpy as np\n'), ((1756, 1779), 'numpy.std', 'np.std', (['self.np_rer_lst'], {}), '(self.np_rer_lst)\n', (1762, 1779), True, 'import numpy as np\n'), ((1840, 1862), 'numpy.std', 'np.std', (['self.np_ce_lst'], {}), '(self.np_ce_lst)\n', (1846, 1862), True, 'import numpy as np\n'), ((1926, 1948), 'numpy.std', 'np.std', (['self.np_pe_lst'], {}), '(self.np_pe_lst)\n', (1932, 1948), True, 'import numpy as np\n'), ((323, 360), 'numpy.load', 'np.load', (['self.path'], {'allow_pickle': '(True)'}), '(self.path, allow_pickle=True)\n', (330, 360), True, 'import numpy as np\n')] |
import numpy as np
class LU:
    """LU factorization of a square matrix via Gaussian elimination.

    After ``solve()``:
      * ``self.matrix`` holds the upper-triangular factor U (eliminated in place),
      * ``self.L`` holds the unit lower-triangular factor,
      * ``self.Y`` holds the solution of L*Y = B (the forward-substitution
        step of solving A*X = B).

    Note: the original version kept ``matrix``/``L``/``U``/``B`` as shared
    class-level mutable attributes; they are now plain instance state.
    """

    def __init__(self, matrix, vector):
        """Store the system A*X = B and initialize L to the identity."""
        self.matrix = matrix
        self.B = vector
        n = len(self.matrix)
        # Unit lower-triangular start: identity matrix.
        self.L = [[1 if x == y else 0 for y in range(n)] for x in range(n)]

    def solve(self):
        """Eliminate the matrix into U, record multipliers in L, print Y.

        Silently returns (doing nothing) for a non-square matrix, matching
        the original behavior. Uses ``np.linalg.solve`` on the triangular L
        instead of forming an explicit inverse (cheaper and more stable).
        """
        if len(self.matrix) != len(self.matrix[0]):
            return
        n = len(self.matrix)
        for index in range(n - 1):
            pivote = self.matrix[index][index]
            for row in range(index + 1, n):
                # Elimination multiplier for this row; it becomes L[row][index].
                magic_number = self.matrix[row][index] / pivote
                self.L[row][index] = magic_number
                self.matrix[row] = [
                    self.matrix[row][x] - magic_number * self.matrix[index][x]
                    for x in range(n)
                ]
        # Solve L*Y = B directly rather than computing inv(L) @ B.
        self.Y = np.linalg.solve(np.array(self.L), np.array(self.B))
        print("the solution is Y = " + str(self.Y))

    def printL(self):
        """Print the lower-triangular factor, one row per line."""
        print("L:")
        for row in self.L:
            print(row)

    def printMatrix(self):
        """Print the (eliminated) matrix, one row per line."""
        print("matrix:")
        for row in self.matrix:
            print(row)
# Demonstrate the solver on two small 3x3 systems.  LU.__init__ prints
# nothing, so constructing both objects up front before running the demos
# produces exactly the same console output as the original sequence.
test = LU([[9, 1, 1], [15, 1, 1], [1, 1, 1]], [1, 1, 1])
test2 = LU([[1, 4, -2], [3, -2, 5], [2, 3, 1]], [3, 14, 11])
for demo in (test, test2):
    demo.solve()
    demo.printL()
    demo.printMatrix()
"numpy.array"
] | [((813, 829), 'numpy.array', 'np.array', (['self.L'], {}), '(self.L)\n', (821, 829), True, 'import numpy as np\n'), ((857, 873), 'numpy.array', 'np.array', (['self.B'], {}), '(self.B)\n', (865, 873), True, 'import numpy as np\n')] |
#!/usr/bin/python
"""
Calls functions with all available arguments to check whether they still
exist. An error from this file means that the public API has been changed.
"""
import numpy as np
from pylatex import (Document, Section, Math, Table, Figure, Package, TikZ,
Axis, Plot)
from pylatex.command import Command
from pylatex.numpy import Matrix, VectorName
from pylatex.utils import (escape_latex, fix_filename, dumps_list, bold,
italic, verbatim)
# Document
# Exercise every public keyword argument of Document; the exact call
# signatures are the point of this file (API-stability smoke test).
doc = Document(
    default_filename='default_filename',
    documentclass='article',
    fontenc='T1',
    inputenc='utf8',
    author='',
    title='',
    date='',
    data=None,
    maketitle=False
)
doc.append('Some text.')
doc.generate_tex(filename='')
doc.generate_pdf(filename='', clean=True)
# SectionBase
s = Section(title='', numbering=True, data=None)
# Math
m = Math(data=None, inline=False)
# Table
t = Table(table_spec='|c|c|', data=None, pos=None, table_type='tabular')
t.add_hline(start=None, end=None)
t.add_row(cells=(1, 2), escape=False)
t.add_multicolumn(size=2, align='|c|', content='Multicol', cells=None,
                  escape=False)
t.add_multirow(size=3, align='*', content='Multirow', hlines=True, cells=None,
               escape=False)
# Command
c = Command('documentclass', arguments=None, options=None, packages=None)
# Figure
f = Figure(data=None, position=None)
f.add_image(filename='', width=r'0.8\textwidth', placement=r'\centering')
f.add_caption('')
# Numpy
v = VectorName(name='')
M = np.matrix([[2, 3, 4],
               [0, 0, 1],
               [0, 0, 2]])
# NOTE: `m` is deliberately rebound here (was a Math above) -- each binding
# only needs to survive long enough to prove the constructor signature.
m = Matrix(matrix=M, name='', mtype='p', alignment=None)
# Package
p = Package(name='', base='usepackage', options=None)
# PGFPlots
tikz = TikZ(data=None)
a = Axis(data=None, options=None)
# `p` is rebound as well (was a Package above); intentional.
p = Plot(name=None, func=None, coordinates=None, options=None)
# Utils
escape_latex(s='')
fix_filename(filename='')
dumps_list(l=[], escape=False, token='\n')
bold(s='')
italic(s='')
verbatim(s='', delimiter='|')
| [
"pylatex.numpy.Matrix",
"pylatex.utils.bold",
"pylatex.Package",
"pylatex.Section",
"pylatex.Document",
"pylatex.Table",
"pylatex.Axis",
"pylatex.utils.escape_latex",
"pylatex.utils.dumps_list",
"pylatex.numpy.VectorName",
"pylatex.Figure",
"pylatex.Plot",
"pylatex.TikZ",
"pylatex.utils.it... | [((534, 701), 'pylatex.Document', 'Document', ([], {'default_filename': '"""default_filename"""', 'documentclass': '"""article"""', 'fontenc': '"""T1"""', 'inputenc': '"""utf8"""', 'author': '""""""', 'title': '""""""', 'date': '""""""', 'data': 'None', 'maketitle': '(False)'}), "(default_filename='default_filename', documentclass='article',\n fontenc='T1', inputenc='utf8', author='', title='', date='', data=None,\n maketitle=False)\n", (542, 701), False, 'from pylatex import Document, Section, Math, Table, Figure, Package, TikZ, Axis, Plot\n'), ((858, 902), 'pylatex.Section', 'Section', ([], {'title': '""""""', 'numbering': '(True)', 'data': 'None'}), "(title='', numbering=True, data=None)\n", (865, 902), False, 'from pylatex import Document, Section, Math, Table, Figure, Package, TikZ, Axis, Plot\n'), ((915, 944), 'pylatex.Math', 'Math', ([], {'data': 'None', 'inline': '(False)'}), '(data=None, inline=False)\n', (919, 944), False, 'from pylatex import Document, Section, Math, Table, Figure, Package, TikZ, Axis, Plot\n'), ((958, 1026), 'pylatex.Table', 'Table', ([], {'table_spec': '"""|c|c|"""', 'data': 'None', 'pos': 'None', 'table_type': '"""tabular"""'}), "(table_spec='|c|c|', data=None, pos=None, table_type='tabular')\n", (963, 1026), False, 'from pylatex import Document, Section, Math, Table, Figure, Package, TikZ, Axis, Plot\n'), ((1348, 1417), 'pylatex.command.Command', 'Command', (['"""documentclass"""'], {'arguments': 'None', 'options': 'None', 'packages': 'None'}), "('documentclass', arguments=None, options=None, packages=None)\n", (1355, 1417), False, 'from pylatex.command import Command\n'), ((1432, 1464), 'pylatex.Figure', 'Figure', ([], {'data': 'None', 'position': 'None'}), '(data=None, position=None)\n', (1438, 1464), False, 'from pylatex import Document, Section, Math, Table, Figure, Package, TikZ, Axis, Plot\n'), ((1572, 1591), 'pylatex.numpy.VectorName', 'VectorName', ([], {'name': '""""""'}), "(name='')\n", (1582, 1591), 
False, 'from pylatex.numpy import Matrix, VectorName\n'), ((1597, 1641), 'numpy.matrix', 'np.matrix', (['[[2, 3, 4], [0, 0, 1], [0, 0, 2]]'], {}), '([[2, 3, 4], [0, 0, 1], [0, 0, 2]])\n', (1606, 1641), True, 'import numpy as np\n'), ((1676, 1728), 'pylatex.numpy.Matrix', 'Matrix', ([], {'matrix': 'M', 'name': '""""""', 'mtype': '"""p"""', 'alignment': 'None'}), "(matrix=M, name='', mtype='p', alignment=None)\n", (1682, 1728), False, 'from pylatex.numpy import Matrix, VectorName\n'), ((1744, 1793), 'pylatex.Package', 'Package', ([], {'name': '""""""', 'base': '"""usepackage"""', 'options': 'None'}), "(name='', base='usepackage', options=None)\n", (1751, 1793), False, 'from pylatex import Document, Section, Math, Table, Figure, Package, TikZ, Axis, Plot\n'), ((1813, 1828), 'pylatex.TikZ', 'TikZ', ([], {'data': 'None'}), '(data=None)\n', (1817, 1828), False, 'from pylatex import Document, Section, Math, Table, Figure, Package, TikZ, Axis, Plot\n'), ((1835, 1864), 'pylatex.Axis', 'Axis', ([], {'data': 'None', 'options': 'None'}), '(data=None, options=None)\n', (1839, 1864), False, 'from pylatex import Document, Section, Math, Table, Figure, Package, TikZ, Axis, Plot\n'), ((1871, 1929), 'pylatex.Plot', 'Plot', ([], {'name': 'None', 'func': 'None', 'coordinates': 'None', 'options': 'None'}), '(name=None, func=None, coordinates=None, options=None)\n', (1875, 1929), False, 'from pylatex import Document, Section, Math, Table, Figure, Package, TikZ, Axis, Plot\n'), ((1939, 1957), 'pylatex.utils.escape_latex', 'escape_latex', ([], {'s': '""""""'}), "(s='')\n", (1951, 1957), False, 'from pylatex.utils import escape_latex, fix_filename, dumps_list, bold, italic, verbatim\n'), ((1959, 1984), 'pylatex.utils.fix_filename', 'fix_filename', ([], {'filename': '""""""'}), "(filename='')\n", (1971, 1984), False, 'from pylatex.utils import escape_latex, fix_filename, dumps_list, bold, italic, verbatim\n'), ((1986, 2028), 'pylatex.utils.dumps_list', 'dumps_list', ([], {'l': '[]', 
'escape': '(False)', 'token': '"""\n"""'}), "(l=[], escape=False, token='\\n')\n", (1996, 2028), False, 'from pylatex.utils import escape_latex, fix_filename, dumps_list, bold, italic, verbatim\n'), ((2030, 2040), 'pylatex.utils.bold', 'bold', ([], {'s': '""""""'}), "(s='')\n", (2034, 2040), False, 'from pylatex.utils import escape_latex, fix_filename, dumps_list, bold, italic, verbatim\n'), ((2042, 2054), 'pylatex.utils.italic', 'italic', ([], {'s': '""""""'}), "(s='')\n", (2048, 2054), False, 'from pylatex.utils import escape_latex, fix_filename, dumps_list, bold, italic, verbatim\n'), ((2056, 2085), 'pylatex.utils.verbatim', 'verbatim', ([], {'s': '""""""', 'delimiter': '"""|"""'}), "(s='', delimiter='|')\n", (2064, 2085), False, 'from pylatex.utils import escape_latex, fix_filename, dumps_list, bold, italic, verbatim\n')] |
"""This class read EFM model files
"""
import os
import warnings
import numpy as np
from util import load_dict
class EFMReader:
    """Reader for serialized EFM (Explicit Factor Model) artifacts.

    Loads the user/item/aspect id mappings and the factor matrices
    U1, U2, V, H1, H2 from ``input_path`` and exposes aspect-quality,
    aspect-score and item-ranking helpers built on top of them.
    """

    def __init__(
        self,
        input_path=None,
        alpha=0.85,
        num_most_cared_aspects=15,
        rating_scale=5,
        verbose=False,
    ):
        # Mappings from raw (external) ids to internal integer indices.
        self.uid_map = load_dict(os.path.join(input_path, "uid_map"), sep=",")
        self.iid_map = load_dict(os.path.join(input_path, "iid_map"), sep=",")
        self.aspect_id_map = load_dict(
            os.path.join(input_path, "aspect_id_map"), sep=","
        )
        # Factor matrices saved as .npy arrays by the training run.
        self.U1 = np.load(os.path.join(input_path, "U1.npy"))
        self.U2 = np.load(os.path.join(input_path, "U2.npy"))
        self.V = np.load(os.path.join(input_path, "V.npy"))
        self.H1 = np.load(os.path.join(input_path, "H1.npy"))
        self.H2 = np.load(os.path.join(input_path, "H2.npy"))
        # Weight blending explicit aspect scores vs. latent rating scores.
        self.alpha = alpha
        self.n_cared_aspects = num_most_cared_aspects
        self.rating_scale = rating_scale
        # Inverse aspect mapping: internal aspect index -> aspect name.
        self.id2aspect = {v: k for k, v in self.aspect_id_map.items()}
        self.verbose = verbose
        if self.verbose:
            print("Load EFM from %s" % input_path)
    @property
    def num_items(self):
        """Number of known items."""
        return len(self.iid_map)
    @property
    def num_users(self):
        """Number of known users."""
        return len(self.uid_map)
    @property
    def raw_uid_map(self):
        """Inverse mapping: internal user index -> raw user id."""
        return {v: k for k, v in self.uid_map.items()}
    @property
    def raw_iid_map(self):
        """Inverse mapping: internal item index -> raw item id."""
        return {v: k for k, v in self.iid_map.items()}
    @property
    def raw_aspect_id_map(self):
        """Inverse mapping: internal aspect index -> aspect name."""
        return {v: k for k, v in self.aspect_id_map.items()}
    def get_aspect_quality(self, raw_iid, aspect):
        """Item-side quality of *aspect* for *raw_iid* (U2 row dot V row).

        NOTE(review): unlike get_aspect_score, unknown ids are not guarded
        here; ``.get`` returning None would fail on indexing -- confirm
        callers always pass known ids.
        """
        iid = self.iid_map.get(raw_iid)
        aspect_id = self.aspect_id_map.get(aspect)
        return self.U2[iid, :].dot(self.V[aspect_id, :])
    def get_aspect_vector(self, raw_uid, raw_iid):
        """Element-wise product of user-side and item-side scores over all aspects."""
        uid = self.uid_map.get(raw_uid)
        iid = self.iid_map.get(raw_iid)
        return self.U1[uid, :].dot(self.V.T) * self.U2[iid, :].dot(self.V.T)
    def get_aspect_score(self, raw_uid, raw_iid, aspect):
        """User attention times item quality for a single aspect.

        Returns 0.0 (after warning) when any of the three ids is unknown.
        """
        uid = self.uid_map.get(raw_uid)
        iid = self.iid_map.get(raw_iid)
        aspect_id = self.aspect_id_map.get(aspect)
        if uid is None or iid is None or aspect_id is None:
            warnings.warn(
                "Aspect sentiment score is not available for "
                + "user=[%s], item=[%s], aspect=[%s], this function will return 0.0"
                % (raw_uid, raw_iid, aspect)
            )
            return 0.0
        return self.U1[uid, :].dot(self.V[aspect_id, :]) * self.U2[iid, :].dot(
            self.V[aspect_id, :]
        )
    def get_most_cared_aspect_ids(self, raw_uid):
        """Internal ids of the user's top ``n_cared_aspects`` aspects (descending)."""
        uid = self.uid_map.get(raw_uid)
        X_ = self.U1[uid, :].dot(self.V.T)
        return (-X_).argsort()[: self.n_cared_aspects]
    def get_most_cared_aspects(self, raw_uid):
        """Aspect names of the user's top ``n_cared_aspects`` aspects."""
        return [
            self.id2aspect.get(aid) for aid in self.get_most_cared_aspect_ids(raw_uid)
        ]
    def is_unk_user(self, raw_uid):
        """True if *raw_uid* is not in the training vocabulary."""
        return self.get_uid(raw_uid) is None
    def is_unk_item(self, raw_iid):
        """True if *raw_iid* is not in the training vocabulary."""
        return self.get_iid(raw_iid) is None
    def get_uid(self, raw_uid):
        """Internal user index for *raw_uid*, or None if unknown."""
        return self.uid_map.get(raw_uid, None)
    def get_iid(self, raw_iid):
        """Internal item index for *raw_iid*, or None if unknown."""
        return self.iid_map.get(raw_iid, None)
    def rank(self, raw_uid, raw_iids=[]):
        """Rank candidate items for *raw_uid*, best first.

        Unknown items are silently dropped.  The score blends the explicit
        aspect-based score over the user's most cared aspects with a latent
        rating score, weighted by ``alpha``.

        Returns the INTERNAL (mapped) item ids sorted by descending score;
        use ``raw_iid_map`` to translate back to raw ids.
        """
        # NOTE(review): mutable default ``raw_iids=[]`` is safe here because
        # the list is only iterated, never mutated.
        mapped_iids = []
        for raw_iid in raw_iids:
            if self.is_unk_item(raw_iid):
                continue
            mapped_iids.append(self.get_iid(raw_iid))
        mapped_iids = np.array(mapped_iids)
        mapped_uid = self.get_uid(raw_uid)
        # User attention over all aspects; keep only the most cared ones.
        X_ = self.U1[mapped_uid, :].dot(self.V.T)
        aspect_ids = (-X_).argsort()[: self.n_cared_aspects]
        most_cared_X_ = X_[aspect_ids]
        most_cared_Y_ = self.U2[mapped_iids, :].dot(self.V[aspect_ids, :].T)
        explicit_scores = most_cared_X_.dot(most_cared_Y_.T) / (
            self.n_cared_aspects * self.rating_scale
        )
        rating_scores = self.U1[mapped_uid, :].dot(self.U2[mapped_iids, :].T) + self.H1[
            mapped_uid, :
        ].dot(self.H2[mapped_iids, :].T)
        ranking_scores = self.alpha * explicit_scores + (1 - self.alpha) * rating_scores
        return mapped_iids[ranking_scores.argsort()[::-1]]
    def get_top_k_ranked_items(self, raw_uid, raw_iids=[], top_k=10):
        """Top *top_k* internal item ids from ``rank``."""
        return self.rank(raw_uid, raw_iids)[:top_k]
| [
"warnings.warn",
"numpy.array",
"os.path.join"
] | [((3585, 3606), 'numpy.array', 'np.array', (['mapped_iids'], {}), '(mapped_iids)\n', (3593, 3606), True, 'import numpy as np\n'), ((332, 367), 'os.path.join', 'os.path.join', (['input_path', '"""uid_map"""'], {}), "(input_path, 'uid_map')\n", (344, 367), False, 'import os\n'), ((411, 446), 'os.path.join', 'os.path.join', (['input_path', '"""iid_map"""'], {}), "(input_path, 'iid_map')\n", (423, 446), False, 'import os\n'), ((509, 550), 'os.path.join', 'os.path.join', (['input_path', '"""aspect_id_map"""'], {}), "(input_path, 'aspect_id_map')\n", (521, 550), False, 'import os\n'), ((596, 630), 'os.path.join', 'os.path.join', (['input_path', '"""U1.npy"""'], {}), "(input_path, 'U1.npy')\n", (608, 630), False, 'import os\n'), ((658, 692), 'os.path.join', 'os.path.join', (['input_path', '"""U2.npy"""'], {}), "(input_path, 'U2.npy')\n", (670, 692), False, 'import os\n'), ((719, 752), 'os.path.join', 'os.path.join', (['input_path', '"""V.npy"""'], {}), "(input_path, 'V.npy')\n", (731, 752), False, 'import os\n'), ((780, 814), 'os.path.join', 'os.path.join', (['input_path', '"""H1.npy"""'], {}), "(input_path, 'H1.npy')\n", (792, 814), False, 'import os\n'), ((842, 876), 'os.path.join', 'os.path.join', (['input_path', '"""H2.npy"""'], {}), "(input_path, 'H2.npy')\n", (854, 876), False, 'import os\n'), ((2298, 2467), 'warnings.warn', 'warnings.warn', (["('Aspect sentiment score is not available for ' + \n 'user=[%s], item=[%s], aspect=[%s], this function will return 0.0' % (\n raw_uid, raw_iid, aspect))"], {}), "('Aspect sentiment score is not available for ' + \n 'user=[%s], item=[%s], aspect=[%s], this function will return 0.0' % (\n raw_uid, raw_iid, aspect))\n", (2311, 2467), False, 'import warnings\n')] |
"""A filtered-reference Least-Mean-Square (FxLMS) filter."""
import numpy as np
import matplotlib.pyplot as plt
from adafilt import FastBlockLMSFilter, FIRFilter
from adafilt.io import FakeInterface
from adafilt.utils import wgn
import warnings
# Escalate accidental complex-valued arithmetic into a hard error.
warnings.filterwarnings(action="error", category=np.ComplexWarning)
def moving_rms(x, N):
    """Root-mean-square of *x* over a sliding window of length *N*.

    Returns an array of length ``len(x) - N + 1`` ("valid" windows only).
    """
    window = np.full(N, 1.0 / N)
    mean_square = np.convolve(np.square(x), window, mode="valid")
    return np.sqrt(mean_square)
# --- Simulation parameters -------------------------------------------------
length = 64  # number of adaptive FIR filter taps
blocklength = 4  # length of I/O buffer and blocksize of filter
n_buffers = 10000  # size of simulation
estimation_phase = 2000  # blocks used for plant identification before control starts
# primary and secondary paths
h_pri = np.zeros(64)
h_pri[60] = 1
h_sec = np.zeros(64)
h_sec[20] = 1
# simulates an audio interface with primary and secondary paths and 40 dB SNR noise
# at the error sensor
signal = np.random.normal(0, 1, size=n_buffers * blocklength)
sim = FakeInterface(
    blocklength, signal, h_pri=h_pri, h_sec=h_sec, noise=wgn(signal, 20, "dB")
)
# the adaptive filter
filt = FastBlockLMSFilter(
    length, blocklength, stepsize=0.01, leakage=0.99999, power_averaging=0.9
)
# NOTE(review): `locked` presumably freezes adaptation inside adafilt and is
# never unset here; adaptation is instead driven explicitly via filt.adapt()
# below -- confirm against adafilt's semantics.
filt.locked = True
# secondary path estimate has to account for block size
plant_model = FIRFilter(np.zeros(blocklength + length))
# adaptive plant model
adaptive_plant_model = FastBlockLMSFilter(
    length, blocklength, stepsize=0.01, leakage=0.99999
)
# aggregate signals during simulation
elog = []
e_plog = []
wlog = []
ylog = []
glog = []
ulog = []
dlog = []
fxlog = []
y = np.zeros(blocklength)  # control signal is zero for first block
for i in range(n_buffers):
    # identification noise: loud while estimating the plant, quiet afterwards
    if i < estimation_phase:
        v = np.random.normal(0, 1, blocklength)
    else:
        v = np.random.normal(0, 0.01, blocklength)
        adaptive_plant_model.stepsize = 0.0001
    # record reference signal x and error signal e while playing back y
    x, e, u, d = sim.playrec(- y + v)
    # adaptive plant model prediction
    y_p = adaptive_plant_model.filt(v)
    # plant estimation error
    e_p = e - y_p
    # adapt plant model
    adaptive_plant_model.adapt(v, e_p)
    # copy plant model
    plant_model.w[blocklength:] = adaptive_plant_model.w
    # filter the reference signal
    fx = plant_model(x)
    if i >= estimation_phase:
        # adapt filter
        filt.adapt(fx, e)
    # filter
    y = filt.filt(x)
    ulog.append(u)
    dlog.append(d)
    elog.append(e)
    fxlog.append(fx)
    e_plog.append(e_p)
    ylog.append(y)
    wlog.append(filt.w.copy())
    glog.append(adaptive_plant_model.w.copy())
# --- Plotting --------------------------------------------------------------
fig, ax = plt.subplots(ncols=2, nrows=3, figsize=(14, 8), constrained_layout=True)
ax = ax.flatten()
ax[0].set_title("Signals")
ax[0].plot(np.concatenate(ylog), label="y", alpha=0.8)
ax[0].plot(np.concatenate(elog), label="Signal at error mic: e", alpha=0.7)
ax[0].set_xlabel("Sample")
ax[0].legend()
ax[1].set_title("Filter weights")
ax[1].plot(glog, "--")
ax[1].plot(wlog, "-")
ax[1].set_xlabel("Block")
ax[2].set_title("Error Signals")
# ax[2].plot(10 * np.log10(moving_rms(np.concatenate(elog), 512) ** 2), label="e")
# ax[2].plot(10 * np.log10(moving_rms(np.concatenate(e_plog), 512) ** 2), label="e_p")
ax[2].plot(moving_rms(np.concatenate(elog), 512) ** 2, label="e")
ax[2].plot(moving_rms(np.concatenate(e_plog), 512) ** 2, label="e_p")
ax[2].set_xlabel("Sample")
ax[2].set_ylabel("Error [dB]")
ax[2].legend()
# the optimal filter: inverse-filter the primary path through the (delayed)
# secondary path in the frequency domain
wopt = -np.fft.irfft(np.fft.rfft(h_pri) / np.fft.rfft(np.roll(h_sec, blocklength)))
ax[3].set_title("Final filter")
ax[3].plot(-filt.w, "x", label="control filter")
ax[3].plot(wopt, "+", label="optimal filter")
ax[3].plot(plant_model.w[blocklength:], "o", label="plant model")
ax[3].plot(h_sec, label="plant")
ax[3].set_xlabel("Tap")
ax[3].legend()
ax[4].set_title("Filtered reference and primary disturbance")
ax[4].plot(np.concatenate(dlog), label="d", alpha=0.7)
ax[4].plot(np.concatenate(fxlog), label="fx", alpha=0.8)
ax[4].set_xlabel("Sample")
ax[4].legend()
# Normalized squared estimation errors of the two identified paths.
pri_path_error = np.sum((wopt + wlog) ** 2, axis=1) / np.sum((wopt) ** 2)
sec_path_error = np.sum((h_sec - glog) ** 2, axis=1) / np.sum((h_sec) ** 2)
ax[5].set_title("Filtered reference and primary disturbance")
ax[5].plot(
    10 * np.log10(pri_path_error), label="primary path estimation error", alpha=0.7
)
ax[5].plot(
    10 * np.log10(sec_path_error), label="secondary path estimation error", alpha=0.7
)
ax[5].set_xlabel("Sample")
ax[5].legend()
plt.show()
| [
"numpy.random.normal",
"warnings.filterwarnings",
"numpy.log10",
"numpy.roll",
"numpy.ones",
"adafilt.utils.wgn",
"numpy.sum",
"numpy.zeros",
"numpy.fft.rfft",
"numpy.concatenate",
"adafilt.FastBlockLMSFilter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((248, 315), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""error"""', 'category': 'np.ComplexWarning'}), "(action='error', category=np.ComplexWarning)\n", (271, 315), False, 'import warnings\n'), ((630, 642), 'numpy.zeros', 'np.zeros', (['(64)'], {}), '(64)\n', (638, 642), True, 'import numpy as np\n'), ((665, 677), 'numpy.zeros', 'np.zeros', (['(64)'], {}), '(64)\n', (673, 677), True, 'import numpy as np\n'), ((808, 860), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n_buffers * blocklength)'}), '(0, 1, size=n_buffers * blocklength)\n', (824, 860), True, 'import numpy as np\n'), ((993, 1089), 'adafilt.FastBlockLMSFilter', 'FastBlockLMSFilter', (['length', 'blocklength'], {'stepsize': '(0.01)', 'leakage': '(0.99999)', 'power_averaging': '(0.9)'}), '(length, blocklength, stepsize=0.01, leakage=0.99999,\n power_averaging=0.9)\n', (1011, 1089), False, 'from adafilt import FastBlockLMSFilter, FIRFilter\n'), ((1271, 1342), 'adafilt.FastBlockLMSFilter', 'FastBlockLMSFilter', (['length', 'blocklength'], {'stepsize': '(0.01)', 'leakage': '(0.99999)'}), '(length, blocklength, stepsize=0.01, leakage=0.99999)\n', (1289, 1342), False, 'from adafilt import FastBlockLMSFilter, FIRFilter\n'), ((1476, 1497), 'numpy.zeros', 'np.zeros', (['blocklength'], {}), '(blocklength)\n', (1484, 1497), True, 'import numpy as np\n'), ((2549, 2621), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)', 'nrows': '(3)', 'figsize': '(14, 8)', 'constrained_layout': '(True)'}), '(ncols=2, nrows=3, figsize=(14, 8), constrained_layout=True)\n', (2561, 2621), True, 'import matplotlib.pyplot as plt\n'), ((4404, 4414), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4412, 4414), True, 'import matplotlib.pyplot as plt\n'), ((1192, 1222), 'numpy.zeros', 'np.zeros', (['(blocklength + length)'], {}), '(blocklength + length)\n', (1200, 1222), True, 'import numpy as np\n'), ((2680, 2700), 'numpy.concatenate', 'np.concatenate', 
(['ylog'], {}), '(ylog)\n', (2694, 2700), True, 'import numpy as np\n'), ((2735, 2755), 'numpy.concatenate', 'np.concatenate', (['elog'], {}), '(elog)\n', (2749, 2755), True, 'import numpy as np\n'), ((3807, 3827), 'numpy.concatenate', 'np.concatenate', (['dlog'], {}), '(dlog)\n', (3821, 3827), True, 'import numpy as np\n'), ((3862, 3883), 'numpy.concatenate', 'np.concatenate', (['fxlog'], {}), '(fxlog)\n', (3876, 3883), True, 'import numpy as np\n'), ((3968, 4002), 'numpy.sum', 'np.sum', (['((wopt + wlog) ** 2)'], {'axis': '(1)'}), '((wopt + wlog) ** 2, axis=1)\n', (3974, 4002), True, 'import numpy as np\n'), ((4005, 4022), 'numpy.sum', 'np.sum', (['(wopt ** 2)'], {}), '(wopt ** 2)\n', (4011, 4022), True, 'import numpy as np\n'), ((4042, 4077), 'numpy.sum', 'np.sum', (['((h_sec - glog) ** 2)'], {'axis': '(1)'}), '((h_sec - glog) ** 2, axis=1)\n', (4048, 4077), True, 'import numpy as np\n'), ((4080, 4098), 'numpy.sum', 'np.sum', (['(h_sec ** 2)'], {}), '(h_sec ** 2)\n', (4086, 4098), True, 'import numpy as np\n'), ((939, 960), 'adafilt.utils.wgn', 'wgn', (['signal', '(20)', '"""dB"""'], {}), "(signal, 20, 'dB')\n", (942, 960), False, 'from adafilt.utils import wgn\n'), ((1636, 1671), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'blocklength'], {}), '(0, 1, blocklength)\n', (1652, 1671), True, 'import numpy as np\n'), ((1694, 1732), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.01)', 'blocklength'], {}), '(0, 0.01, blocklength)\n', (1710, 1732), True, 'import numpy as np\n'), ((4184, 4208), 'numpy.log10', 'np.log10', (['pri_path_error'], {}), '(pri_path_error)\n', (4192, 4208), True, 'import numpy as np\n'), ((4282, 4306), 'numpy.log10', 'np.log10', (['sec_path_error'], {}), '(sec_path_error)\n', (4290, 4306), True, 'import numpy as np\n'), ((3174, 3194), 'numpy.concatenate', 'np.concatenate', (['elog'], {}), '(elog)\n', (3188, 3194), True, 'import numpy as np\n'), ((3240, 3262), 'numpy.concatenate', 'np.concatenate', (['e_plog'], {}), 
'(e_plog)\n', (3254, 3262), True, 'import numpy as np\n'), ((3404, 3422), 'numpy.fft.rfft', 'np.fft.rfft', (['h_pri'], {}), '(h_pri)\n', (3415, 3422), True, 'import numpy as np\n'), ((378, 391), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (385, 391), True, 'import numpy as np\n'), ((3437, 3464), 'numpy.roll', 'np.roll', (['h_sec', 'blocklength'], {}), '(h_sec, blocklength)\n', (3444, 3464), True, 'import numpy as np\n')] |
'''
Copyright (c) 2020 Fractus IT d.o.o. <http://fractus.io>
'''
import keras
import numpy as np
from keras.applications import vgg16, resnet50, mobilenet, inception_v3
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.imagenet_utils import decode_predictions
from keras.applications import imagenet_utils
from keras.applications.mobilenet import preprocess_input
from keras.applications.inception_v3 import preprocess_input
import argparse,textwrap
'''
script shows how to use pretrained Keras models in order to classify image
'''
def main():
    """Parse the required ``--model`` CLI argument and run the classifier.

    Unknown model names fall through every branch and do nothing, matching
    the original behavior (argparse ``choices`` is deliberately not used).
    """
    # Build the parser once -- the original constructed a bare
    # ArgumentParser() and immediately threw it away.
    parser = argparse.ArgumentParser(
        description='Please specify model.',
        usage='use "python %(prog)s --help" for more information',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-m',
                        '--model',
                        required=True,
                        help=textwrap.dedent('''\
                            Possible MODELS:
                                vgg
                                inception
                                resnet
                                mobilenet
                            '''))
    args = vars(parser.parse_args())
    model_name = args["model"]
    if model_name == 'vgg':
        classify_image_with_vgg()
    elif model_name == 'inception':
        classify_image_with_inception()
    elif model_name == 'resnet':
        classify_with_resnet()
    elif model_name == 'mobilenet':
        classify_image_with_mobilenet()
def classify_image_with_vgg():
    """Classify the demo image with an ImageNet-pretrained VGG16 model."""
    net = vgg16.VGG16(weights="imagenet")
    print(net.summary())
    batch = vgg16.preprocess_input(load_image())
    print(decode_predictions(net.predict(batch)))
def classify_image_with_inception():
    """Classify the demo image with an ImageNet-pretrained InceptionV3 model."""
    net = inception_v3.InceptionV3(weights='imagenet')
    print(net.summary())
    batch = inception_v3.preprocess_input(load_image())
    print(imagenet_utils.decode_predictions(net.predict(batch)))
def classify_with_resnet():
    """Classify the demo image with an ImageNet-pretrained ResNet50 model.

    NOTE(review): the name breaks the ``classify_image_with_*`` convention of
    its siblings but is kept because main() dispatches on it.
    """
    net = resnet50.ResNet50(weights='imagenet')
    print(net.summary())
    batch = imagenet_utils.preprocess_input(load_image())
    print(imagenet_utils.decode_predictions(net.predict(batch)))
def classify_image_with_mobilenet():
    """Classify the demo image with an ImageNet-pretrained MobileNet model."""
    net = mobilenet.MobileNet(weights='imagenet')
    print(net.summary())
    batch = mobilenet.preprocess_input(load_image())
    print(imagenet_utils.decode_predictions(net.predict(batch)))
def load_image(filename='cat.jpg'):
    """Load *filename*, resize to 224x224, and return a batch of one array.

    The default argument keeps the original hard-coded ``'cat.jpg'``
    behavior, while letting callers classify other images.
    """
    image = load_img(filename, target_size=(224, 224))
    image = img_to_array(image)
    # Keras models expect a leading batch dimension.
    image = np.expand_dims(image, axis=0)
    return image
# Run the CLI entry point only when executed as a script.
if __name__ == '__main__':
    main()
| [
"keras.preprocessing.image.img_to_array",
"textwrap.dedent",
"keras.applications.vgg16.VGG16",
"argparse.ArgumentParser",
"keras.applications.inception_v3.preprocess_input",
"keras.preprocessing.image.load_img",
"keras.applications.mobilenet.preprocess_input",
"keras.applications.vgg16.preprocess_inpu... | [((659, 684), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (682, 684), False, 'import argparse, textwrap\n'), ((701, 877), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Please specify model."""', 'usage': '"""use "python %(prog)s --help" for more information"""', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=\'Please specify model.\', usage=\n \'use "python %(prog)s --help" for more information\', formatter_class=\n argparse.RawTextHelpFormatter)\n', (724, 877), False, 'import argparse, textwrap\n'), ((1752, 1783), 'keras.applications.vgg16.VGG16', 'vgg16.VGG16', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (1763, 1783), False, 'from keras.applications import vgg16, resnet50, mobilenet, inception_v3\n'), ((1853, 1882), 'keras.applications.vgg16.preprocess_input', 'vgg16.preprocess_input', (['image'], {}), '(image)\n', (1875, 1882), False, 'from keras.applications import vgg16, resnet50, mobilenet, inception_v3\n'), ((2028, 2072), 'keras.applications.inception_v3.InceptionV3', 'inception_v3.InceptionV3', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (2052, 2072), False, 'from keras.applications import vgg16, resnet50, mobilenet, inception_v3\n'), ((2142, 2178), 'keras.applications.inception_v3.preprocess_input', 'inception_v3.preprocess_input', (['image'], {}), '(image)\n', (2171, 2178), False, 'from keras.applications import vgg16, resnet50, mobilenet, inception_v3\n'), ((2324, 2361), 'keras.applications.resnet50.ResNet50', 'resnet50.ResNet50', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (2341, 2361), False, 'from keras.applications import vgg16, resnet50, mobilenet, inception_v3\n'), ((2431, 2469), 'keras.applications.imagenet_utils.preprocess_input', 'imagenet_utils.preprocess_input', (['image'], {}), '(image)\n', (2462, 2469), False, 'from keras.applications import 
imagenet_utils\n'), ((2622, 2661), 'keras.applications.mobilenet.MobileNet', 'mobilenet.MobileNet', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (2641, 2661), False, 'from keras.applications import vgg16, resnet50, mobilenet, inception_v3\n'), ((2731, 2764), 'keras.applications.mobilenet.preprocess_input', 'mobilenet.preprocess_input', (['image'], {}), '(image)\n', (2757, 2764), False, 'from keras.applications import vgg16, resnet50, mobilenet, inception_v3\n'), ((2926, 2968), 'keras.preprocessing.image.load_img', 'load_img', (['filename'], {'target_size': '(224, 224)'}), '(filename, target_size=(224, 224))\n', (2934, 2968), False, 'from keras.preprocessing.image import load_img\n'), ((2982, 3001), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (2994, 3001), False, 'from keras.preprocessing.image import img_to_array\n'), ((3015, 3044), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (3029, 3044), True, 'import numpy as np\n'), ((1940, 1971), 'keras.applications.imagenet_utils.decode_predictions', 'decode_predictions', (['predictions'], {}), '(predictions)\n', (1958, 1971), False, 'from keras.applications.imagenet_utils import decode_predictions\n'), ((2231, 2274), 'keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', (['predicts'], {}), '(predicts)\n', (2264, 2274), False, 'from keras.applications import imagenet_utils\n'), ((2522, 2565), 'keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', (['predicts'], {}), '(predicts)\n', (2555, 2565), False, 'from keras.applications import imagenet_utils\n'), ((2817, 2860), 'keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', (['predicts'], {}), '(predicts)\n', (2850, 2860), False, 'from keras.applications import imagenet_utils\n'), ((1086, 1335), 'textwrap.dedent', 'textwrap.dedent', (['""" Possible MODELS:\n 
vgg\n inception\n resnet\n mobilenet \n """'], {}), '(\n """ Possible MODELS:\n vgg\n inception\n resnet\n mobilenet \n """\n )\n', (1101, 1335), False, 'import argparse, textwrap\n')] |
"""utils for interpreting variant effect prediction for Heritability
"""
import gzip
import os
import sys
from collections import defaultdict
import h5py
import numpy as np
import pandas as pd
def read_vep(vep_dir, check_sanity=False):
    """Load a variant-effect-prediction folder.

    Expects exactly one ``*_row_labels.txt`` (tab-separated SNP annotations)
    and one ``*_abs_diffs.h5`` (matrix stored under the ``data`` key) in
    *vep_dir*.

    Parameters
    ----------
    vep_dir : str
        Directory holding the two files.
    check_sanity : bool
        If True, assert that the number of data rows equals the number of
        reference-matching variants (``ref_match`` column) in the labels.

    Returns
    -------
    (pandas.DataFrame, numpy.ndarray)
        The label table and the score matrix.
    """
    _label_fn = [x for x in os.listdir(vep_dir) if x.endswith("_row_labels.txt")]
    _data_fn = [x for x in os.listdir(vep_dir) if x.endswith("_abs_diffs.h5")]
    assert len(_label_fn) == len(
        _data_fn) == 1, "Each folder must have exact one row_labels and one abs_diffs file; found %i row_labels and " \
                        "%i abs_diffs" % (len(_label_fn), len(_data_fn))
    label_fn = os.path.join(vep_dir, _label_fn[0])
    data_fn = os.path.join(vep_dir, _data_fn[0])
    vep_df = pd.read_csv(label_fn, sep='\t')
    # Context manager closes the HDF5 handle even on failure (the original
    # leaked it); h5py>=3 removed Dataset.value, so read with [()] instead.
    try:
        with h5py.File(data_fn, 'r') as data_fh:
            vep_data = data_fh['data'][()]
    except Exception:
        print("read in h5 file failed")
        sys.exit(250)
    if check_sanity:
        assert vep_data.shape[0] == np.sum(vep_df['ref_match'])
    return vep_df, vep_data
def read_vep_logfc(vep_dir):
    """Load (or compute and cache) normalized |logit fold-change| VEP scores.

    If a precomputed ``*_abs_logfc.npz`` exists in *vep_dir* it is loaded
    directly.  Otherwise the scores are computed from the matching
    ``*ref_predictions.h5`` / ``*alt_predictions.h5`` matrices, each column
    is normalized by its maximum, and the result is cached to
    ``VEP_abs_logfc.npz``.

    Returns
    -------
    (pandas.DataFrame, numpy.ndarray)
        The label table and the normalized score matrix.
    """
    _label_fn = [x for x in os.listdir(vep_dir) if x.endswith("_row_labels.txt")]
    _data_fn = [x for x in os.listdir(vep_dir) if x.endswith("_abs_logfc.npz")]
    _data_fn1 = [x for x in os.listdir(vep_dir) if x.endswith("ref_predictions.h5")]
    _data_fn2 = [x for x in os.listdir(vep_dir) if x.endswith("alt_predictions.h5")]
    label_fn = os.path.join(vep_dir, _label_fn[0])
    vep_df = pd.read_csv(label_fn, sep='\t')
    if len(_data_fn):
        # Cached result available; skip the expensive recomputation.
        assert len(_data_fn) == 1
        vep_data = np.load(os.path.join(vep_dir, _data_fn[0]))['arr_0']
    else:
        assert len(_label_fn) == len(_data_fn1) == len(
            _data_fn2) == 1, "Each folder must have exact one row_labels and one abs_diffs file; found %i row_labels " \
                             "and %i, %i abs_diffs" % (len(_label_fn), len(_data_fn1), len(_data_fn2))
        data_fn1 = os.path.join(vep_dir, _data_fn1[0])
        data_fn2 = os.path.join(vep_dir, _data_fn2[0])
        # Context managers close the HDF5 handles even on failure (the
        # original leaked both); h5py>=3 removed Dataset.value, so read
        # with [()] instead.
        try:
            with h5py.File(data_fn1, 'r') as data_fh1, \
                    h5py.File(data_fn2, 'r') as data_fh2:
                vep_data1 = data_fh1['data'][()]
                vep_data2 = data_fh2['data'][()]
        except Exception:
            print("read in h5 file failed")
            sys.exit(250)
        # Clip away saturated probabilities so the logit transform is finite.
        vep_data1 = np.clip(vep_data1, 0.0001, 0.9999)
        vep_data2 = np.clip(vep_data2, 0.0001, 0.9999)
        vep_data = np.abs(np.log(vep_data1 / (1 - vep_data1)) - np.log(vep_data2 / (1 - vep_data2)))
        # Normalize each column to [0, 1] by its maximum (vep_data >= 0).
        colmax = np.apply_along_axis(np.max, 0, vep_data)
        vep_data /= colmax
        np.savez(os.path.join(vep_dir, "VEP_abs_logfc.npz"), vep_data)
    return vep_df, vep_data
def convert_to_ldsc_annot_by_label(vep_df, vep_data, label_fp, baselineLD_dir, output_dir, resume_prev_run=False):
"""read in the h5 vep data snp annot and numerical values, convert to
the existing baselineLD annotations for next steps
"""
baselineLDs = [x for x in os.listdir(baselineLD_dir) if x.endswith("annot.gz")]
# label_df is annotation for output chromatin features
label_df = pd.read_table(label_fp)
# vep_dict is a mapping from chrom,bp to vep_data row index
vep_dict = defaultdict(list)
print('making vep mapper..')
for i in range(vep_df.shape[0]):
vep_dict[(vep_df.chrom[i], str(vep_df.pos[i]))].append(i)
# iterate through each labels in label_df, make an independent ldsc-annot
for k in range(label_df.shape[0]):
label_idx = label_df['label_idx'][k]
label_name = label_df['label_name'][k]
# normalize label names
label_name = label_name.replace('|', '--')
label_name = label_name.replace('(', '_').replace(')', '_')
label_output_dir = os.path.join(output_dir, label_name)
os.makedirs(label_output_dir, exist_ok=True)
print("%i/%i %s" % (k, label_df.shape[0], label_name))
for chrom_fn in baselineLDs:
chrom = chrom_fn.split(".")[-3]
print(chrom)
if resume_prev_run and os.path.isfile(
os.path.join(label_output_dir, "%s.%s.annot.gz" % (label_name, chrom))):
print("found %s, skip" % chrom)
continue
with gzip.GzipFile(os.path.join(baselineLD_dir, chrom_fn), 'rb') as fi, gzip.GzipFile(
os.path.join(label_output_dir, "%s.%s.annot.gz" % (label_name, chrom)), 'wb') as fo:
fi.readline() # pop first line
fo.write(("\t".join(['CHR', 'BP', 'SNP', 'CM', label_name]) + '\n').encode('utf-8'))
# for line in tqdm(fi):
for line in fi:
line = line.decode('utf-8')
ele = line.strip().split()
_chr, _bp, _snp, _cm = ele[0:4]
# _bp = str(int(_bp) - 1)
# _annot_idx = np.where(label_df.eval("pos==%s & chrom=='chr%s'"%(_bp, _chr)))[0]
_annot_idx = vep_dict[("chr%s" % _chr, _bp)]
if len(_annot_idx) == 0:
# this is less than 0.5% - ignored
# warnings.warn("baselineLD variant not found in vep: %s,%s"%(_chr, _bp))
# continue
_annot = "0"
else:
_annot = "%.5f" % np.max(vep_data[_annot_idx, label_idx])
fo.write(("\t".join([
_chr,
_bp,
_snp,
_cm,
_annot]) + '\n').encode('utf-8')
)
def make_vep_mapper(vep_df):
# vep_dict is a mapping from chrom,bp to vep_data row index
vep_dict = defaultdict(list)
print('making vep mapper..')
for i in range(vep_df.shape[0]):
vep_dict[(vep_df.chrom[i], str(vep_df.pos[i]))].append(i)
return vep_dict
def convert_to_ldsc_annot(vep_dir, label_fp, baselineLD_dir, output_dir, chroms_part=None, use_temp=None,
use_logfc=False):
"""read in the h5 vep data snp annot and numerical values, convert to
the existing baselineLD annotations based on a set of baselineLD SNPs
"""
if use_logfc:
vep_df, vep_data = read_vep_logfc(vep_dir)
else:
vep_df, vep_data = read_vep(vep_dir, check_sanity=False)
chroms_target = chroms_part.split(',')
baselineLDs = [x for x in os.listdir(baselineLD_dir) if x.endswith("annot.gz")]
# label_df is annotation for output chromatin features
label_df = pd.read_table(label_fp)
# vep_dict is a mapping from chrom,bp to vep_data row index
vep_dict = defaultdict(list)
print('making vep mapper..')
for i in range(vep_df.shape[0]):
if vep_df.chrom[i].strip('chr') not in chroms_target:
continue
vep_dict[(vep_df.chrom[i], str(vep_df.pos[i]))].append(i)
print("done")
# iterate through each labels in label_df, make a joint ldsc-annot
label_names = []
for k in range(label_df.shape[0]):
label_idx = label_df['label_idx'][k]
label_name = label_df['label_name'][k]
# DO NOT RUN: label names should already
# be normalized
# normalize label names
# label_name = label_name.replace('|', '--')
# label_name = label_name.replace('(', '_').replace(')', '_')
# label_name = label_name.replace('+', '_').replace(' ','')
label_names.append(label_name)
num_labels = len(label_names)
assert num_labels == vep_data.shape[1]
for chrom_fn in baselineLDs:
chrom = chrom_fn.split(".")[-3]
if not chrom in chroms_target:
continue
print(chrom)
if use_temp:
fo = open(os.path.join(use_temp, "joint.%s.annot" % (chrom)), 'w')
else:
fo = open(os.path.join(output_dir, "joint.%s.annot" % (chrom)), 'w')
with gzip.GzipFile(os.path.join(baselineLD_dir, chrom_fn), 'rb') as fi:
fi.readline() # pop first line
fo.write(("\t".join(['CHR', 'BP', 'SNP', 'CM'] + label_names) + '\n'))
# for line in tqdm(fi):
counter = 0
for line in fi:
if counter % 100000 == 0:
print("processed %i" % counter)
counter += 1
line = line.decode('utf-8')
ele = line.strip().split()
_chr, _bp, _snp, _cm = ele[0:4]
_annot_idx = vep_dict[("chr%s" % _chr, _bp)]
if len(_annot_idx) == 0:
# this is less than 0.5% - ignored
# warnings.warn("baselineLD variant not found in vep: %s,%s"%(_chr, _bp))
# continue
_annot = ["0"] * num_labels
else:
# _annot = ["%.5f"%np.mean(vep_data[_annot_idx, label_idx]) for label_idx in range(num_labels)]
if len(_annot_idx) > 1:
_annot = np.apply_along_axis(np.mean, 0, vep_data[_annot_idx, :]).flatten()
else:
_annot = vep_data[_annot_idx, :].flatten()
_annot = ["%.5f" % x for x in _annot]
fo.write(("\t".join([
_chr,
_bp,
_snp,
_cm,
] + _annot) + '\n')
)
fo.close()
def split_labels_to_folders(l2_prefix, output_dir):
l2_df = pd.read_csv(l2_prefix + '.l2.ldscore.gz', sep="\t")
M_df = pd.read_csv(l2_prefix + '.l2.M', sep="\t", header=None)
M_5_50_df = pd.read_csv(l2_prefix + ".l2.M_5_50", header=None, sep="\t")
annot_df = pd.read_csv(l2_prefix + ".annot", sep="\t")
chrom = l2_prefix.split('.')[-1]
for i in range(3, l2_df.shape[1]):
label_name = l2_df.columns.values[i]
label_folder = os.path.join(output_dir, label_name)
os.makedirs(label_folder, exist_ok=True)
l2_df.iloc[:, [0, 1, 2, i]].to_csv(
os.path.join(label_folder, "%s.%s.l2.ldscore.gz" % (label_name, chrom)),
index=False,
sep="\t")
M_df.iloc[0, [i - 3]].to_csv(os.path.join(label_folder, "%s.%s.l2.M" % (label_name, chrom)), index=False,
header=False)
M_5_50_df.iloc[0, [i - 3]].to_csv(os.path.join(label_folder, "%s.%s.l2.M_5_50" % (label_name, chrom)),
index=False, header=False)
annot_df.iloc[:, [0, 1, 2, 3, i + 1]].to_csv(os.path.join(label_folder, "%s.%s.annot.gz" % (label_name, chrom)),
sep="\t", index=False, header=True)
| [
"numpy.clip",
"os.listdir",
"pandas.read_csv",
"os.makedirs",
"numpy.log",
"os.path.join",
"h5py.File",
"numpy.max",
"numpy.sum",
"numpy.apply_along_axis",
"collections.defaultdict",
"pandas.read_table",
"sys.exit"
] | [((643, 678), 'os.path.join', 'os.path.join', (['vep_dir', '_label_fn[0]'], {}), '(vep_dir, _label_fn[0])\n', (655, 678), False, 'import os\n'), ((693, 727), 'os.path.join', 'os.path.join', (['vep_dir', '_data_fn[0]'], {}), '(vep_dir, _data_fn[0])\n', (705, 727), False, 'import os\n'), ((741, 772), 'pandas.read_csv', 'pd.read_csv', (['label_fn'], {'sep': '"""\t"""'}), "(label_fn, sep='\\t')\n", (752, 772), True, 'import pandas as pd\n'), ((787, 810), 'h5py.File', 'h5py.File', (['data_fn', '"""r"""'], {}), "(data_fn, 'r')\n", (796, 810), False, 'import h5py\n'), ((1427, 1462), 'os.path.join', 'os.path.join', (['vep_dir', '_label_fn[0]'], {}), '(vep_dir, _label_fn[0])\n', (1439, 1462), False, 'import os\n'), ((1476, 1507), 'pandas.read_csv', 'pd.read_csv', (['label_fn'], {'sep': '"""\t"""'}), "(label_fn, sep='\\t')\n", (1487, 1507), True, 'import pandas as pd\n'), ((3161, 3184), 'pandas.read_table', 'pd.read_table', (['label_fp'], {}), '(label_fp)\n', (3174, 3184), True, 'import pandas as pd\n'), ((3264, 3281), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3275, 3281), False, 'from collections import defaultdict\n'), ((5807, 5824), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5818, 5824), False, 'from collections import defaultdict\n'), ((6634, 6657), 'pandas.read_table', 'pd.read_table', (['label_fp'], {}), '(label_fp)\n', (6647, 6657), True, 'import pandas as pd\n'), ((6737, 6754), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6748, 6754), False, 'from collections import defaultdict\n'), ((9684, 9735), 'pandas.read_csv', 'pd.read_csv', (["(l2_prefix + '.l2.ldscore.gz')"], {'sep': '"""\t"""'}), "(l2_prefix + '.l2.ldscore.gz', sep='\\t')\n", (9695, 9735), True, 'import pandas as pd\n'), ((9747, 9802), 'pandas.read_csv', 'pd.read_csv', (["(l2_prefix + '.l2.M')"], {'sep': '"""\t"""', 'header': 'None'}), "(l2_prefix + '.l2.M', sep='\\t', header=None)\n", (9758, 9802), True, 'import pandas as 
pd\n'), ((9819, 9879), 'pandas.read_csv', 'pd.read_csv', (["(l2_prefix + '.l2.M_5_50')"], {'header': 'None', 'sep': '"""\t"""'}), "(l2_prefix + '.l2.M_5_50', header=None, sep='\\t')\n", (9830, 9879), True, 'import pandas as pd\n'), ((9895, 9938), 'pandas.read_csv', 'pd.read_csv', (["(l2_prefix + '.annot')"], {'sep': '"""\t"""'}), "(l2_prefix + '.annot', sep='\\t')\n", (9906, 9938), True, 'import pandas as pd\n'), ((1947, 1982), 'os.path.join', 'os.path.join', (['vep_dir', '_data_fn1[0]'], {}), '(vep_dir, _data_fn1[0])\n', (1959, 1982), False, 'import os\n'), ((2002, 2037), 'os.path.join', 'os.path.join', (['vep_dir', '_data_fn2[0]'], {}), '(vep_dir, _data_fn2[0])\n', (2014, 2037), False, 'import os\n'), ((2057, 2081), 'h5py.File', 'h5py.File', (['data_fn1', '"""r"""'], {}), "(data_fn1, 'r')\n", (2066, 2081), False, 'import h5py\n'), ((2101, 2125), 'h5py.File', 'h5py.File', (['data_fn2', '"""r"""'], {}), "(data_fn2, 'r')\n", (2110, 2125), False, 'import h5py\n'), ((2339, 2373), 'numpy.clip', 'np.clip', (['vep_data1', '(0.0001)', '(0.9999)'], {}), '(vep_data1, 0.0001, 0.9999)\n', (2346, 2373), True, 'import numpy as np\n'), ((2394, 2428), 'numpy.clip', 'np.clip', (['vep_data2', '(0.0001)', '(0.9999)'], {}), '(vep_data2, 0.0001, 0.9999)\n', (2401, 2428), True, 'import numpy as np\n'), ((2547, 2587), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.max', '(0)', 'vep_data'], {}), '(np.max, 0, vep_data)\n', (2566, 2587), True, 'import numpy as np\n'), ((3806, 3842), 'os.path.join', 'os.path.join', (['output_dir', 'label_name'], {}), '(output_dir, label_name)\n', (3818, 3842), False, 'import os\n'), ((3851, 3895), 'os.makedirs', 'os.makedirs', (['label_output_dir'], {'exist_ok': '(True)'}), '(label_output_dir, exist_ok=True)\n', (3862, 3895), False, 'import os\n'), ((10083, 10119), 'os.path.join', 'os.path.join', (['output_dir', 'label_name'], {}), '(output_dir, label_name)\n', (10095, 10119), False, 'import os\n'), ((10128, 10168), 'os.makedirs', 'os.makedirs', 
(['label_folder'], {'exist_ok': '(True)'}), '(label_folder, exist_ok=True)\n', (10139, 10168), False, 'import os\n'), ((268, 287), 'os.listdir', 'os.listdir', (['vep_dir'], {}), '(vep_dir)\n', (278, 287), False, 'import os\n'), ((349, 368), 'os.listdir', 'os.listdir', (['vep_dir'], {}), '(vep_dir)\n', (359, 368), False, 'import os\n'), ((921, 934), 'sys.exit', 'sys.exit', (['(250)'], {}), '(250)\n', (929, 934), False, 'import sys\n'), ((992, 1019), 'numpy.sum', 'np.sum', (["vep_df['ref_match']"], {}), "(vep_df['ref_match'])\n", (998, 1019), True, 'import numpy as np\n'), ((1107, 1126), 'os.listdir', 'os.listdir', (['vep_dir'], {}), '(vep_dir)\n', (1117, 1126), False, 'import os\n'), ((1188, 1207), 'os.listdir', 'os.listdir', (['vep_dir'], {}), '(vep_dir)\n', (1198, 1207), False, 'import os\n'), ((1269, 1288), 'os.listdir', 'os.listdir', (['vep_dir'], {}), '(vep_dir)\n', (1279, 1288), False, 'import os\n'), ((1354, 1373), 'os.listdir', 'os.listdir', (['vep_dir'], {}), '(vep_dir)\n', (1364, 1373), False, 'import os\n'), ((2666, 2708), 'os.path.join', 'os.path.join', (['vep_dir', '"""VEP_abs_logfc.npz"""'], {}), "(vep_dir, 'VEP_abs_logfc.npz')\n", (2678, 2708), False, 'import os\n'), ((3033, 3059), 'os.listdir', 'os.listdir', (['baselineLD_dir'], {}), '(baselineLD_dir)\n', (3043, 3059), False, 'import os\n'), ((6506, 6532), 'os.listdir', 'os.listdir', (['baselineLD_dir'], {}), '(baselineLD_dir)\n', (6516, 6532), False, 'import os\n'), ((10225, 10296), 'os.path.join', 'os.path.join', (['label_folder', "('%s.%s.l2.ldscore.gz' % (label_name, chrom))"], {}), "(label_folder, '%s.%s.l2.ldscore.gz' % (label_name, chrom))\n", (10237, 10296), False, 'import os\n'), ((10382, 10444), 'os.path.join', 'os.path.join', (['label_folder', "('%s.%s.l2.M' % (label_name, chrom))"], {}), "(label_folder, '%s.%s.l2.M' % (label_name, chrom))\n", (10394, 10444), False, 'import os\n'), ((10552, 10619), 'os.path.join', 'os.path.join', (['label_folder', "('%s.%s.l2.M_5_50' % (label_name, 
chrom))"], {}), "(label_folder, '%s.%s.l2.M_5_50' % (label_name, chrom))\n", (10564, 10619), False, 'import os\n'), ((10743, 10809), 'os.path.join', 'os.path.join', (['label_folder', "('%s.%s.annot.gz' % (label_name, chrom))"], {}), "(label_folder, '%s.%s.annot.gz' % (label_name, chrom))\n", (10755, 10809), False, 'import os\n'), ((1592, 1626), 'os.path.join', 'os.path.join', (['vep_dir', '_data_fn[0]'], {}), '(vep_dir, _data_fn[0])\n', (1604, 1626), False, 'import os\n'), ((2305, 2318), 'sys.exit', 'sys.exit', (['(250)'], {}), '(250)\n', (2313, 2318), False, 'import sys\n'), ((2455, 2490), 'numpy.log', 'np.log', (['(vep_data1 / (1 - vep_data1))'], {}), '(vep_data1 / (1 - vep_data1))\n', (2461, 2490), True, 'import numpy as np\n'), ((2493, 2528), 'numpy.log', 'np.log', (['(vep_data2 / (1 - vep_data2))'], {}), '(vep_data2 / (1 - vep_data2))\n', (2499, 2528), True, 'import numpy as np\n'), ((7828, 7876), 'os.path.join', 'os.path.join', (['use_temp', "('joint.%s.annot' % chrom)"], {}), "(use_temp, 'joint.%s.annot' % chrom)\n", (7840, 7876), False, 'import os\n'), ((7921, 7971), 'os.path.join', 'os.path.join', (['output_dir', "('joint.%s.annot' % chrom)"], {}), "(output_dir, 'joint.%s.annot' % chrom)\n", (7933, 7971), False, 'import os\n'), ((8007, 8045), 'os.path.join', 'os.path.join', (['baselineLD_dir', 'chrom_fn'], {}), '(baselineLD_dir, chrom_fn)\n', (8019, 8045), False, 'import os\n'), ((4136, 4206), 'os.path.join', 'os.path.join', (['label_output_dir', "('%s.%s.annot.gz' % (label_name, chrom))"], {}), "(label_output_dir, '%s.%s.annot.gz' % (label_name, chrom))\n", (4148, 4206), False, 'import os\n'), ((4313, 4351), 'os.path.join', 'os.path.join', (['baselineLD_dir', 'chrom_fn'], {}), '(baselineLD_dir, chrom_fn)\n', (4325, 4351), False, 'import os\n'), ((4401, 4471), 'os.path.join', 'os.path.join', (['label_output_dir', "('%s.%s.annot.gz' % (label_name, chrom))"], {}), "(label_output_dir, '%s.%s.annot.gz' % (label_name, chrom))\n", (4413, 4471), False, 'import 
os\n'), ((5409, 5448), 'numpy.max', 'np.max', (['vep_data[_annot_idx, label_idx]'], {}), '(vep_data[_annot_idx, label_idx])\n', (5415, 5448), True, 'import numpy as np\n'), ((9078, 9134), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.mean', '(0)', 'vep_data[_annot_idx, :]'], {}), '(np.mean, 0, vep_data[_annot_idx, :])\n', (9097, 9134), True, 'import numpy as np\n')] |
import xml.etree.ElementTree as xmlet
from pathlib import Path
import numpy as np
import craamvert.utils.iso_time as time
from astropy.io import fits
from craamvert.instruments.sst import SST_FITS_FILE_NAME
from craamvert.utils import SST_INSTRUMENT, OBJECTS_NOT_FROM_SAME_INSTRUMENT, CONCATENATED_DATA, INVALID_FILE_NAME, FILE_ALREADY_EXISTS
from craamvert.instruments import ORIGIN_CRAAM, ORIGIN, TELESCOPE, SST_FULL_NAME, OBSERVATORY, CASLEO, STATION, \
SST_LATITUDE_LONGITUDE_HEIGHT, TIMEZONE, GMT_NEGATIVE_3, OBSERVATION_DATE, START_TIME, END_TIME, FILE_ORIGIN, \
SST_RBD, DATA_TYPE, FREQUENCY, SST_FREQUENCY, add_copyright, HISTORY, add_sst_comments, FITS_FILE_EXTENSION
def concatenate(rbds):
"""Method for concatenating RBDs. It returns a new RBD object
representing the concatenated data ordered by time.
Parameters:
rbds : list, tuple - List or tuple of RBD objects to be concatenated. The objects must be from same type
Raises
------
TypeError
If the objects have different data structures.
"""
try:
new_data = np.concatenate([rbd.body_data for rbd in rbds])
except TypeError:
raise TypeError(OBJECTS_NOT_FROM_SAME_INSTRUMENT.format(SST_INSTRUMENT))
# Order the data by time
new_data = new_data[new_data["time"].argsort()]
rbd = RBD()
filenames = list()
for r in rbds:
if isinstance(r.__filename, list):
filenames.extend(r.__filename)
else:
filenames.append(r.__filename)
filenames = sorted(filenames)
rbd.__filename = filenames
rbd.__type = rbds[0].__type
rbd.__date = rbds[0].date
rbd.__data = new_data
date = filenames[0].split(".")
time = "00:00"
if len(date) > 1:
time = date[1][:2] + ":" + date[1][2:4]
rbd.__time = time
rbd._header = rbds[0]._header
rbd.__history.append(CONCATENATED_DATA)
return rbd
class RBD:
def __init__(self):
self.__filename = ""
self.__type = ""
self.__date = ""
self.__time = ""
self.__data = np.empty((0))
self.__history = list()
self.__original_file_type = "RBD"
def __add__(self, other):
"""
Magic method for concatenating RBDs.
Usage: rbd3 = rbd1 + rbd2
"""
return concatenate((self, other))
@property
def __columns(self):
"""Returns the names of the columns in a tuple."""
return self.__data.dtype.names
def __reduced(self, columns=None):
"""Returns a reduced version of the RBD
By default the reduced version contains:
time : time in Hus
azipos : encoder's azimuth
elepos : encoder's elevation
adc or adcval : receiver's output
opmode : oberving mode
target : target observed
x_off : scan offset in azimuth
y_off : scan offset in elevation
Parameters
----------
columns : list, optional
List of which columns the reduced version should contain.
"""
if not columns:
adc = "adc" if "adc" in self.__columns else "adcval"
columns = ['time', adc, 'elepos', 'azipos',
'opmode', 'target', 'x_off', 'y_off']
rbd = RBD()
rbd.__filename = self.__filename
rbd.__type = self.__type
rbd.__date = self.__date
rbd.__time = self.__time
rbd._header = {column: self._header[column] for column in columns}
rbd.__data = self.__data[[name for name in columns]]
rbd.__history.append("Reduced Data File. Selected Variables saved")
return rbd
def get_time_span(self):
"""
Returns a tuple containing the ISO time of the
first and last record found in the data.
"""
nonzero = self.__data["time"].nonzero()
return (time.time(self.__data["time"][nonzero[0][0]]), time.time(self.__data["time"][nonzero[0][-1]]))
def to_fits(self, name=None, output_path=None):
"""Writes the RBD data to a FITS file.
By default the name of the fits file is defined as:
sst_[integration | subintegration | auxiliary]_YYYY-MM-DDTHH:MM:SS.SSS-HH:MM:SS.SSS_level0.fits
The file has two HDUs. The primary containing just a header with general
information such as the origin, telescope, time zone. The second is a BinaryTable
containing the data and a header with data specific information.
Parameters
----------
name : str, optional
Name of the fits file.
output_path : str, pathlib.Path, optional
Output path of the fits file. By default
is where the script is being called from.
Raises
------
FileExistsError
If a file with the same name already exists
in the output path.
"""
t_start, t_end = self.get_time_span()
if not name:
name = SST_FITS_FILE_NAME.format(self.__type.lower(), self.__date, t_start, t_end)
else:
if not name.endswith(FITS_FILE_EXTENSION):
name += FITS_FILE_EXTENSION
name = Path(name)
if not output_path:
output_path = "."
output_path = Path(output_path).expanduser()
if (output_path / name).exists():
raise FileExistsError(FILE_ALREADY_EXISTS.format(str(name)))
hdu = fits.PrimaryHDU()
hdu.header.append((ORIGIN, ORIGIN_CRAAM, ''))
hdu.header.append((TELESCOPE, SST_FULL_NAME, ''))
hdu.header.append((OBSERVATORY, CASLEO, ''))
hdu.header.append((STATION, SST_LATITUDE_LONGITUDE_HEIGHT, ''))
hdu.header.append((TIMEZONE, GMT_NEGATIVE_3, ''))
hdu.header.append((OBSERVATION_DATE, self.__date, ''))
hdu.header.append((START_TIME, self.__date + 'T' + t_start, ''))
hdu.header.append((END_TIME, self.__date + 'T' + t_end, ''))
hdu.header.append((DATA_TYPE, self.__type, ''))
if isinstance(self.__filename, list):
for fname in self.__filename: hdu.header.append((FILE_ORIGIN, fname, SST_RBD))
else:
hdu.header.append((FILE_ORIGIN, self.__filename, SST_RBD))
hdu.header.append((FREQUENCY, SST_FREQUENCY, ''))
# About the Copyright
add_copyright(hdu)
# History
hdu.header.append((HISTORY, "Converted to FITS level-0 with rbd.py"))
for hist in self.__history:
hdu.header.append((HISTORY, hist))
dscal = 1.0
fits_cols = list()
for column, values in self._header.items():
var_dim = str(values[0])
offset = 0
if values[1] == np.int32:
var_dim += "J"
elif values[1] == np.uint16:
var_dim += "I"
offset = 32768
elif values[1] == np.int16:
var_dim += "I"
elif values[1] == np.byte:
var_dim += "B"
elif values[1] == np.float32:
var_dim += "E"
fits_cols.append(fits.Column(name=column,
format=var_dim,
unit=values[2],
bscale=dscal,
bzero=offset,
array=self.__data[column]))
tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs(fits_cols))
add_sst_comments(tbhdu)
hdulist = fits.HDUList([hdu, tbhdu])
hdulist.writeto(output_path / name)
def __find_header(self, path_to_xml):
"""
Method for finding the correct description file.
Returns a dict representing the description found,
the key is the variable name and the value is a list
containing the var dimension, type and unit respectively.
"""
span_table = xmlet.parse(path_to_xml / Path("SSTDataFormatTimeSpanTable.xml")).getroot()
filetype = "Data" if self.__type == "Integration" or self.__type == "Subintegration" else "Auxiliary"
for child in span_table:
if child[0].text == filetype and child[1].text <= self.__date and child[2].text >= self.__date:
data_description_filename = child[3].text
xml = xmlet.parse(path_to_xml / Path(data_description_filename)).getroot()
header = dict()
for child in xml:
var_name = child[0].text
var_dim = int(child[1].text)
var_type = child[2].text
var_unit = child[3].text
if var_type == "xs:int":
np_type = np.int32
elif var_type == "xs:unsignedShort":
np_type = np.uint16
elif var_type == "xs:short":
np_type = np.int16
elif var_type == "xs:byte":
np_type = np.byte
elif var_type == "xs:float":
np_type = np.float32
header.update({var_name: [var_dim, np_type, var_unit]})
return header
def from_file(self, path, name, path_to_xml):
"""Loads data from a file and returns an `RBD` object.
Parameters
----------
path : pathlib.Path
Location of the RBD file in the file system.
name : str
Name of the RBD file.
path_to_xml : Path, optional
Location of the RBD xml description files in the file system.
Raises
------
ValueError
If the filename is invalid.
"""
self.__filename = name
type_prefix = self.__filename[:2].upper()
if type_prefix == "RS":
self.__type = "Integration"
elif type_prefix == "RF":
self.__type = "Subintegration"
elif type_prefix == "BI":
self.__type = "Auxiliary"
else:
raise ValueError(INVALID_FILE_NAME.format(self.__filename))
date = self.__filename[2:].split(".")
"""
date[0] = date[0][::-1]
day = date[0][:2][::-1]
month = date[0][2:4][::-1]
year = int(date[0][4:][::-1]) + 1900
self.date = "{}-{}-{}".format(year,month,day)
"""
if len(date[0]) == 6:
self.__date = str(int(date[0][:2]) + 1900) + '-' + date[0][2:4] + '-' + date[0][4:6]
elif len(date[0]) == 7:
self.__date = str(int(date[0][:3]) + 1900) + '-' + date[0][3:5] + '-' + date[0][5:7]
else:
raise ValueError(INVALID_FILE_NAME.format(self.__filename))
self.__time = "00:00"
if len(date) > 1:
self.__time = date[1][:2] + ":" + date[1][2:4]
self._header = self.__find_header(path_to_xml)
dt_list = list()
for key, value in self._header.items():
dt_list.append((key, value[1], value[0]))
if isinstance(path, bytes):
self.__data = np.frombuffer(path, dtype=dt_list)
else:
self.__data = np.fromfile(str(path), dtype=dt_list)
return self
| [
"astropy.io.fits.ColDefs",
"craamvert.instruments.add_sst_comments",
"craamvert.utils.iso_time.time",
"astropy.io.fits.PrimaryHDU",
"pathlib.Path",
"astropy.io.fits.HDUList",
"astropy.io.fits.Column",
"craamvert.utils.OBJECTS_NOT_FROM_SAME_INSTRUMENT.format",
"craamvert.utils.INVALID_FILE_NAME.forma... | [((1133, 1180), 'numpy.concatenate', 'np.concatenate', (['[rbd.body_data for rbd in rbds]'], {}), '([rbd.body_data for rbd in rbds])\n', (1147, 1180), True, 'import numpy as np\n'), ((2132, 2143), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (2140, 2143), True, 'import numpy as np\n'), ((5333, 5343), 'pathlib.Path', 'Path', (['name'], {}), '(name)\n', (5337, 5343), False, 'from pathlib import Path\n'), ((5588, 5605), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (5603, 5605), False, 'from astropy.io import fits\n'), ((6483, 6501), 'craamvert.instruments.add_copyright', 'add_copyright', (['hdu'], {}), '(hdu)\n', (6496, 6501), False, 'from craamvert.instruments import ORIGIN_CRAAM, ORIGIN, TELESCOPE, SST_FULL_NAME, OBSERVATORY, CASLEO, STATION, SST_LATITUDE_LONGITUDE_HEIGHT, TIMEZONE, GMT_NEGATIVE_3, OBSERVATION_DATE, START_TIME, END_TIME, FILE_ORIGIN, SST_RBD, DATA_TYPE, FREQUENCY, SST_FREQUENCY, add_copyright, HISTORY, add_sst_comments, FITS_FILE_EXTENSION\n'), ((7660, 7683), 'craamvert.instruments.add_sst_comments', 'add_sst_comments', (['tbhdu'], {}), '(tbhdu)\n', (7676, 7683), False, 'from craamvert.instruments import ORIGIN_CRAAM, ORIGIN, TELESCOPE, SST_FULL_NAME, OBSERVATORY, CASLEO, STATION, SST_LATITUDE_LONGITUDE_HEIGHT, TIMEZONE, GMT_NEGATIVE_3, OBSERVATION_DATE, START_TIME, END_TIME, FILE_ORIGIN, SST_RBD, DATA_TYPE, FREQUENCY, SST_FREQUENCY, add_copyright, HISTORY, add_sst_comments, FITS_FILE_EXTENSION\n'), ((7703, 7729), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[hdu, tbhdu]'], {}), '([hdu, tbhdu])\n', (7715, 7729), False, 'from astropy.io import fits\n'), ((4002, 4047), 'craamvert.utils.iso_time.time', 'time.time', (["self.__data['time'][nonzero[0][0]]"], {}), "(self.__data['time'][nonzero[0][0]])\n", (4011, 4047), True, 'import craamvert.utils.iso_time as time\n'), ((4049, 4095), 'craamvert.utils.iso_time.time', 'time.time', (["self.__data['time'][nonzero[0][-1]]"], {}), 
"(self.__data['time'][nonzero[0][-1]])\n", (4058, 4095), True, 'import craamvert.utils.iso_time as time\n'), ((7626, 7649), 'astropy.io.fits.ColDefs', 'fits.ColDefs', (['fits_cols'], {}), '(fits_cols)\n', (7638, 7649), False, 'from astropy.io import fits\n'), ((11154, 11188), 'numpy.frombuffer', 'np.frombuffer', (['path'], {'dtype': 'dt_list'}), '(path, dtype=dt_list)\n', (11167, 11188), True, 'import numpy as np\n'), ((1227, 1282), 'craamvert.utils.OBJECTS_NOT_FROM_SAME_INSTRUMENT.format', 'OBJECTS_NOT_FROM_SAME_INSTRUMENT.format', (['SST_INSTRUMENT'], {}), '(SST_INSTRUMENT)\n', (1266, 1282), False, 'from craamvert.utils import SST_INSTRUMENT, OBJECTS_NOT_FROM_SAME_INSTRUMENT, CONCATENATED_DATA, INVALID_FILE_NAME, FILE_ALREADY_EXISTS\n'), ((5426, 5443), 'pathlib.Path', 'Path', (['output_path'], {}), '(output_path)\n', (5430, 5443), False, 'from pathlib import Path\n'), ((7261, 7376), 'astropy.io.fits.Column', 'fits.Column', ([], {'name': 'column', 'format': 'var_dim', 'unit': 'values[2]', 'bscale': 'dscal', 'bzero': 'offset', 'array': 'self.__data[column]'}), '(name=column, format=var_dim, unit=values[2], bscale=dscal,\n bzero=offset, array=self.__data[column])\n', (7272, 7376), False, 'from astropy.io import fits\n'), ((10748, 10789), 'craamvert.utils.INVALID_FILE_NAME.format', 'INVALID_FILE_NAME.format', (['self.__filename'], {}), '(self.__filename)\n', (10772, 10789), False, 'from craamvert.utils import SST_INSTRUMENT, OBJECTS_NOT_FROM_SAME_INSTRUMENT, CONCATENATED_DATA, INVALID_FILE_NAME, FILE_ALREADY_EXISTS\n'), ((8133, 8171), 'pathlib.Path', 'Path', (['"""SSTDataFormatTimeSpanTable.xml"""'], {}), "('SSTDataFormatTimeSpanTable.xml')\n", (8137, 8171), False, 'from pathlib import Path\n'), ((8534, 8565), 'pathlib.Path', 'Path', (['data_description_filename'], {}), '(data_description_filename)\n', (8538, 8565), False, 'from pathlib import Path\n'), ((10135, 10176), 'craamvert.utils.INVALID_FILE_NAME.format', 'INVALID_FILE_NAME.format', (['self.__filename'], {}), 
'(self.__filename)\n', (10159, 10176), False, 'from craamvert.utils import SST_INSTRUMENT, OBJECTS_NOT_FROM_SAME_INSTRUMENT, CONCATENATED_DATA, INVALID_FILE_NAME, FILE_ALREADY_EXISTS\n')] |
import numpy as np
from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling
from deepneuro.models.ops import leaky_relu, minibatch_state_concat
def generator(model, latent_var, depth=1, initial_size=(4, 4), max_size=None, reuse=False, transition=False, alpha_transition=0, name=None):
    """Build the progressive-growing GAN generator graph.

    The latent vector is reshaped into a 1x...x1 spatial feature map,
    projected up to ``initial_size`` with a specially-padded convolution,
    then refined by ``depth`` upsample-and-convolve stages. Each convolution
    is wrapped in ``leaky_relu`` and ``DnPixelNorm``. A final 1x1 convolution
    maps the features to ``model.channels`` output channels ("to-RGB").
    All variables live inside ``tf.variable_scope(name)``; the inner scope
    names embed the current spatial size, so statement order here determines
    variable naming and checkpoint layout.

    Parameters
    ----------
    model : object
        Must provide ``dim`` (spatial dimensionality), ``latent_size``,
        ``channels``, and ``get_filter_num(level)`` (filter count per
        resolution level).
    latent_var : tf.Tensor
        Batch of latent vectors; assumed shape ``(batch, model.latent_size)``
        (the initial reshape requires exactly ``latent_size`` values per
        example).
    depth : int, optional
        Number of upsample/convolution stages after the initial block.
    initial_size : tuple, optional
        Spatial size produced by the first (projection) convolution.
    max_size : tuple, optional
        If given, an axis is only doubled while the doubled size stays
        within ``max_size``; otherwise every axis is doubled each stage.
    reuse : bool, optional
        If True, reuse existing variables in the scope instead of creating
        new ones.
    transition : bool, optional
        If True, fade in the final stage by blending its RGB output with an
        upsampled RGB projection of the previous stage's features.
    alpha_transition : float, optional
        Blend weight; 0 yields only the previous stage's projected output,
        1 yields only the new block's output.
    name : str, optional
        Name of the enclosing TensorFlow variable scope.

    Returns
    -------
    tf.Tensor
        Generated image tensor with ``model.channels`` channels.
    """
    import tensorflow as tf
    with tf.variable_scope(name) as scope:
        convs = []
        if reuse:
            scope.reuse_variables()
        # Reshape the flat latent vector into a 1x...x1 spatial map so it
        # can be fed through convolutions.
        convs += [tf.reshape(latent_var, [tf.shape(latent_var)[0]] + [1] * model.dim + [model.latent_size])]
        # Project the 1x...x1 map up to `initial_size` using the 'Other'
        # padding mode of DnConv.
        # TODO: refactor the padding on this step. Or replace with a dense layer?
        with tf.variable_scope('generator_n_conv_1_{}'.format(convs[-1].shape[1])):
            convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=initial_size, stride_size=(1,) * model.dim, padding='Other', dim=model.dim)), model.dim)
        # Re-assert the static shape as (batch, *initial_size, filters).
        convs += [tf.reshape(convs[-1], [tf.shape(latent_var)[0]] + list(initial_size) + [model.get_filter_num(0)])]
        with tf.variable_scope('generator_n_conv_2_{}'.format(convs[-1].shape[1])):
            convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim)), dim=model.dim)
        for i in range(depth):
            # Calculate Next Upsample Ratio: double each axis, except axes
            # where doubling would exceed `max_size` (those keep ratio 1).
            if max_size is None:
                upsample_ratio = (2,) * model.dim
            else:
                upsample_ratio = []
                for size_idx, size in enumerate(max_size):
                    if size >= convs[-1].shape[size_idx + 1] * 2:
                        upsample_ratio += [2]
                    else:
                        upsample_ratio += [1]
                upsample_ratio = tuple(upsample_ratio)
            # Upsampling, with conversion to RGB if necessary. On the final
            # stage during fade-in, keep an upsampled RGB projection of the
            # previous stage's features for blending below.
            if i == depth - 1 and transition:
                transition_conv = DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]))
                transition_conv = DnUpsampling(transition_conv, upsample_ratio, dim=model.dim)
            convs += [DnUpsampling(convs[-1], upsample_ratio, dim=model.dim)]
            # Convolutional blocks. TODO: Replace with block module.
            with tf.variable_scope('generator_n_conv_1_{}'.format(convs[-1].shape[1])):
                convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim)), dim=model.dim)
            with tf.variable_scope('generator_n_conv_2_{}'.format(convs[-1].shape[1])):
                convs += [DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim)), dim=model.dim)]
        # Conversion to RGB: 1x1 convolution down to the output channel count.
        convs += [DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)]
        if transition:
            # Fade-in: convex blend of the previous stage's upsampled RGB
            # projection and the new block's output.
            convs[-1] = (1 - alpha_transition) * transition_conv + alpha_transition * convs[-1]
        return convs[-1]
def discriminator(model, input_image, reuse=False, initial_size=(4, 4), max_size=None, name=None, depth=1, transition=False, alpha_transition=0, **kwargs):
import tensorflow as tf
"""
"""
with tf.variable_scope(name) as scope:
convs = []
if reuse:
scope.reuse_variables()
# fromRGB
convs += [leaky_relu(DnConv(input_image, output_dim=model.get_filter_num(depth), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(input_image.shape[1]), dim=model.dim))]
for i in range(depth):
# Convolutional blocks. TODO: Replace with block module.
convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))]
convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - 1 - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim))]
# Calculate Next Downsample Ratio
# Whoever can calculate this in a less dumb way than this gets a Fields Medal.
if max_size is None:
downsample_ratio = (2,) * model.dim
else:
reference_shape = []
current_shape = input_image.shape
for idx, cshape in enumerate(current_shape):
reference_shape += [current_shape[idx] // initial_size[idx]]
downsample_ratio = []
for size_idx, size in enumerate(max_size):
if size // initial_size[size_idx] > min(reference_shape):
downsample_ratio += [1]
else:
downsample_ratio += [2]
downsample_ratio = tuple(downsample_ratio)
convs[-1] = DnAveragePooling(convs[-1], downsample_ratio, dim=model.dim)
if i == 0 and transition:
transition_conv = DnAveragePooling(input_image, downsample_ratio, dim=model.dim)
transition_conv = leaky_relu(DnConv(transition_conv, output_dim=model.get_filter_num(depth - 1), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(transition_conv.shape[1]), dim=model.dim))
convs[-1] = alpha_transition * convs[-1] + (1 - alpha_transition) * transition_conv
convs += [minibatch_state_concat(convs[-1])]
convs[-1] = leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(3,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))
output = tf.reshape(convs[-1], [tf.shape(convs[-1])[0], np.prod(initial_size) * model.get_filter_num(0)])
# Currently erroring
# discriminate_output = dense(output, output_size=1, name='discriminator_n_fully')
discriminate_output = tf.layers.dense(output, 1, name='discriminator_n_1_fully')
return tf.nn.sigmoid(discriminate_output), discriminate_output
def unet(model, input_tensor, backend='tensorflow'):
from keras.layers.merge import concatenate
left_outputs = []
for level in range(model.depth):
filter_num = int(model.max_filter / (2 ** (model.depth - level)) / model.downsize_filters_factor)
if level == 0:
left_outputs += [DnConv(input_tensor, filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)]
left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)
else:
left_outputs += [DnMaxPooling(left_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
left_outputs[level] = DnConv(left_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)
left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)
if model.dropout is not None and model.dropout != 0:
left_outputs[level] = DnDropout(model.dropout)(left_outputs[level])
if model.batch_norm:
left_outputs[level] = DnBatchNormalization(left_outputs[level])
right_outputs = [left_outputs[model.depth - 1]]
for level in range(model.depth):
filter_num = int(model.max_filter / (2 ** (level)) / model.downsize_filters_factor)
if level > 0:
right_outputs += [DnUpsampling(right_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
right_outputs[level] = concatenate([right_outputs[level], left_outputs[model.depth - level - 1]], axis=model.dim + 1)
right_outputs[level] = DnConv(right_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_1'.format(level), backend=backend)
right_outputs[level] = DnConv(right_outputs[level], int(filter_num / 2), model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_2'.format(level), backend=backend)
else:
continue
if model.dropout is not None and model.dropout != 0:
right_outputs[level] = DnDropout(model.dropout)(right_outputs[level])
if model.batch_norm:
right_outputs[level] = DnBatchNormalization()(right_outputs[level])
output_layer = DnConv(right_outputs[level], 1, (1, ) * model.dim, stride_size=(1,) * model.dim, dim=model.dim, name='end_conv', backend=backend)
# TODO: Brainstorm better way to specify outputs
if model.input_tensor is not None:
return output_layer
return model.model | [
"numpy.prod",
"deepneuro.models.dn_ops.DnDropout",
"tensorflow.shape",
"tensorflow.variable_scope",
"keras.layers.merge.concatenate",
"deepneuro.models.dn_ops.DnBatchNormalization",
"deepneuro.models.dn_ops.DnUpsampling",
"deepneuro.models.ops.minibatch_state_concat",
"deepneuro.models.dn_ops.DnConv... | [((10339, 10472), 'deepneuro.models.dn_ops.DnConv', 'DnConv', (['right_outputs[level]', '(1)', '((1,) * model.dim)'], {'stride_size': '((1,) * model.dim)', 'dim': 'model.dim', 'name': '"""end_conv"""', 'backend': 'backend'}), "(right_outputs[level], 1, (1,) * model.dim, stride_size=(1,) * model.\n dim, dim=model.dim, name='end_conv', backend=backend)\n", (10345, 10472), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((962, 985), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (979, 985), True, 'import tensorflow as tf\n'), ((4189, 4212), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (4206, 4212), True, 'import tensorflow as tf\n'), ((7081, 7139), 'tensorflow.layers.dense', 'tf.layers.dense', (['output', '(1)'], {'name': '"""discriminator_n_1_fully"""'}), "(output, 1, name='discriminator_n_1_fully')\n", (7096, 7139), True, 'import tensorflow as tf\n'), ((5983, 6043), 'deepneuro.models.dn_ops.DnAveragePooling', 'DnAveragePooling', (['convs[-1]', 'downsample_ratio'], {'dim': 'model.dim'}), '(convs[-1], downsample_ratio, dim=model.dim)\n', (5999, 6043), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((6557, 6590), 'deepneuro.models.ops.minibatch_state_concat', 'minibatch_state_concat', (['convs[-1]'], {}), '(convs[-1])\n', (6579, 6590), False, 'from deepneuro.models.ops import leaky_relu, minibatch_state_concat\n'), ((7156, 7190), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['discriminate_output'], {}), '(discriminate_output)\n', (7169, 7190), True, 'import tensorflow as tf\n'), ((8939, 8980), 'deepneuro.models.dn_ops.DnBatchNormalization', 'DnBatchNormalization', (['left_outputs[level]'], {}), '(left_outputs[level])\n', (8959, 8980), False, 'from deepneuro.models.dn_ops 
import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((9376, 9474), 'keras.layers.merge.concatenate', 'concatenate', (['[right_outputs[level], left_outputs[model.depth - level - 1]]'], {'axis': '(model.dim + 1)'}), '([right_outputs[level], left_outputs[model.depth - level - 1]],\n axis=model.dim + 1)\n', (9387, 9474), False, 'from keras.layers.merge import concatenate\n'), ((2815, 2875), 'deepneuro.models.dn_ops.DnUpsampling', 'DnUpsampling', (['transition_conv', 'upsample_ratio'], {'dim': 'model.dim'}), '(transition_conv, upsample_ratio, dim=model.dim)\n', (2827, 2875), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((2899, 2953), 'deepneuro.models.dn_ops.DnUpsampling', 'DnUpsampling', (['convs[-1]', 'upsample_ratio'], {'dim': 'model.dim'}), '(convs[-1], upsample_ratio, dim=model.dim)\n', (2911, 2953), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((6117, 6179), 'deepneuro.models.dn_ops.DnAveragePooling', 'DnAveragePooling', (['input_image', 'downsample_ratio'], {'dim': 'model.dim'}), '(input_image, downsample_ratio, dim=model.dim)\n', (6133, 6179), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((8093, 8194), 'deepneuro.models.dn_ops.DnMaxPooling', 'DnMaxPooling', (['left_outputs[level - 1]'], {'pool_size': 'model.pool_size', 'dim': 'model.dim', 'backend': 'backend'}), '(left_outputs[level - 1], pool_size=model.pool_size, dim=model.\n dim, backend=backend)\n', (8105, 8194), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((8821, 8845), 'deepneuro.models.dn_ops.DnDropout', 'DnDropout', (['model.dropout'], {}), 
'(model.dropout)\n', (8830, 8845), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((9238, 9340), 'deepneuro.models.dn_ops.DnUpsampling', 'DnUpsampling', (['right_outputs[level - 1]'], {'pool_size': 'model.pool_size', 'dim': 'model.dim', 'backend': 'backend'}), '(right_outputs[level - 1], pool_size=model.pool_size, dim=model\n .dim, backend=backend)\n', (9250, 9340), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((10150, 10174), 'deepneuro.models.dn_ops.DnDropout', 'DnDropout', (['model.dropout'], {}), '(model.dropout)\n', (10159, 10174), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((10270, 10292), 'deepneuro.models.dn_ops.DnBatchNormalization', 'DnBatchNormalization', ([], {}), '()\n', (10290, 10292), False, 'from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling\n'), ((6855, 6874), 'tensorflow.shape', 'tf.shape', (['convs[-1]'], {}), '(convs[-1])\n', (6863, 6874), True, 'import tensorflow as tf\n'), ((6879, 6900), 'numpy.prod', 'np.prod', (['initial_size'], {}), '(initial_size)\n', (6886, 6900), True, 'import numpy as np\n'), ((1114, 1134), 'tensorflow.shape', 'tf.shape', (['latent_var'], {}), '(latent_var)\n', (1122, 1134), True, 'import tensorflow as tf\n'), ((1592, 1612), 'tensorflow.shape', 'tf.shape', (['latent_var'], {}), '(latent_var)\n', (1600, 1612), True, 'import tensorflow as tf\n')] |
import cv2
import numpy as np
import numpy
import imutils
from math import sqrt
import os
import math
green = np.uint8([[[0,255,0 ]]])
hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
cap = cv2.VideoCapture(0)
while cap.isOpened():
ret, frame = cap.read()
# frame = cv2.resize(frame1, (600, 600))
hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
Conv_hsv_Gray = cv2.cvtColor(hsv_img, cv2.COLOR_BGR2GRAY)
H,S,V = cv2.split(hsv_img)
cv2.imshow('res frame AfterALL ', frame)
cv2.imshow(' H ', H)
cv2.imshow(' S ', S)
cv2.imshow(' V ', V)
cv2.imshow('sv_Gray ', Conv_hsv_Gray)
if cv2.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
| [
"numpy.uint8",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.split",
"cv2.waitKey"
] | [((117, 142), 'numpy.uint8', 'np.uint8', (['[[[0, 255, 0]]]'], {}), '([[[0, 255, 0]]])\n', (125, 142), True, 'import numpy as np\n'), ((155, 193), 'cv2.cvtColor', 'cv2.cvtColor', (['green', 'cv2.COLOR_BGR2HSV'], {}), '(green, cv2.COLOR_BGR2HSV)\n', (167, 193), False, 'import cv2\n'), ((204, 223), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (220, 223), False, 'import cv2\n'), ((337, 375), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (349, 375), False, 'import cv2\n'), ((397, 438), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv_img', 'cv2.COLOR_BGR2GRAY'], {}), '(hsv_img, cv2.COLOR_BGR2GRAY)\n', (409, 438), False, 'import cv2\n'), ((454, 472), 'cv2.split', 'cv2.split', (['hsv_img'], {}), '(hsv_img)\n', (463, 472), False, 'import cv2\n'), ((478, 518), 'cv2.imshow', 'cv2.imshow', (['"""res frame AfterALL """', 'frame'], {}), "('res frame AfterALL ', frame)\n", (488, 518), False, 'import cv2\n'), ((524, 544), 'cv2.imshow', 'cv2.imshow', (['""" H """', 'H'], {}), "(' H ', H)\n", (534, 544), False, 'import cv2\n'), ((550, 570), 'cv2.imshow', 'cv2.imshow', (['""" S """', 'S'], {}), "(' S ', S)\n", (560, 570), False, 'import cv2\n'), ((576, 596), 'cv2.imshow', 'cv2.imshow', (['""" V """', 'V'], {}), "(' V ', V)\n", (586, 596), False, 'import cv2\n'), ((604, 641), 'cv2.imshow', 'cv2.imshow', (['"""sv_Gray """', 'Conv_hsv_Gray'], {}), "('sv_Gray ', Conv_hsv_Gray)\n", (614, 641), False, 'import cv2\n'), ((656, 671), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (667, 671), False, 'import cv2\n')] |
from .element_counts import element_counts
import numpy as np
def double_bond_equivalent(formula_list):
"""
Docstring for function pyKrev.double_bond_equivalent
====================
This function takes a list of molecular formula strings and returns the double bond equivalent.
Use
----
double_bond_equivalent(Y)
Returns a numpy array of len(Y) in which each item is the double bond equivalent.
Parameters
----------
Y: A list of molecular formula strings.
Info
----------
Double bond equivalent (DBE; UN; degree of unsaturation; PBoR [Pi Bonds or Rings]):
The number of molecules of H2 that would have to be added to a molecule to convert all pi bonds to single bonds,
and all rings to acyclic structures.
The DBE number can be calculated from the formula using the following equation:
DBE = UN = PBoR = C - (H/2) + (N/2) +1,
where: C = number of carbon atoms, H = number of hydrogen and halogen atoms, and N = number of nitrogen atoms.
"""
count_list = element_counts(formula_list)
DBE_array = np.array([])
warning = 0
for count in count_list:
Halogens = ['H','Cl','Br','I','F','At','Ts']
Hal = 0
for el in Halogens:
try:
Hal += count[el]
except KeyError:
pass
DBE_counts = count['C'] - (Hal/2) + (count['N']/2) + 1
if DBE_counts < 0:
warning = 1
DBE_counts = 0
DBE_array = np.append(DBE_array,DBE_counts)
if warning == 1:
print('Warning: negative dbe counts detected and set to zero.')
return DBE_array | [
"numpy.append",
"numpy.array"
] | [((1088, 1100), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1096, 1100), True, 'import numpy as np\n'), ((1544, 1576), 'numpy.append', 'np.append', (['DBE_array', 'DBE_counts'], {}), '(DBE_array, DBE_counts)\n', (1553, 1576), True, 'import numpy as np\n')] |
from __future__ import annotations
import subprocess
from shutil import which
import matplotlib.pyplot as plt
import numpy as np
import plotly.express as px
import pytest
from pymatgen.core import Lattice, Structure
from pymatviz import ROOT
# random regression data
np.random.seed(42)
xs = np.random.rand(100)
y_pred = xs + 0.1 * np.random.normal(size=100)
y_true = xs + 0.1 * np.random.normal(size=100)
# random classification data
y_binary = np.random.choice([0, 1], 100)
y_proba = np.clip(y_binary - 0.1 * np.random.normal(scale=5, size=100), 0.2, 0.9)
y_clf = y_proba.round()
@pytest.fixture(autouse=True)
def run_around_tests():
# runs before each test
yield
# runs after each test
plt.close()
@pytest.fixture
def spg_symbols():
symbols = "C2/m C2/m Fm-3m C2/m Cmc2_1 P4/nmm P-43m P-43m P6_3mc".split()
symbols += "P-43m P6_3mc Cmcm P2_1/m I2_13 P-6m2".split()
return symbols
@pytest.fixture
def structures():
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
lattice = [[3.8, 0, 0], [1.9, 3.3, 0], [0, -2.2, 3.1]]
Si2 = Structure(lattice, ["Si4+", "Si4+"], coords)
coords = [
[0.25, 0.25, 0.173],
[0.75, 0.75, 0.827],
[0.75, 0.25, 0],
[0.25, 0.75, 0],
[0.25, 0.25, 0.676],
[0.75, 0.75, 0.324],
]
lattice = Lattice.tetragonal(4.192, 6.88)
Si2Ru2Pr2 = Structure(lattice, ["Si", "Si", "Ru", "Ru", "Pr", "Pr"], coords)
return [Si2, Si2Ru2Pr2]
@pytest.fixture
def plotly_scatter():
xs = np.arange(7)
y1 = xs**2
y2 = xs**0.5
fig = px.scatter(x=xs, y=[y1, y2])
return fig
def save_reference_img(save_to: str) -> None:
"""Save a matplotlib figure to a specified fixture path.
Raises:
ValueError: save_to is not inside 'tests/fixtures/' directory.
"""
if not save_to.startswith((f"{ROOT}/tests/fixtures/", "tests/fixtures/")):
raise ValueError(f"{save_to=} must point at 'tests/fixtures/'")
pngquant, zopflipng = which("pngquant"), which("zopflipng")
print(
f"created new fixture {save_to=}, image comparison will run for real on "
"subsequent test runs unless fixture is deleted"
)
plt.savefig(save_to)
plt.close()
if not pngquant:
return print("Warning: pngquant not installed. Cannot compress new fixture.")
if not zopflipng:
return print("Warning: zopflipng not installed. Cannot compress new fixture.")
subprocess.run(
f"{pngquant} 32 --skip-if-larger --ext .png --force".split() + [save_to],
check=False,
capture_output=True,
)
subprocess.run(
[zopflipng, "-y", save_to, save_to],
check=True,
capture_output=True,
)
| [
"numpy.random.normal",
"plotly.express.scatter",
"matplotlib.pyplot.savefig",
"numpy.random.rand",
"numpy.random.choice",
"subprocess.run",
"shutil.which",
"matplotlib.pyplot.close",
"pymatgen.core.Structure",
"numpy.random.seed",
"pytest.fixture",
"numpy.arange",
"pymatgen.core.Lattice.tetr... | [((272, 290), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (286, 290), True, 'import numpy as np\n'), ((296, 315), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (310, 315), True, 'import numpy as np\n'), ((451, 480), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', '(100)'], {}), '([0, 1], 100)\n', (467, 480), True, 'import numpy as np\n'), ((590, 618), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (604, 618), False, 'import pytest\n'), ((714, 725), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (723, 725), True, 'import matplotlib.pyplot as plt\n'), ((1071, 1115), 'pymatgen.core.Structure', 'Structure', (['lattice', "['Si4+', 'Si4+']", 'coords'], {}), "(lattice, ['Si4+', 'Si4+'], coords)\n", (1080, 1115), False, 'from pymatgen.core import Lattice, Structure\n'), ((1318, 1349), 'pymatgen.core.Lattice.tetragonal', 'Lattice.tetragonal', (['(4.192)', '(6.88)'], {}), '(4.192, 6.88)\n', (1336, 1349), False, 'from pymatgen.core import Lattice, Structure\n'), ((1366, 1430), 'pymatgen.core.Structure', 'Structure', (['lattice', "['Si', 'Si', 'Ru', 'Ru', 'Pr', 'Pr']", 'coords'], {}), "(lattice, ['Si', 'Si', 'Ru', 'Ru', 'Pr', 'Pr'], coords)\n", (1375, 1430), False, 'from pymatgen.core import Lattice, Structure\n'), ((1508, 1520), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (1517, 1520), True, 'import numpy as np\n'), ((1563, 1591), 'plotly.express.scatter', 'px.scatter', ([], {'x': 'xs', 'y': '[y1, y2]'}), '(x=xs, y=[y1, y2])\n', (1573, 1591), True, 'import plotly.express as px\n'), ((2185, 2205), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_to'], {}), '(save_to)\n', (2196, 2205), True, 'import matplotlib.pyplot as plt\n'), ((2210, 2221), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2219, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2602, 2690), 'subprocess.run', 'subprocess.run', (["[zopflipng, '-y', save_to, 
save_to]"], {'check': '(True)', 'capture_output': '(True)'}), "([zopflipng, '-y', save_to, save_to], check=True,\n capture_output=True)\n", (2616, 2690), False, 'import subprocess\n'), ((336, 362), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (352, 362), True, 'import numpy as np\n'), ((383, 409), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100)'}), '(size=100)\n', (399, 409), True, 'import numpy as np\n'), ((1986, 2003), 'shutil.which', 'which', (['"""pngquant"""'], {}), "('pngquant')\n", (1991, 2003), False, 'from shutil import which\n'), ((2005, 2023), 'shutil.which', 'which', (['"""zopflipng"""'], {}), "('zopflipng')\n", (2010, 2023), False, 'from shutil import which\n'), ((516, 551), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(5)', 'size': '(100)'}), '(scale=5, size=100)\n', (532, 551), True, 'import numpy as np\n')] |
#!/usr/bin/python
import os
import json
import numpy as np
# import dysts
# from dysts.utils import find_significant_frequencies
# from dysts.flows import *
# from dysts.base import *
import sktime.datasets
from sktime.transformations.panel.tsfresh import TSFreshFeatureExtractor
from sklearn.linear_model import RidgeClassifierCV
from sktime.utils.data_processing import from_nested_to_3d_numpy, from_3d_numpy_to_nested
all_scores = dict()
np.random.seed(0)
# cwd = os.getcwd()
cwd = os.path.dirname(os.path.realpath(__file__))
output_path = cwd + "/results/baseline_transfer_learning.json"
print("Saving data to: ", output_path)
dataset_names = np.genfromtxt(cwd + "/resources/ucr_ea_names.txt", dtype='str')
try:
with open(output_path, "r") as file:
all_scores = json.load(file)
except FileNotFoundError:
all_scores = dict()
for data_ind, name in enumerate(dataset_names):
if name in all_scores.keys():
if "score_tsfresh" in all_scores[name].keys():
print("Skipped " + name, flush=True)
continue
print("Evaluating " + name, flush=True)
all_scores[name] = dict()
X_train, y_train = sktime.datasets.load_UCR_UEA_dataset(name, split="train", return_X_y=True)
X_test, y_test = sktime.datasets.load_UCR_UEA_dataset(name, split="test", return_X_y=True)
X_train_np = from_nested_to_3d_numpy(X_train)
X_test_np = from_nested_to_3d_numpy(X_test)
X_train_np -= np.mean(X_train_np, axis=-1, keepdims=True)
X_train_np /= np.std(X_train_np, axis=-1, keepdims=True)
X_test_np -= np.mean(X_test_np, axis=-1, keepdims=True)
X_test_np /= np.std(X_test_np, axis=-1, keepdims=True)
transformer = TSFreshFeatureExtractor(show_warnings=False)
X_train_featurized = transformer.fit_transform(from_3d_numpy_to_nested(X_train_np))
X_test_featurized = transformer.fit_transform(from_3d_numpy_to_nested(X_test_np))
model = RidgeClassifierCV(alphas = np.logspace(-3, 3, 10), normalize = True)
model.fit(X_train_featurized, y_train)
score = model.score(X_test_featurized, y_test)
all_scores[name]["score_tsfresh"] = score
print(name, score, flush=True)
with open(output_path, 'w') as file:
json.dump(all_scores, file, indent=4)
| [
"numpy.mean",
"sktime.transformations.panel.tsfresh.TSFreshFeatureExtractor",
"sktime.utils.data_processing.from_3d_numpy_to_nested",
"os.path.realpath",
"numpy.random.seed",
"numpy.std",
"json.load",
"sktime.utils.data_processing.from_nested_to_3d_numpy",
"numpy.logspace",
"numpy.genfromtxt",
"... | [((445, 462), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (459, 462), True, 'import numpy as np\n'), ((653, 716), 'numpy.genfromtxt', 'np.genfromtxt', (["(cwd + '/resources/ucr_ea_names.txt')"], {'dtype': '"""str"""'}), "(cwd + '/resources/ucr_ea_names.txt', dtype='str')\n", (666, 716), True, 'import numpy as np\n'), ((506, 532), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (522, 532), False, 'import os\n'), ((1358, 1390), 'sktime.utils.data_processing.from_nested_to_3d_numpy', 'from_nested_to_3d_numpy', (['X_train'], {}), '(X_train)\n', (1381, 1390), False, 'from sktime.utils.data_processing import from_nested_to_3d_numpy, from_3d_numpy_to_nested\n'), ((1407, 1438), 'sktime.utils.data_processing.from_nested_to_3d_numpy', 'from_nested_to_3d_numpy', (['X_test'], {}), '(X_test)\n', (1430, 1438), False, 'from sktime.utils.data_processing import from_nested_to_3d_numpy, from_3d_numpy_to_nested\n'), ((1462, 1505), 'numpy.mean', 'np.mean', (['X_train_np'], {'axis': '(-1)', 'keepdims': '(True)'}), '(X_train_np, axis=-1, keepdims=True)\n', (1469, 1505), True, 'import numpy as np\n'), ((1524, 1566), 'numpy.std', 'np.std', (['X_train_np'], {'axis': '(-1)', 'keepdims': '(True)'}), '(X_train_np, axis=-1, keepdims=True)\n', (1530, 1566), True, 'import numpy as np\n'), ((1584, 1626), 'numpy.mean', 'np.mean', (['X_test_np'], {'axis': '(-1)', 'keepdims': '(True)'}), '(X_test_np, axis=-1, keepdims=True)\n', (1591, 1626), True, 'import numpy as np\n'), ((1644, 1685), 'numpy.std', 'np.std', (['X_test_np'], {'axis': '(-1)', 'keepdims': '(True)'}), '(X_test_np, axis=-1, keepdims=True)\n', (1650, 1685), True, 'import numpy as np\n'), ((1705, 1749), 'sktime.transformations.panel.tsfresh.TSFreshFeatureExtractor', 'TSFreshFeatureExtractor', ([], {'show_warnings': '(False)'}), '(show_warnings=False)\n', (1728, 1749), False, 'from sktime.transformations.panel.tsfresh import TSFreshFeatureExtractor\n'), ((785, 800), 'json.load', 
'json.load', (['file'], {}), '(file)\n', (794, 800), False, 'import json\n'), ((1801, 1836), 'sktime.utils.data_processing.from_3d_numpy_to_nested', 'from_3d_numpy_to_nested', (['X_train_np'], {}), '(X_train_np)\n', (1824, 1836), False, 'from sktime.utils.data_processing import from_nested_to_3d_numpy, from_3d_numpy_to_nested\n'), ((1888, 1922), 'sktime.utils.data_processing.from_3d_numpy_to_nested', 'from_3d_numpy_to_nested', (['X_test_np'], {}), '(X_test_np)\n', (1911, 1922), False, 'from sktime.utils.data_processing import from_nested_to_3d_numpy, from_3d_numpy_to_nested\n'), ((2246, 2283), 'json.dump', 'json.dump', (['all_scores', 'file'], {'indent': '(4)'}), '(all_scores, file, indent=4)\n', (2255, 2283), False, 'import json\n'), ((1964, 1986), 'numpy.logspace', 'np.logspace', (['(-3)', '(3)', '(10)'], {}), '(-3, 3, 10)\n', (1975, 1986), True, 'import numpy as np\n')] |
import numpy as np
def atari_make_initial_state(state):
return np.stack([state] * 4, axis=2)
def atari_make_next_state(state, next_state):
return np.append(state[:,:,1:], np.expand_dims(next_state, 2), axis=2) | [
"numpy.stack",
"numpy.expand_dims"
] | [((68, 97), 'numpy.stack', 'np.stack', (['([state] * 4)'], {'axis': '(2)'}), '([state] * 4, axis=2)\n', (76, 97), True, 'import numpy as np\n'), ((181, 210), 'numpy.expand_dims', 'np.expand_dims', (['next_state', '(2)'], {}), '(next_state, 2)\n', (195, 210), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import linear_sum_assignment
from scipy import signal
from sklearn.neighbors import KernelDensity
import copy
import os
import utm
import rasterio
from CountLine import CountLine
import sys
sys.path.append('/home/golden/general-detection/functions')
import koger_tracking as ktf
def mark_bats_on_image(image_raw, centers, radii=None,
scale_circle_size=5, contours=None,
draw_contours=False):
'''
Draw a bunch of circles on given image
image: 2D or 3D image
centers: shape(n,2) array of circle centers
radii: list of circle radii
'''
if len(image_raw.shape) < 2:
print('image has too few dimensions')
return None
if len(image_raw.shape) == 2:
color = 200
else:
if image_raw.shape[2] == 3:
color = (0, 255, 255)
else:
print('image is the wrong shape')
return None
image = np.copy(image_raw)
if radii is None:
radii = np.ones(len(centers))
for circle_ind, radius in enumerate(radii):
cv2.circle(image,
(centers[circle_ind, 0].astype(int),
centers[circle_ind, 1].astype(int)),
int(radius * scale_circle_size), color , 1)
if draw_contours and contours:
for contour in contours:
if len(contour.shape) > 1:
rect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rect)
box_d = np.int0(box)
cv2.drawContours(image, [box_d], 0, (0,255,100), 1)
return image
def get_tracks_in_frame(frame_ind, track_list):
""" Return list of all tracks present in frame ind. """
tracks_in_frame = []
for track in track_list:
if (track['last_frame'] >= frame_ind
and track['first_frame'] <= frame_ind):
tracks_in_frame.append(track)
return tracks_in_frame
def draw_tracks_on_frame(frame, frame_ind, track_list,
positions=None, figure_scale=60,
track_width=2, position_alpha=.5,
draw_whole_track=False, shift=0):
""" Draw all active tracks and all detected bat locations on given frame.
frame: loaded image - np array
frame_ind: frame number
track_list: list of all tracks in observation
positions: all detected bat positions in observation
figure_scale: how big to display output image
track_width: width of plotted tracks
position_alpha: alpha of position dots
draw_whole_track: Boolean draw track in the future of frame_ind
shift: compensate for lack of padding in network when drawing tracks
on input frames
"""
plt.figure(
figsize = (int(frame.shape[1] / figure_scale),
int(frame.shape[0] / figure_scale)))
plt.imshow(frame)
num_tracks = 0
for track in track_list:
if (track['last_frame'] >= frame_ind
and track['first_frame'] <= frame_ind):
rel_frame = frame_ind - track['first_frame']
if draw_whole_track:
plt.plot(track['track'][:, 0] + shift,
track['track'][:, 1] + shift,
linewidth=track_width)
else:
plt.plot(track['track'][:rel_frame, 0] + shift,
track['track'][:rel_frame, 1] + shift,
linewidth=track_width)
num_tracks += 1
if positions:
plt.scatter(positions[frame_ind][:,0] + shift,
positions[frame_ind][:,1] + shift,
c='red', alpha=position_alpha)
plt.title('Tracks: {}, Bats: {}'.format(num_tracks,
len(positions[frame_ind])))
def subtract_background(images, image_ind, background_sum):
'''
Subtract an averaged background from the image. Average over frame_range in the past and future
images: 3d numpy array (num images, height, width)
image_ind: index in circular image array
background_sum: sum of blue channel pixels across 0 dimension of images
'''
background = np.floor_divide(background_sum, images.shape[0])
# The order of subtraction means dark bats are now light in image_dif
image_dif = background - images[image_ind, :, :, 2]
return image_dif, background
def preprocess_to_binary(image, binary_thresh, background):
'''
Converts 2D image to binary after rescaling pixel intensity
image: 2D np array
low_pix_value: pixel value below which all pixels are set to 0
high_pix_value: pixel value above which all pixels are set to 255
binary_thresh: number from 0 - 255, above set to 255, bellow, set to 0
background: background image (2D probably blue channel)
'''
# # Rescale image pixels within range
# image_rescale = exposure.rescale_intensity(
# image, in_range=(low_pix_value, high_pix_value), out_range=(0, 255))
image_rescale = image
# Binarize image based on threshold
min_difference = 5
threshold = binary_thresh * background
threshold = np.where(threshold < min_difference, min_difference, threshold)
binary_image = np.where(image < threshold, 0, 255)
return binary_image
def get_blob_info(binary_image, background=None, size_threshold=0):
    '''
    Extract blob centers, sizes, areas and angles from a binary image.
    binary_image: 2D image
    background: 2D array used to see how dark the local background is
    size_threshold: rectangle area above which a blob is considered real
    Returns (centers, areas, contours, angles, sizes, rects).
    '''
    found, _ = cv2.findContours(binary_image.astype(np.uint8).copy(),
                                cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    centers = []
    sizes = []
    areas = []
    angles = []
    rects = []
    good_contours = []
    # Darker local backgrounds demand larger blobs before we trust them.
    darkness_bonuses = ((30, 22), (50, 15), (80, 10), (100, 5))
    for contour in (np.squeeze(c) for c in found):
        # Single-point contours squeeze to 1-D and are ignored.
        if len(contour.shape) <= 1:
            continue
        rect = cv2.minAreaRect(contour)
        if background is None:
            dark_size_threshold = 0  # unused: acceptance check below passes
        else:
            darkness = background[int(rect[0][1]), int(rect[0][0])]
            for limit, bonus in darkness_bonuses:
                if darkness < limit:
                    dark_size_threshold = size_threshold + bonus
                    break
            else:
                dark_size_threshold = size_threshold
        area = rect[1][0] * rect[1][1]
        if (area >= dark_size_threshold) or background is None:
            centers.append(rect[0])
            sizes.append(rect[1])
            angles.append(rect[2])
            good_contours.append(contour)
            areas.append(area)
            rects.append(rect)
    if centers:
        centers = np.stack(centers, 0)
        sizes = np.stack(sizes, 0)
    else:
        centers = np.zeros((0, 2))
    return (centers, np.array(areas), good_contours, angles, sizes, rects)
def draw_circles_on_image(image, centers, sizes, rects=None):
    '''
    Draw detection circles (and optional min-area boxes) on an image.
    image: 2D or 3D image
    centers: shape(n,2) array of circle centers
    sizes: per-detection size; the circle radius is its max component
    rects: list of minimum bounding rectangles
    Returns the annotated image, or None when the shape is unusable.
    '''
    if len(image.shape) < 2:
        print('image has too few dimensions')
        return None
    if len(image.shape) == 2:
        # Grayscale: draw with fixed intensities.
        circle_color = 200
        box_color = 100
    elif image.shape[2] == 3:
        circle_color = (0, 255, 255)
        box_color = (0, 255, 100)
    else:
        print('image is the wrong shape')
        return None
    for ind, size in enumerate(sizes):
        center = (centers[ind, 0].astype(int), centers[ind, 1].astype(int))
        cv2.circle(image, center, int(np.max(size)), circle_color, 1)
    if rects:
        for rect in rects:
            corners = np.int0(cv2.boxPoints(rect))
            cv2.drawContours(image, [corners], 0, box_color, 1)
    return image
def update_circular_image_array(images, image_ind, image_files, frame_num, background_sum):
    """ Add new image if necessary and increment image_ind.
    Also update sum of pixels across array for background subtraction.
    If frame_num is less than half size of array then don't need to
    replace image since initially all images in average are in the future.
    images: image array size (num images averaging, height, width, channel)
    image_ind: index of focal frame in images
    image_files: list of all image files in observation
    frame_num: current frame number in observation
    background_sum: sum of current frames blue dimension across frames
    Returns (images, incremented image_ind, updated background_sum).
    """
    # Only swap in a new frame while a full look-ahead window exists; near
    # either end of the observation the buffer is left unchanged.
    if (frame_num > int(images.shape[0] / 2)
            and frame_num < (len(image_files) - int(images.shape[0] / 2))):
        # Overwrite the slot half a window ahead of the focal index,
        # wrapping around the circular buffer.
        replace_ind = image_ind + int(images.shape[0] / 2)
        replace_ind %= images.shape[0]
        # Subtract the pixel values that are about to be removed from background
        background_sum -= images[replace_ind, :, :, 2]
        image_file = image_files[frame_num + int(images.shape[0] / 2)]
        image = cv2.imread(image_file)
        # cv2 loads BGR; the pipeline works in RGB.
        images[replace_ind] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # Add new pixel values to the background sum
        background_sum += images[replace_ind, :, :, 2]
    image_ind += 1
    # image_ind should always be in between 0 and images.shape - 1
    image_ind = image_ind % images.shape[0]
    return images, image_ind, background_sum
def initialize_image_array(image_files, focal_frame_ind, num_images):
    """ Load a num_images x h x w x 3 buffer centered on the focal frame.
    Args:
        image_files (list): sorted paths to all image files in observation
        focal_frame_ind (int): number of the frame being processed
        num_images (int): number of frames used for background subtraction
    Returns (stacked frames array, index of the focal frame in the array).
    """
    half_window = num_images // 2
    first_frame_ind = focal_frame_ind - half_window
    last_frame_ind = focal_frame_ind + half_window
    if num_images % 2 == 0:
        # Even window: one fewer future frame is loaded.
        last_frame_ind -= 1
    frames = []
    for path in image_files[first_frame_ind:last_frame_ind + 1]:
        bgr_frame = cv2.imread(path)
        # Convert to RGB, which the rest of the pipeline expects.
        frames.append(cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB))
    return (np.stack(frames), half_window)
def process_frame(images, focal_frame_ind, bat_thresh, background_sum, bat_area_thresh, debug=False):
    """Segment bats in one frame via background subtraction.
    images: n x h x w x c array; the n frames are averaged for the background
    focal_frame_ind: which index in images array should be processed
    bat_thresh: float value to use for thresholding bat from background
    background_sum: sum of all blue channel pixels across the n dimension of images
    bat_area_thresh: minimum blob area considered a bat
    debug: if true also return the binary image
    """
    size_threshold = bat_area_thresh
    # Darker frames are noisier, so cap the allowed detection count lower.
    frame_mean = np.mean(images[focal_frame_ind, :, :, 2])
    max_bats = 600
    if frame_mean < 35:
        max_bats = 200
    if frame_mean < 28:
        max_bats = 100
    if frame_mean < 5:
        print('Too dark...')
        empty = (None,) * 7
        return empty + (None,) if debug else empty
    image_dif, background = subtract_background(images, focal_frame_ind, background_sum)
    while True:
        binary_image = preprocess_to_binary(image_dif, bat_thresh, background)
        bat_centers, bat_areas, contours, rect_angles, bat_sizes, bat_rects = get_blob_info(
            binary_image, background, size_threshold=size_threshold)
        if len(bat_centers) < max_bats:
            break
        # Too many blobs means noise: raise the threshold and retry.
        bat_thresh += 0.05
    results = (bat_centers, bat_areas, contours, rect_angles,
               bat_sizes, bat_rects, bat_thresh)
    return results + (binary_image,) if debug else results
def add_all_points_as_new_tracks(raw_track_list, positions, contours,
                                 sizes, current_frame_ind, noise):
    """ Start a brand-new track for every detection in the current frame.
    Used when no active tracks exist to match against.
    Args:
        raw_track_list (list): list of tracks
        positions (numpy array): p x 2
        contours (list): p contours
        sizes: p per-detection sizes
        current_frame_ind (int): current frame index
        noise: how much noise to add to tracks initially
    """
    detections = zip(positions, contours, sizes)
    for pos_ind, (position, contour, size) in enumerate(detections):
        new_track = ktf.create_new_track(
            first_frame=current_frame_ind, first_position=position,
            pos_index=pos_ind, noise=noise, contour=contour, size=size)
        raw_track_list.append(new_track)
    return raw_track_list
def find_tracks(first_frame_ind, positions,
                contours_files=None, contours_list=None,
                sizes_list=None, max_frame=None, verbose=True,
                tracks_file=None):
    """ Take in positions of all individuals in frames and find tracks.
    Args:
        first_frame_ind (int): index of first frame of these tracks
        positions (list): n x 2 for each frame
        contours_files (list): list of files for contour info from each frame
        contours_list: already loaded list of contours, only used if contours_file
            is None
        sizes_list (list): sizes info from each frame
        max_frame (int): last frame (exclusive) to process; defaults to
            len(positions)
        verbose (bool): print progress every 10000 frames
        tracks_file (str): if given, periodically checkpoint tracks to this
            .npy path
    return list of all tracks found
    """
    raw_track_list = []
    # Hand-tuned matching thresholds (pixel distances / frame counts).
    max_distance_threshold = 30
    max_distance_threshold_noise = 30
    min_distance_threshold = 0
    max_unseen_time = 2
    min_new_track_distance = 3
    min_distance_big = 30
    # #Create initial tracks based on the objects in the first frame
    # raw_track_list = add_all_points_as_new_tracks(
    #     raw_track_list, positions[0], contours_list[0], sizes_list0, noise=0
    #     )
    #try to connect points to the next frame
    if max_frame is None:
        max_frame = len(positions)
    contours_file_ind = 0
    previous_contours_seen = 0
    if contours_files:
        # Advance through the contour files until the one containing
        # first_frame_ind is loaded.
        contours_list = np.load(contours_files[contours_file_ind], allow_pickle=True)
        while first_frame_ind >= previous_contours_seen + len(contours_list):
            contours_file_ind += 1
            previous_contours_seen += len(contours_list)
            contours_list = np.load(contours_files[contours_file_ind], allow_pickle=True)
        print(f'using {contours_files[contours_file_ind]}')
    elif not contours_list:
        print("Needs contour_files or contour_list")
        return
    # Incremented at the top of the loop, hence the -1 here.
    contours_ind = first_frame_ind - previous_contours_seen - 1
    for frame_ind in range(first_frame_ind, max_frame):
        contours_ind += 1
        if contours_files:
            if contours_ind >= len(contours_list):
                # load next file
                try:
                    contours_file_ind += 1
                    contours_list = np.load(contours_files[contours_file_ind], allow_pickle=True)
                    contours_ind = 0
                except:
                    # NOTE(review): bare except silently swallows the failure
                    # (e.g. running past the last contours file) and continues
                    # with a stale contours_list; consider narrowing the
                    # exception type and re-raising after the emergency save.
                    if tracks_file:
                        tracks_file_error = os.path.splitext(tracks_file)[0] + f'-error-{frame_ind}.npy'
                        print(tracks_file_error)
                        np.save(tracks_file_error, np.array(raw_track_list, dtype=object))
        #get tracks that are still active (have been seen within the specified time)
        active_list = ktf.calculate_active_list(raw_track_list, max_unseen_time, frame_ind)
        if verbose:
            if frame_ind % 10000 == 0:
                print('frame {} processed.'.format(frame_ind))
            if tracks_file:
                # Periodic checkpoint so long runs can be resumed/inspected.
                np.save(tracks_file, np.array(raw_track_list, dtype=object))
        if len(active_list) == 0:
            #No existing tracks to connect to
            #Every point in next frame must start a new track
            raw_track_list = add_all_points_as_new_tracks(
                raw_track_list, positions[frame_ind], contours_list[contours_ind],
                sizes_list[frame_ind], frame_ind, noise=1
            )
            continue
        # Make sure there are new points to add
        new_positions = None
        row_ind = None
        col_ind = None
        new_sizes = None
        new_position_indexes = None
        distance = None
        contours = None
        if len(positions[frame_ind]) != 0:
            #positions from the next step
            new_positions = positions[frame_ind]
            contours = [np.copy(contour) for contour in contours_list[contours_ind]]
            new_sizes = sizes_list[frame_ind]
            raw_track_list = ktf.calculate_max_distance(
                raw_track_list, active_list, max_distance_threshold,
                max_distance_threshold_noise, min_distance_threshold,
                use_size=True, min_distance_big=min_distance_big
            )
            distance = ktf.calculate_distances(
                new_positions, raw_track_list, active_list
            )
            max_distance = ktf.create_max_distance_array(
                distance, raw_track_list, active_list
            )
            assert distance.shape[1] == len(new_positions)
            assert distance.shape[1] == len(contours)
            assert distance.shape[1] == len(new_sizes)
            # Some new points could be too far away from every existing track
            raw_track_list, distance, new_positions, new_position_indexes, new_sizes, contours = ktf.process_points_without_tracks(
                distance, max_distance, raw_track_list, new_positions, contours,
                frame_ind, new_sizes
            )
            if distance.shape[1] > 0:
                # There are new points can be assigned to existing tracks
                #connect the dots from one frame to the next
                # Hungarian assignment on log-distances favors many short
                # links over one long one.
                row_ind, col_ind = linear_sum_assignment(np.log(distance + 1))
                # In cases where there are fewer new points than existing tracks
                # some tracks won't get new point. Just assign them to
                # the closest point
                row_ind, col_ind = ktf.filter_tracks_without_new_points(
                    raw_track_list, distance, row_ind, col_ind, active_list, frame_ind
                )
                # Check if tracks with big bats got assigned to small points which are
                # probably noise
                row_ind, col_ind = ktf.fix_tracks_with_small_points(
                    raw_track_list, distance, row_ind, col_ind, active_list, new_sizes, frame_ind)
                # see if points got assigned to tracks that are farther
                # than max_threshold_distance
                # This happens when the closer track gets assigned
                # to a different point
                row_ind, col_ind = ktf.filter_bad_assigns(raw_track_list, active_list, distance, max_distance,
                                                          row_ind, col_ind
                                                          )
        # Runs even with no new points so unseen counters still advance.
        raw_track_list = ktf.update_tracks(raw_track_list, active_list, frame_ind,
                                           row_ind, col_ind, new_positions,
                                           new_position_indexes, new_sizes, contours,
                                           distance, min_new_track_distance)
        raw_track_list = ktf.remove_noisy_tracks(raw_track_list)
    raw_track_list = ktf.finalize_tracks(raw_track_list)
    if tracks_file:
        np.save(tracks_file, np.array(raw_track_list, dtype=object))
        print('{} final save.'.format(os.path.basename(os.path.dirname(tracks_file))))
    return raw_track_list
def get_tracked_bats_in_frame(image_files, focal_frame_ind, bat_thresh, bat_area_thresh):
    """Detect and track bats over a short clip starting at focal_frame_ind.
    Returns (raw track list, per-frame list of detection centers).
    """
    clip_length = 5
    array_size = 31
    centers_list = []
    contours_list = []
    sizes_list = []
    images, frame_buffer_ind = initialize_image_array(
        image_files, focal_frame_ind, array_size)
    background_sum = np.sum(images[:, :, :, 2], 0, dtype=np.int16)
    for video_frame_ind in range(focal_frame_ind,
                                 focal_frame_ind + clip_length):
        (bat_centers, bat_areas, bat_contours,
         _, _, _, bat_thresh) = process_frame(
            images, frame_buffer_ind, bat_thresh, background_sum,
            bat_area_thresh, debug=False)
        centers_list.append(bat_centers)
        contours_list.append(bat_contours)
        sizes_list.append(bat_areas)
        images, frame_buffer_ind, background_sum = update_circular_image_array(
            images, frame_buffer_ind, image_files, video_frame_ind,
            background_sum)
    raw_tracks = find_tracks(0, centers_list,
                             contours_list=contours_list,
                             sizes_list=sizes_list
                             )
    return raw_tracks, centers_list
# return raw_tracks, centers_list, distance, max_distance, active_list, all_pre_distances, all_row_inds, all_col_inds
# return(connected_distance, connected_size)
def piecewise_linear(x, x0, y0, k1, k2):
    """Two-segment linear function hinged at (x0, y0).
    Slope k1 left of x0 and k2 at/right of x0; both pieces pass
    through the hinge point.
    """
    left_piece = lambda x: k1 * x + y0 - k1 * x0
    right_piece = lambda x: k2 * x + y0 - k2 * x0
    return np.piecewise(x, [x < x0], [left_piece, right_piece])
def get_bat_accumulation(crossing_frames, obs=None, parameters=None,
                         w_multiplier=True, w_darkness=True, w_frac=True):
    """ Cumulative net count of bats crossing the count line over time.
    crossing_frames: frame at which each track crossed; positive if
        leaving, negative if returning
    obs: observation dictionary (enables per-track weighting)
    parameters: piecewise-linear darkness-accuracy parameters
    w_multiplier: weight each crossing by the track's bat multiplier
    w_darkness: weight each crossing by 1 / darkness accuracy
    w_frac: scale the curve by the camera's fraction of the circle
    Returns 1d array of accumulated net crossings per frame.
    """
    if not np.any(crossing_frames):
        return np.zeros(1)
    final_frame = np.max(np.abs(crossing_frames))
    net_per_frame = np.zeros(final_frame + 1)
    if obs and parameters:
        accuracies = piecewise_linear(obs['darkness'], *parameters)
        weighted = zip(crossing_frames, obs['multiplier'], accuracies)
        for cross_frame, multiplier, accuracy in weighted:
            weight = 1
            if w_multiplier:
                weight *= multiplier
            if w_darkness:
                weight *= (1 / accuracy)
            if cross_frame < 0:
                net_per_frame[-cross_frame] -= weight
            elif cross_frame > 0:
                net_per_frame[cross_frame] += weight
        if w_frac:
            net_per_frame *= obs['fraction_total']
    else:
        for cross_frame in crossing_frames:
            if cross_frame < 0:
                net_per_frame[-cross_frame] -= 1
            elif cross_frame > 0:
                net_per_frame[cross_frame] += 1
    return np.cumsum(net_per_frame)
def threshold_short_tracks(raw_track_list, min_length_threshold=2):
    """Only return tracks with at least min_length_threshold points.
    Track position lists are converted to numpy arrays in place.
    """
    kept_tracks = []
    for track in raw_track_list:
        if isinstance(track['track'], list):
            track['track'] = np.array(track['track'])
        if track['track'].shape[0] >= min_length_threshold:
            kept_tracks.append(track)
    return kept_tracks
def calculate_height(wingspan_pixels, camera_constant, wingspan_meters):
    '''Height of a bat above the ground assuming its true wingspan is
    wingspan_meters.
    camera_constant = (frame pixels / 2) / tan(fov / 2)
    height = camera_constant * wingspan_meters / wingspan_pixels
    '''
    return camera_constant * wingspan_meters / wingspan_pixels
def calculate_bat_multiplier_simple(height, horizontal_fov, distance_to_center):
    '''How many bats a single observed bat represents (thin-strip model).
    height: height of bat
    horizontal_fov: horizontal field of view of camera (degrees)
    distance_to_center: distance from camera to center of colony
    Assumes the colony circumference is much larger than the strip of sky
    the camera sees: multiplier = circumference / strip width.
    '''
    circumference = 2 * np.pi * distance_to_center
    fov_radians = horizontal_fov * np.pi / 180
    strip_width = 2 * height * np.tan(fov_radians / 2)
    return circumference / strip_width
def calculate_bat_multiplier(height, horizontal_fov, distance_to_center):
    '''How many bats a single observed bat represents.
    height: height of bat
    horizontal_fov: horizontal field of view of camera (degrees)
    distance_to_center: distance from camera to center of colony
    phi = arctan((height * tan(fov / 2)) / distance_to_center)
    multiplier = pi / phi
    '''
    fov_radians = horizontal_fov * np.pi / 180
    # Floor the distance to avoid dividing by zero at the colony center.
    safe_distance = np.max([distance_to_center, 10e-5])
    phi = np.arctan((height * np.tan(fov_radians / 2))
                    / safe_distance
                    )
    return np.pi / phi
def combined_bat_multiplier(frame_width, wingspan_meters,
                            wingspan_pixels, camera_distance):
    """ Calculates bat multiplier.
    Args:
        frame_width: frame width in pixels
        wingspan_meters: bat wingspan in meters
        wingspan_pixels: bat wingspan in pixels
        camera_distance: distance from forest point to camera in meters;
            a single value or an array shaped like wingspan_pixels
    Returns:
        bat multiplier: float
    """
    half_view_angle = np.arctan(
        (frame_width * wingspan_meters)
        / (2 * wingspan_pixels * camera_distance)
    )
    return np.pi / half_view_angle
def get_rects(track):
    """ Min-area bounding rectangle edge lengths for every contour in a track.
    track: track dict with 'contour' key linked to list of cv2 contours
    Returns an n x 2 array; degenerate single-point contours yield nan rows.
    """
    edge_pairs = []
    for contour in track['contour']:
        if len(contour.shape) > 1:
            edge_pairs.append(cv2.minAreaRect(contour)[1])
        else:
            edge_pairs.append((np.nan, np.nan))
    return np.array(edge_pairs)
def get_wingspan(track):
    """ Estimate wingspan in pixels: mean of the peaks of the longer
    bounding-rectangle edge across the track (falls back to the overall
    mean when no peaks exist).
    """
    if 'rects' not in track.keys():
        track['rects'] = get_rects(track)
    long_edge = np.nanmax(track['rects'], 1)
    long_edge = long_edge[~np.isnan(long_edge)]
    peak_inds = signal.find_peaks(long_edge)[0]
    if len(peak_inds):
        return np.nanmean(long_edge[peak_inds])
    return np.nanmean(long_edge)
def measure_crossing_bats(track_list, frame_height=None, frame_width=None,
                          count_across=False, count_out=True, num_frames=None,
                          with_rects=True):
    """ Find and quantify all tracks that cross middle line.
    track_list: list of track dicts
    frame_height: height of frame in pixels (required when count_out)
    frame_width: width of frame in pixels (required when count_across)
    count_across: count horizontal tracks
    count_out: count vertical tracks
    num_frames: number of frames in observation
    with_rects: if True calculate rects if not already
        in track and estimate wingspan and body size
    Returns the tracks that crossed, annotated in place with the signed
    crossing frame, wingspan and body-size estimates.
    """
    if count_across:
        assert frame_width, "If vertical must specify frame width."
        across_line = CountLine(int(frame_width/2), line_dim=0, total_frames=num_frames)
    if count_out:
        assert frame_height, "If horizontal must specify frame height."
        out_line = CountLine(int(frame_height/2), line_dim=1, total_frames=num_frames)
    crossing_track_list = []
    for track_ind, track in enumerate(track_list[:]):
        out_result = None
        across_result = None
        if count_out:
            out_result, out_frame_num = out_line.is_crossing(track, track_ind)
        if count_across:
            across_result, across_frame_num = across_line.is_crossing(track, track_ind)
        if out_result or across_result:
            crossing_track_list.append(track)
            # result is 1 if forward crossing, -1 if backward crossing;
            # direction is encoded in the sign of the stored frame number
            if count_out:
                if out_frame_num:
                    crossing_track_list[-1]['crossed'] = out_frame_num * out_result
                else:
                    crossing_track_list[-1]['crossed'] = 0
            if count_across:
                if across_frame_num:
                    crossing_track_list[-1]['across_crossed'] = across_frame_num * across_result
                else:
                    crossing_track_list[-1]['across_crossed'] = 0
            # BUG FIX: was `track[id] = track_ind`, which keyed the dict on
            # the builtin function `id` instead of the string 'id'.
            track['id'] = track_ind
            if with_rects:
                if not 'rects' in track.keys():
                    track['rects'] = get_rects(track)
                # Body size comes from the shorter bounding-rect edge.
                min_edge = np.nanmin(track['rects'], 1)
                min_edge = min_edge[~np.isnan(min_edge)]
                # BUG FIX: the peak search and fallback mean previously
                # referenced the undefined name `max_edge` (NameError at
                # runtime); both must operate on min_edge here.
                peaks = signal.find_peaks(min_edge)[0]
                if len(peaks) != 0:
                    mean_body = np.nanmean(min_edge[peaks])
                else:
                    mean_body = np.nanmean(min_edge)
                crossing_track_list[-1]['mean_wing'] = get_wingspan(track)
                crossing_track_list[-1]['mean_body'] = mean_body
    return crossing_track_list
def get_camera_locations(observations, all_camera_locations, exclude=False):
    """Return dict of all camera locations that appear in observations.
    observations: dict of observations. Probably all observations from one day.
    all_camera_locations: dict containing all camera locations across all days
    exclude: if True, skip observations flagged with obs['exclude']
    """
    camera_locations = {}
    for obs in observations.values():
        # obs.get covers both a missing key and a falsy flag.
        if exclude and obs.get('exclude'):
            continue
        camera_locations[obs['camera']] = all_camera_locations[obs['camera']]
    return camera_locations
def get_camera_distance(camera_utm, center_utm):
    """ Calculate the Euclidean distance in meters between the utm of a
    camera and a possible forest center.
    camera_utm: [x, y] array
    center_utm: [x, y] array
    """
    diff = camera_utm - center_utm
    # BUG FIX: previous code computed np.sum(np.sqrt(diff ** 2)), which is
    # |dx| + |dy| (Manhattan distance). Euclidean distance, as the docstring
    # promises, is sqrt(dx**2 + dy**2).
    return np.sqrt(np.sum(diff ** 2))
def get_camera_distances(camera_utms, center_utm):
    """ Calculate distance from every given camera to specified center.
    camera_utms: dict with camera names and locations
    center_utm: np.array 2d, location of forest center
    """
    return {name: get_camera_distance(location, center_utm)
            for name, location in camera_utms.items()}
def get_camera_angles(camera_utms, center_utm):
    """ Angle (radians) from the forest center to each camera location.
    camera_utms: dict pairs of camera names and location info
    center_utm: 2d np.array, location of forest center
    """
    angles = {}
    for name, location in camera_utms.items():
        offset = location - center_utm
        angles[name] = np.arctan2(offset[1], offset[0])
    return angles
def get_camera_borders(camera_utms, center_utm, jitter=False):
    """ Get angles around forest center that evenly bisect camera positions.
    camera_utms: dict pairs of camera names and location info
    center_utm: 2d np.array, location of forest center
    jitter: if True, don't actually bisect cameras at midpoint but drawn
        from a gaussian
    Returns dict per camera with 'clock'/'cclock' neighbor names and the
    half-angle offsets to each shared border ('clock_angle'/'cclock_angle',
    radians).
    """
    camera_border = {}
    camera_angles = get_camera_angles(camera_utms, center_utm)
    for camera, camera_utm in camera_utms.items():
        # Sentinels for the nearest neighbor in each rotational direction.
        min_neg = -10000
        min_pos = 100000
        # for border case where focal is positive angle
        # and closest cclock is negative
        max_pos = 0
        # for same case a last comment
        all_pos = True
        # for border case where focal is positive angle
        # and closest cclock is negative
        max_neg = 0
        # for same case a last comment
        all_neg = True
        max_camera = None
        camera_border[camera] = {'cclock': None,
                                 'cclock_angle': None,
                                 'clock': None,
                                 'clock_angle': None
                                 }
        for alt_camera, alt_camera_utm in camera_utms.items():
            if camera == alt_camera:
                continue
            # Signed angular difference from the focal camera.
            dif = camera_angles[camera] - camera_angles[alt_camera]
            if dif < 0:
                all_pos = False
                if dif > min_neg:
                    min_neg = dif
                    camera_border[camera]['cclock'] = alt_camera
                    camera_border[camera]['cclock_angle'] = dif / 2
                if dif < max_neg:
                    max_neg = dif
                    max_camera = alt_camera
            if dif > 0:
                all_neg = False
                if dif < min_pos:
                    min_pos = dif
                    camera_border[camera]['clock'] = alt_camera
                    camera_border[camera]['clock_angle'] = dif / 2
                if dif > max_pos:
                    max_pos = dif
                    max_camera = alt_camera
        if all_pos:
            # No neighbor counter-clockwise of focal: wrap around the circle.
            camera_border[camera]['cclock'] = max_camera
            camera_border[camera]['cclock_angle'] = (max_pos - 2*np.pi) / 2
        if all_neg:
            # No neighbor clockwise of focal: wrap around the circle.
            camera_border[camera]['clock'] = max_camera
            camera_border[camera]['clock_angle'] = (max_neg + 2*np.pi) / 2
    if jitter:
        for camera, border_info in camera_border.items():
            camera_angle = camera_angles[camera]
            clockwise_camera = border_info['clock']
            angle_dif = border_info['clock_angle']
            # Three standard deviations spans the gap between the camera pair
            jitter_angle = np.random.normal(scale=angle_dif/3)
            # Clamp so the jittered border never crosses either camera.
            jitter_angle = np.maximum(-border_info['clock_angle'],
                                      jitter_angle)
            jitter_angle = np.minimum(border_info['clock_angle'],
                                      jitter_angle)
            camera_border[camera]['clock_angle'] += jitter_angle
            if camera_border[camera]['clock_angle'] < 0:
                camera_border[camera]['clock_angle'] += (2 * np.pi)
            if camera_border[camera]['clock_angle'] >= (2 * np.pi):
                camera_border[camera]['clock_angle'] -= (2 * np.pi)
            # Shift the clockwise neighbor's matching border by the same
            # amount so the two descriptions of the border stay consistent.
            camera_border[clockwise_camera]['cclock_angle'] += jitter_angle
            if camera_border[clockwise_camera]['cclock_angle'] < -2 * np.pi:
                camera_border[clockwise_camera]['cclock_angle'] += (2 * np.pi)
            if camera_border[clockwise_camera]['cclock_angle'] >= (2 * np.pi):
                camera_border[clockwise_camera]['cclock_angle'] -= (2 * np.pi)
    return camera_border
def latlong_dict_to_utm(latlong_dict):
    """ Convert a dict of lat/long coordinates to utm easting/northing."""
    utm_dict = {}
    for name, latlong in latlong_dict.items():
        # from_latlon returns (easting, northing, zone number, zone letter);
        # only the coordinates are kept.
        converted = utm.from_latlon(*latlong)
        utm_dict[name] = np.array([converted[0], converted[1]])
    return utm_dict
def get_camera_fractions(camera_utms, center_utm, jitter=False):
    """ Calculate the fraction of the circle around center that each camera
    is closest to.
    camera_utms: dict of camera locations
    center_utm: 2d np array with utm coordinates of center
    jitter: if True borders between cameras are drawn from a gaussian
        instead of evenly bisecting the camera pair
    return dict with fraction for each camera
    """
    # A lone camera covers the whole circle.
    if len(camera_utms) == 1:
        only_camera = list(camera_utms.keys())[0]
        return {only_camera: 1.0}
    camera_borders = get_camera_borders(camera_utms,
                                        center_utm,
                                        jitter=jitter)
    fractions = {}
    for camera, border_info in camera_borders.items():
        # cclock_angle is negative, so subtracting widens the wedge.
        wedge = border_info['clock_angle'] - border_info['cclock_angle']
        fractions[camera] = wedge / (np.pi * 2)
    return fractions
def get_day_total(observations, center_utm, all_camera_utms,
                  frame_width, wingspan, exclude=False,
                  correct_darkness=False, parameters=None):
    """ Estimate total number of bats based on all observation counts
    and corresponding camera locations.
    observations: dict of all observations for a specific day
    center_utm: estimated location of forest center
    all_camera_utms: dict of the utm locations of each camera
    frame_width: width of camera frame in pixels
    wingspan: estimated wingspan off all bats in meters
    exclude: to manually remove certain cameras, ie shut off early etc.
    correct_darkness: divide by accuracy estimated for given darkness
    parameters: param values of linear piecewise function for darkness
        error correction. Required if correct_darkness is True
    Returns (fraction-weighted total, mean of per-observation totals).
    Note: annotates each obs dict in place with 'multiplier', 'total',
    'total_unscaled', 'fraction_total' (and 'total_darkness').
    """
    frac_sum = 0
    total = 0
    obs_totals = []
    # NOTE(review): exclude=True is hard-coded here while the loop below
    # honors the `exclude` argument — confirm this asymmetry is intended.
    camera_utms = get_camera_locations(observations, all_camera_utms, exclude=True)
    camera_fractions = get_camera_fractions(camera_utms, center_utm)
    for obs in observations.values():
        if exclude:
            if 'exclude' in obs.keys():
                if obs['exclude']:
                    continue
        # NOTE(review): loop-invariant call; could be hoisted above the loop.
        camera_distances = get_camera_distances(camera_utms, center_utm)
        obs['multiplier'] = combined_bat_multiplier(frame_width,
                                                    wingspan,
                                                    obs['mean_wing'],
                                                    camera_distances[obs['camera']]
                                                    )
        if correct_darkness:
            assert parameters is not None, "Must pass parameters if correcting for darkness."
            acc = piecewise_linear(obs['darkness'], *parameters)
            obs['total_darkness'] = np.sum(obs['multiplier'] * obs['direction'] * (1/acc))
        obs['total'] = np.sum(obs['multiplier'] * obs['direction'])
        obs['total_unscaled'] = np.sum(obs['direction'])
        obs['fraction_total'] = camera_fractions[obs['camera']]
        frac_sum += obs['fraction_total']
        if correct_darkness:
            total += obs['total_darkness'] * obs['fraction_total']
            obs_totals.append(obs['total_darkness'])
        else:
            total += obs['total'] * obs['fraction_total']
            obs_totals.append(obs['total'])
    if len(obs_totals) > 0:
        mean_total = np.mean(obs_totals)
    else:
        mean_total = 0
    return total, mean_total
def get_peak_freq(raw_freqs, raw_powers, min_freq):
    """ Max-power frequency above min_freq, with its power.
    raw_freqs: list of frequencies
    raw_powers: list of powers associated with each raw freq value
    min_freq: minimum acceptable frequency value
    Returns (nan, nan) when no valid frequencies remain.
    """
    keep = raw_freqs > min_freq
    freqs = raw_freqs[keep]
    powers = raw_powers[keep]
    if len(freqs) == 0 or np.any(np.isnan(freqs)):
        return np.nan, np.nan
    best = np.argmax(powers)
    return freqs[best], powers[best]
def get_track_wingbeat_freqs(track, fps=25, min_freq=.75):
    """ Add peak wingbeat frequencies and their power to a track in place.
    track: track dict (must already contain 'max_edge')
    fps: frames per second track temporal resolution
    min_freq: minimum frequency for calculating peak_freq.
        Messily segmented tracks often have high power close to 0 Hz
        because the actual signal is not clear.
    """
    assert 'max_edge' in track.keys(), "Track must have max_edge already computed"
    # Welch's segment length cannot exceed the signal length.
    nperseg = min(len(track['max_edge']), 255)
    freqs, powers = signal.welch(track['max_edge'], fps, nperseg=nperseg)
    peak_inds = signal.find_peaks(powers, threshold=0, height=1)[0]
    track['freqs'] = freqs[peak_inds]
    track['freqs_power'] = powers[peak_inds]
    track['peak_freq'], track['peak_freq_power'] = get_peak_freq(
        track['freqs'], track['freqs_power'], min_freq)
def add_wingbeat_info_to_tracks(tracks, fps=25, min_freq=.75,
                                remove_contours=False):
    """ Compute and attach wingbeat-frequency info for every track,
    deriving rects / max_edge / mean_wing first when missing.
    tracks: list of track dicts
    fps: frames per second - temporal resolution of tracks
    min_freq: minimum frequency for calculating peak_freq; messily
        segmented tracks often show spurious power near 0 Hz
    remove_contours: if True drop raw contour data once rects exist,
        useful to save memory
    """
    for track in tracks:
        if 'rects' not in track.keys():
            track['rects'] = get_rects(track)
        if remove_contours:
            # Missing key is fine: pop with a default never raises.
            track.pop('contour', None)
        if 'max_edge' not in track.keys():
            track['max_edge'] = np.nanmax(track['rects'], 1)
        if 'mean_wing' not in track.keys():
            track['mean_wing'] = get_wingspan(track)
        get_track_wingbeat_freqs(track, fps=fps, min_freq=min_freq)
def get_random_utm_in_mask(mask, rasterio_map, num_locations=1):
    """ Draw random utm location(s) inside a raster mask.
    mask: 2d np array where forest has values > 0 and background < 0
    rasterio_map: rasterio.io.DatasetReader for mask
    num_locations: number of locations to return in forest
    Returns (num_locations, 2) array of (x, y) utm values, squeezed to
    shape (2,) when a single location is requested.
    """
    forest_pixels = np.argwhere(mask > 0)
    picks = np.random.randint(0, forest_pixels.shape[0], num_locations)
    x_origin = rasterio_map.bounds.left
    y_origin = rasterio_map.bounds.bottom
    # Column index maps to easting, row index to northing.
    x_utm = forest_pixels[picks, 1] + x_origin
    y_utm = forest_pixels[picks, 0] + y_origin
    # squeeze drops the extra dimension for a single location
    return np.squeeze(np.stack([x_utm, y_utm], axis=1))
def get_wing_correction_distributions(validation_file, num_darkness_bins,
                                      kde_bw_scale=1, should_plot=False):
    """ Calculate wing correction distributions from human validation info.
    validation_file: .csv file with human groundtruth info
    num_darkness_bins: how many groups to split darkness range into
    kde_bw_scale: kernel size used in kde calculation: data std. in bin * kde_bw_scale
    should_plot: show histograms and resulting distributions
    Returns (list of fitted kde objects, one per darkness bin;
    darkness bin edges array of length num_darkness_bins + 1).
    """
    wing_validation = pd.read_csv(validation_file)
    max_darkness = wing_validation.loc[wing_validation['has_gt'], 'darkness'].max()
    darkness_bins = np.linspace(0, max_darkness, num_darkness_bins+1)
    # Open the top bin so frames darker than max_darkness still fall in it.
    darkness_bins[-1] = 255
    wing_correction_kdes = []
    for bin_num in range(num_darkness_bins):
        # Groundtruthed rows in this darkness bin with a usable error value
        # (error_norm == -1 appears to mark invalid entries — confirm).
        rows_in_bin = (wing_validation['has_gt']
                       & (wing_validation['darkness'] > darkness_bins[bin_num])
                       & (wing_validation['darkness'] <= darkness_bins[bin_num+1])
                       & (wing_validation['error_norm'] > -1)
                       )
        errors = wing_validation.loc[rows_in_bin, 'error_norm'].values
        error_std = errors.std()
        # Kernel bandwidth scales with the spread of errors in the bin.
        kde = KernelDensity(
            kernel='gaussian', bandwidth=error_std*kde_bw_scale).fit(errors[..., np.newaxis])
        wing_correction_kdes.append(kde)
        if should_plot:
            sorted_error = np.sort(errors, axis=0)
            samples = np.linspace(-1,1,100)
            log_dens = kde.score_samples(samples[..., np.newaxis])
            fig, ax1 = plt.subplots()
            color = 'tab:red'
            # Histogram of observed errors with the fitted density overlaid.
            ax1.hist(sorted_error, bins=40, density=True)
            ax1.plot(samples, np.exp(log_dens), c='cyan')
    if should_plot:
        # Summary figure: all bin densities on shared axes.
        plt.figure()
        for kde in wing_correction_kdes:
            samples = np.linspace(-1,1,100)
            log_dens = kde.score_samples(samples[..., np.newaxis])
            plt.plot(samples, np.exp(log_dens),)
    return wing_correction_kdes, darkness_bins
def get_kde_samples(obs, kde_list, darkness_bins):
    """Draw one sample per track from the kde matching each track's darkness.

    obs: observation dictionary with a 'darkness' array
    kde_list: a fitted kde distribution for each darkness bin
    darkness_bins: darkness thresholds between bins; starts at zero,
        so len(darkness_bins) == number of bins + 1

    Returns (sample for each track, index of the bin each track fell in).
    """
    darkness = obs['darkness']
    num_tracks = len(darkness)
    kde_samples = np.zeros(num_tracks)
    kde_inds = np.zeros(num_tracks)
    bin_edges = zip(darkness_bins[:-1], darkness_bins[1:])
    for bin_ind, (kde, (low_edge, high_edge)) in enumerate(zip(kde_list, bin_edges)):
        in_bin = (darkness > low_edge) & (darkness <= high_edge)
        # Draw a sample for every track, then keep only the ones in this bin.
        drawn = np.squeeze(kde.sample(num_tracks))
        kde_samples[in_bin] = drawn[in_bin]
        kde_inds[in_bin] = bin_ind
    return kde_samples, kde_inds
def correct_wingspan(estimate, estimate_scale):
    """Correct an estimated wingspan using the groundtruth error scale.

    estimate: wingspan estimated from track
    estimate_scale: (estimate - groundtruth) / estimate; the groundtruth is
        unknown in application, so this is usually drawn at random from the
        validation error distribution.
    """
    # Subtract the estimated error fraction from the raw estimate.
    error = estimate * estimate_scale
    return estimate - error
def save_fig(save_folder, plot_title, fig=None):
    """Save a plot with the project's default export settings.

    save_folder: directory to write the image into
    plot_title: becomes the file name, with spaces turned into dashes
    fig: optional figure object; if omitted, the current pyplot figure is saved
    """
    out_path = os.path.join(save_folder, plot_title.replace(' ', '-') + '.png')
    if not fig:
        plt.savefig(out_path, bbox_inches='tight', dpi=600)
        return
    fig.savefig(out_path, bbox_inches='tight', dpi=600)
def smooth_track(track, kernel_size=12):
    """Smooth an n x 2 track with a box (moving-average) filter.

    track: n x 2 array of positions
    kernel_size: width of the averaging window
    Returns an (n - kernel_size + 1) x 2 array ('valid' convolution).
    """
    window = np.full(kernel_size, 1.0 / kernel_size)
    smoothed_x = np.convolve(track[:, 0], window, mode='valid')
    smoothed_y = np.convolve(track[:, 1], window, mode='valid')
    return np.stack([smoothed_x, smoothed_y], 1)
def calculate_straightness(track):
    """Ratio of net displacement to path length for an n x 2 track.

    1.0 means a perfectly straight (smoothed) path; smaller values
    mean more meandering.
    """
    smoothed = smooth_track(track, kernel_size=12)
    steps = smoothed[1:] - smoothed[:-1]
    path_length = np.linalg.norm(steps, axis=1).sum()
    net_displacement = np.linalg.norm(smoothed[-1] - smoothed[0])
    return net_displacement / path_length
def get_middle_percentiles(values, lower_percentile, upper_percentile):
    """Return the sorted values lying between the two percentiles.

    NaNs are dropped before the percentile positions are computed.
    Percentiles are fractions in [0, 1].
    """
    cleaned = np.array(values)
    cleaned = cleaned[~np.isnan(cleaned)]
    n = len(cleaned)
    start = int(lower_percentile * n)
    stop = int(upper_percentile * n + 1)
    return sorted(cleaned)[start:stop]
def calc_movement_unit_vector(track, frame_height=1520):
    """Unit vector pointing from a track's first to its last position.

    Positions are flipped vertically so the origin is bottom-left
    (image coordinates put it top-left).

    track: track dict with an n x 2 'track' array
    frame_height: height in pixels of the frames the track came from
    """
    positions = np.copy(track['track'])
    positions[:, 1] = frame_height - positions[:, 1]  # flip the y axis
    displacement = positions[-1] - positions[0]
    return displacement / np.linalg.norm(displacement)
def calculate_polarization(tracks):
    """Polarization (following Couzin et al. 2002) of all bats in tracks.

    1.0 when every bat heads the same way; near 0 when headings cancel out.
    """
    headings = [calc_movement_unit_vector(track) for track in tracks]
    heading_sum = np.sum(np.array(headings), axis=0)
    return np.linalg.norm(heading_sum) / len(tracks)
def get_camera_color_dict(colormap=plt.cm.tab10):
    """Map each camera name to a fixed color for consistent plots.

    Cameras that are two names for the same site share a color
    (FibweParking/FibweParking2, MusolePath/MusolePath2).
    """
    colormap_positions = {
        'FibweParking2': 0,
        'FibweParking': 0,
        'Chyniangale': .1,
        'BBC': .2,
        'Sunset': .3,
        'NotChyniangale': .4,
        'MusoleParking': .5,
        'MusolePath2': .6,
        'MusolePath': .6,
        'Puku': .7,
        'FibwePublic': .8,
        'MusoleTower': .9,
    }
    return {camera: colormap(pos) for camera, pos in colormap_positions.items()}
| [
"koger_tracking.create_new_track",
"numpy.convolve",
"numpy.sqrt",
"utm.from_latlon",
"pandas.read_csv",
"koger_tracking.process_points_without_tracks",
"numpy.log",
"numpy.array",
"numpy.nanmean",
"numpy.arctan2",
"koger_tracking.fix_tracks_with_small_points",
"numpy.linalg.norm",
"numpy.pi... | [((293, 352), 'sys.path.append', 'sys.path.append', (['"""/home/golden/general-detection/functions"""'], {}), "('/home/golden/general-detection/functions')\n", (308, 352), False, 'import sys\n'), ((1078, 1096), 'numpy.copy', 'np.copy', (['image_raw'], {}), '(image_raw)\n', (1085, 1096), True, 'import numpy as np\n'), ((3033, 3050), 'matplotlib.pyplot.imshow', 'plt.imshow', (['frame'], {}), '(frame)\n', (3043, 3050), True, 'import matplotlib.pyplot as plt\n'), ((4396, 4444), 'numpy.floor_divide', 'np.floor_divide', (['background_sum', 'images.shape[0]'], {}), '(background_sum, images.shape[0])\n', (4411, 4444), True, 'import numpy as np\n'), ((5418, 5481), 'numpy.where', 'np.where', (['(threshold < min_difference)', 'min_difference', 'threshold'], {}), '(threshold < min_difference, min_difference, threshold)\n', (5426, 5481), True, 'import numpy as np\n'), ((5506, 5541), 'numpy.where', 'np.where', (['(image < threshold)', '(0)', '(255)'], {}), '(image < threshold, 0, 255)\n', (5514, 5541), True, 'import numpy as np\n'), ((11289, 11305), 'numpy.stack', 'np.stack', (['images'], {}), '(images)\n', (11297, 11305), True, 'import numpy as np\n'), ((11964, 12005), 'numpy.mean', 'np.mean', (['images[focal_frame_ind, :, :, 2]'], {}), '(images[focal_frame_ind, :, :, 2])\n', (11971, 12005), True, 'import numpy as np\n'), ((22111, 22146), 'koger_tracking.finalize_tracks', 'ktf.finalize_tracks', (['raw_track_list'], {}), '(raw_track_list)\n', (22130, 22146), True, 'import koger_tracking as ktf\n'), ((22680, 22725), 'numpy.sum', 'np.sum', (['images[:, :, :, 2]', '(0)'], {'dtype': 'np.int16'}), '(images[:, :, :, 2], 0, dtype=np.int16)\n', (22686, 22725), True, 'import numpy as np\n'), ((23762, 23859), 'numpy.piecewise', 'np.piecewise', (['x', '[x < x0]', '[lambda x: k1 * x + y0 - k1 * x0, lambda x: k2 * x + y0 - k2 * x0]'], {}), '(x, [x < x0], [lambda x: k1 * x + y0 - k1 * x0, lambda x: k2 *\n x + y0 - k2 * x0])\n', (23774, 23859), True, 'import numpy as np\n'), 
((24859, 24892), 'numpy.zeros', 'np.zeros', (['(last_crossing_frame + 1)'], {}), '(last_crossing_frame + 1)\n', (24867, 24892), True, 'import numpy as np\n'), ((25741, 25770), 'numpy.cumsum', 'np.cumsum', (['crossing_per_frame'], {}), '(crossing_per_frame)\n', (25750, 25770), True, 'import numpy as np\n'), ((28020, 28056), 'numpy.max', 'np.max', (['[distance_to_center, 0.0001]'], {}), '([distance_to_center, 0.0001])\n', (28026, 28056), True, 'import numpy as np\n'), ((28773, 28859), 'numpy.arctan', 'np.arctan', (['(frame_width * wingspan_meters / (2 * wingspan_pixels * camera_distance))'], {}), '(frame_width * wingspan_meters / (2 * wingspan_pixels *\n camera_distance))\n', (28782, 28859), True, 'import numpy as np\n'), ((29323, 29338), 'numpy.array', 'np.array', (['rects'], {}), '(rects)\n', (29331, 29338), True, 'import numpy as np\n'), ((29587, 29615), 'numpy.nanmax', 'np.nanmax', (["track['rects']", '(1)'], {}), "(track['rects'], 1)\n", (29596, 29615), True, 'import numpy as np\n'), ((43191, 43244), 'scipy.signal.welch', 'signal.welch', (["track['max_edge']", 'fps'], {'nperseg': 'nperseg'}), "(track['max_edge'], fps, nperseg=nperseg)\n", (43203, 43244), False, 'from scipy import signal\n'), ((45229, 45250), 'numpy.argwhere', 'np.argwhere', (['(mask > 0)'], {}), '(mask > 0)\n', (45240, 45250), True, 'import numpy as np\n'), ((45259, 45312), 'numpy.random.randint', 'np.random.randint', (['(0)', 'in_hull.shape[0]', 'num_locations'], {}), '(0, in_hull.shape[0], num_locations)\n', (45276, 45312), True, 'import numpy as np\n'), ((45516, 45546), 'numpy.stack', 'np.stack', (['[xutm, yutm]'], {'axis': '(1)'}), '([xutm, yutm], axis=1)\n', (45524, 45546), True, 'import numpy as np\n'), ((45643, 45663), 'numpy.squeeze', 'np.squeeze', (['utm_vals'], {}), '(utm_vals)\n', (45653, 45663), True, 'import numpy as np\n'), ((46217, 46245), 'pandas.read_csv', 'pd.read_csv', (['validation_file'], {}), '(validation_file)\n', (46228, 46245), True, 'import pandas as pd\n'), ((46351, 
46402), 'numpy.linspace', 'np.linspace', (['(0)', 'max_darkness', '(num_darkness_bins + 1)'], {}), '(0, max_darkness, num_darkness_bins + 1)\n', (46362, 46402), True, 'import numpy as np\n'), ((49256, 49301), 'os.path.join', 'os.path.join', (['save_folder', "(plot_name + '.png')"], {}), "(save_folder, plot_name + '.png')\n", (49268, 49301), False, 'import os\n'), ((49392, 49439), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "(file, bbox_inches='tight', dpi=600)\n", (49403, 49439), True, 'import matplotlib.pyplot as plt\n'), ((49601, 49647), 'numpy.convolve', 'np.convolve', (['track[:, 0]', 'kernel'], {'mode': '"""valid"""'}), "(track[:, 0], kernel, mode='valid')\n", (49612, 49647), True, 'import numpy as np\n'), ((49656, 49702), 'numpy.convolve', 'np.convolve', (['track[:, 1]', 'kernel'], {'mode': '"""valid"""'}), "(track[:, 1], kernel, mode='valid')\n", (49667, 49702), True, 'import numpy as np\n'), ((49719, 49738), 'numpy.stack', 'np.stack', (['[x, y]', '(1)'], {}), '([x, y], 1)\n', (49727, 49738), True, 'import numpy as np\n'), ((49951, 49987), 'numpy.linalg.norm', 'np.linalg.norm', (['step_vectors'], {'axis': '(1)'}), '(step_vectors, axis=1)\n', (49965, 49987), True, 'import numpy as np\n'), ((50009, 50027), 'numpy.sum', 'np.sum', (['step_sizes'], {}), '(step_sizes)\n', (50015, 50027), True, 'import numpy as np\n'), ((50047, 50083), 'numpy.linalg.norm', 'np.linalg.norm', (['(track[-1] - track[0])'], {}), '(track[-1] - track[0])\n', (50061, 50083), True, 'import numpy as np\n'), ((50291, 50307), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (50299, 50307), True, 'import numpy as np\n'), ((50832, 50855), 'numpy.copy', 'np.copy', (["track['track']"], {}), "(track['track'])\n", (50839, 50855), True, 'import numpy as np\n'), ((51386, 51415), 'numpy.linalg.norm', 'np.linalg.norm', (['direction_sum'], {}), '(direction_sum)\n', (51400, 51415), True, 'import numpy as np\n'), ((3720, 3838), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['(positions[frame_ind][:, 0] + shift)', '(positions[frame_ind][:, 1] + shift)'], {'c': '"""red"""', 'alpha': 'position_alpha'}), "(positions[frame_ind][:, 0] + shift, positions[frame_ind][:, 1] +\n shift, c='red', alpha=position_alpha)\n", (3731, 3838), True, 'import matplotlib.pyplot as plt\n'), ((6228, 6247), 'numpy.squeeze', 'np.squeeze', (['contour'], {}), '(contour)\n', (6238, 6247), True, 'import numpy as np\n'), ((7624, 7644), 'numpy.stack', 'np.stack', (['centers', '(0)'], {}), '(centers, 0)\n', (7632, 7644), True, 'import numpy as np\n'), ((7661, 7679), 'numpy.stack', 'np.stack', (['sizes', '(0)'], {}), '(sizes, 0)\n', (7669, 7679), True, 'import numpy as np\n'), ((7708, 7724), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {}), '((0, 2))\n', (7716, 7724), True, 'import numpy as np\n'), ((7758, 7773), 'numpy.array', 'np.array', (['areas'], {}), '(areas)\n', (7766, 7773), True, 'import numpy as np\n'), ((10010, 10032), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (10020, 10032), False, 'import cv2\n'), ((10063, 10101), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (10075, 10101), False, 'import cv2\n'), ((11196, 11212), 'cv2.imread', 'cv2.imread', (['file'], {}), '(file)\n', (11206, 11212), False, 'import cv2\n'), ((15206, 15267), 'numpy.load', 'np.load', (['contours_files[contours_file_ind]'], {'allow_pickle': '(True)'}), '(contours_files[contours_file_ind], allow_pickle=True)\n', (15213, 15267), True, 'import numpy as np\n'), ((16588, 16657), 'koger_tracking.calculate_active_list', 'ktf.calculate_active_list', (['raw_track_list', 'max_unseen_time', 'frame_ind'], {}), '(raw_track_list, max_unseen_time, frame_ind)\n', (16613, 16657), True, 'import koger_tracking as ktf\n'), ((21725, 21900), 'koger_tracking.update_tracks', 'ktf.update_tracks', (['raw_track_list', 'active_list', 'frame_ind', 'row_ind', 'col_ind', 'new_positions', 
'new_position_indexes', 'new_sizes', 'contours', 'distance', 'min_new_track_distance'], {}), '(raw_track_list, active_list, frame_ind, row_ind, col_ind,\n new_positions, new_position_indexes, new_sizes, contours, distance,\n min_new_track_distance)\n', (21742, 21900), True, 'import koger_tracking as ktf\n'), ((22050, 22089), 'koger_tracking.remove_noisy_tracks', 'ktf.remove_noisy_tracks', (['raw_track_list'], {}), '(raw_track_list)\n', (22073, 22089), True, 'import koger_tracking as ktf\n'), ((24724, 24747), 'numpy.any', 'np.any', (['crossing_frames'], {}), '(crossing_frames)\n', (24730, 24747), True, 'import numpy as np\n'), ((24764, 24775), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (24772, 24775), True, 'import numpy as np\n'), ((24809, 24832), 'numpy.abs', 'np.abs', (['crossing_frames'], {}), '(crossing_frames)\n', (24815, 24832), True, 'import numpy as np\n'), ((27409, 27439), 'numpy.tan', 'np.tan', (['(horizontal_fov_rad / 2)'], {}), '(horizontal_fov_rad / 2)\n', (27415, 27439), True, 'import numpy as np\n'), ((29673, 29700), 'scipy.signal.find_peaks', 'signal.find_peaks', (['max_edge'], {}), '(max_edge)\n', (29690, 29700), False, 'from scipy import signal\n'), ((29748, 29775), 'numpy.nanmean', 'np.nanmean', (['max_edge[peaks]'], {}), '(max_edge[peaks])\n', (29758, 29775), True, 'import numpy as np\n'), ((29806, 29826), 'numpy.nanmean', 'np.nanmean', (['max_edge'], {}), '(max_edge)\n', (29816, 29826), True, 'import numpy as np\n'), ((33523, 33541), 'numpy.sqrt', 'np.sqrt', (['(diff ** 2)'], {}), '(diff ** 2)\n', (33530, 33541), True, 'import numpy as np\n'), ((34429, 34455), 'numpy.arctan2', 'np.arctan2', (['dif[1]', 'dif[0]'], {}), '(dif[1], dif[0])\n', (34439, 34455), True, 'import numpy as np\n'), ((38465, 38490), 'utm.from_latlon', 'utm.from_latlon', (['*latlong'], {}), '(*latlong)\n', (38480, 38490), False, 'import utm\n'), ((38515, 38549), 'numpy.array', 'np.array', (['[utm_val[0], utm_val[1]]'], {}), '([utm_val[0], utm_val[1]])\n', (38523, 
38549), True, 'import numpy as np\n'), ((41473, 41517), 'numpy.sum', 'np.sum', (["(obs['multiplier'] * obs['direction'])"], {}), "(obs['multiplier'] * obs['direction'])\n", (41479, 41517), True, 'import numpy as np\n'), ((41550, 41574), 'numpy.sum', 'np.sum', (["obs['direction']"], {}), "(obs['direction'])\n", (41556, 41574), True, 'import numpy as np\n'), ((41996, 42015), 'numpy.mean', 'np.mean', (['obs_totals'], {}), '(obs_totals)\n', (42003, 42015), True, 'import numpy as np\n'), ((43257, 43300), 'scipy.signal.find_peaks', 'signal.find_peaks', (['p'], {'threshold': '(0)', 'height': '(1)'}), '(p, threshold=0, height=1)\n', (43274, 43300), False, 'from scipy import signal\n'), ((47474, 47486), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (47484, 47486), True, 'import matplotlib.pyplot as plt\n'), ((49557, 49577), 'numpy.ones', 'np.ones', (['kernel_size'], {}), '(kernel_size)\n', (49564, 49577), True, 'import numpy as np\n'), ((50961, 50981), 'numpy.linalg.norm', 'np.linalg.norm', (['diff'], {}), '(diff)\n', (50975, 50981), True, 'import numpy as np\n'), ((51318, 51350), 'numpy.array', 'np.array', (['direction_unit_vectors'], {}), '(direction_unit_vectors)\n', (51326, 51350), True, 'import numpy as np\n'), ((6426, 6450), 'cv2.minAreaRect', 'cv2.minAreaRect', (['contour'], {}), '(contour)\n', (6441, 6450), False, 'import cv2\n'), ((8724, 8743), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (8737, 8743), False, 'import cv2\n'), ((8764, 8776), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (8771, 8776), True, 'import numpy as np\n'), ((8789, 8839), 'cv2.drawContours', 'cv2.drawContours', (['image', '[box_d]', '(0)', 'rect_color', '(1)'], {}), '(image, [box_d], 0, rect_color, 1)\n', (8805, 8839), False, 'import cv2\n'), ((11235, 11273), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (11247, 11273), False, 'import cv2\n'), ((13605, 13741), 'koger_tracking.create_new_track', 
'ktf.create_new_track', ([], {'first_frame': 'current_frame_ind', 'first_position': 'position', 'pos_index': 'ind', 'noise': 'noise', 'contour': 'contour', 'size': 'size'}), '(first_frame=current_frame_ind, first_position=position,\n pos_index=ind, noise=noise, contour=contour, size=size)\n', (13625, 13741), True, 'import koger_tracking as ktf\n'), ((15466, 15527), 'numpy.load', 'np.load', (['contours_files[contours_file_ind]'], {'allow_pickle': '(True)'}), '(contours_files[contours_file_ind], allow_pickle=True)\n', (15473, 15527), True, 'import numpy as np\n'), ((17833, 18024), 'koger_tracking.calculate_max_distance', 'ktf.calculate_max_distance', (['raw_track_list', 'active_list', 'max_distance_threshold', 'max_distance_threshold_noise', 'min_distance_threshold'], {'use_size': '(True)', 'min_distance_big': 'min_distance_big'}), '(raw_track_list, active_list,\n max_distance_threshold, max_distance_threshold_noise,\n min_distance_threshold, use_size=True, min_distance_big=min_distance_big)\n', (17859, 18024), True, 'import koger_tracking as ktf\n'), ((18104, 18171), 'koger_tracking.calculate_distances', 'ktf.calculate_distances', (['new_positions', 'raw_track_list', 'active_list'], {}), '(new_positions, raw_track_list, active_list)\n', (18127, 18171), True, 'import koger_tracking as ktf\n'), ((18242, 18310), 'koger_tracking.create_max_distance_array', 'ktf.create_max_distance_array', (['distance', 'raw_track_list', 'active_list'], {}), '(distance, raw_track_list, active_list)\n', (18271, 18310), True, 'import koger_tracking as ktf\n'), ((18714, 18838), 'koger_tracking.process_points_without_tracks', 'ktf.process_points_without_tracks', (['distance', 'max_distance', 'raw_track_list', 'new_positions', 'contours', 'frame_ind', 'new_sizes'], {}), '(distance, max_distance, raw_track_list,\n new_positions, contours, frame_ind, new_sizes)\n', (18747, 18838), True, 'import koger_tracking as ktf\n'), ((22197, 22235), 'numpy.array', 'np.array', (['raw_track_list'], {'dtype': 
'object'}), '(raw_track_list, dtype=object)\n', (22205, 22235), True, 'import numpy as np\n'), ((26089, 26113), 'numpy.array', 'np.array', (["track['track']"], {}), "(track['track'])\n", (26097, 26113), True, 'import numpy as np\n'), ((29187, 29211), 'cv2.minAreaRect', 'cv2.minAreaRect', (['contour'], {}), '(contour)\n', (29202, 29211), False, 'import cv2\n'), ((29641, 29659), 'numpy.isnan', 'np.isnan', (['max_edge'], {}), '(max_edge)\n', (29649, 29659), True, 'import numpy as np\n'), ((37250, 37287), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(angle_dif / 3)'}), '(scale=angle_dif / 3)\n', (37266, 37287), True, 'import numpy as np\n'), ((37313, 37366), 'numpy.maximum', 'np.maximum', (["(-border_info['clock_angle'])", 'jitter_angle'], {}), "(-border_info['clock_angle'], jitter_angle)\n", (37323, 37366), True, 'import numpy as np\n'), ((37433, 37485), 'numpy.minimum', 'np.minimum', (["border_info['clock_angle']", 'jitter_angle'], {}), "(border_info['clock_angle'], jitter_angle)\n", (37443, 37485), True, 'import numpy as np\n'), ((41395, 41451), 'numpy.sum', 'np.sum', (["(obs['multiplier'] * obs['direction'] * (1 / acc))"], {}), "(obs['multiplier'] * obs['direction'] * (1 / acc))\n", (41401, 41451), True, 'import numpy as np\n'), ((42460, 42475), 'numpy.isnan', 'np.isnan', (['freqs'], {}), '(freqs)\n', (42468, 42475), True, 'import numpy as np\n'), ((42547, 42564), 'numpy.argmax', 'np.argmax', (['powers'], {}), '(powers)\n', (42556, 42564), True, 'import numpy as np\n'), ((42574, 42591), 'numpy.argmax', 'np.argmax', (['powers'], {}), '(powers)\n', (42583, 42591), True, 'import numpy as np\n'), ((44685, 44713), 'numpy.nanmax', 'np.nanmax', (["track['rects']", '(1)'], {}), "(track['rects'], 1)\n", (44694, 44713), True, 'import numpy as np\n'), ((47125, 47148), 'numpy.sort', 'np.sort', (['errors'], {'axis': '(0)'}), '(errors, axis=0)\n', (47132, 47148), True, 'import numpy as np\n'), ((47171, 47194), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 
'(100)'], {}), '(-1, 1, 100)\n', (47182, 47194), True, 'import numpy as np\n'), ((47284, 47298), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (47296, 47298), True, 'import matplotlib.pyplot as plt\n'), ((47550, 47573), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (47561, 47573), True, 'import numpy as np\n'), ((50329, 50345), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (50337, 50345), True, 'import numpy as np\n'), ((1560, 1584), 'cv2.minAreaRect', 'cv2.minAreaRect', (['contour'], {}), '(contour)\n', (1575, 1584), False, 'import cv2\n'), ((1607, 1626), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (1620, 1626), False, 'import cv2\n'), ((1651, 1663), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (1658, 1663), True, 'import numpy as np\n'), ((1680, 1733), 'cv2.drawContours', 'cv2.drawContours', (['image', '[box_d]', '(0)', '(0, 255, 100)', '(1)'], {}), '(image, [box_d], 0, (0, 255, 100), 1)\n', (1696, 1733), False, 'import cv2\n'), ((3313, 3408), 'matplotlib.pyplot.plot', 'plt.plot', (["(track['track'][:, 0] + shift)", "(track['track'][:, 1] + shift)"], {'linewidth': 'track_width'}), "(track['track'][:, 0] + shift, track['track'][:, 1] + shift,\n linewidth=track_width)\n", (3321, 3408), True, 'import matplotlib.pyplot as plt\n'), ((3491, 3605), 'matplotlib.pyplot.plot', 'plt.plot', (["(track['track'][:rel_frame, 0] + shift)", "(track['track'][:rel_frame, 1] + shift)"], {'linewidth': 'track_width'}), "(track['track'][:rel_frame, 0] + shift, track['track'][:rel_frame, \n 1] + shift, linewidth=track_width)\n", (3499, 3605), True, 'import matplotlib.pyplot as plt\n'), ((8638, 8650), 'numpy.max', 'np.max', (['size'], {}), '(size)\n', (8644, 8650), True, 'import numpy as np\n'), ((17684, 17700), 'numpy.copy', 'np.copy', (['contour'], {}), '(contour)\n', (17691, 17700), True, 'import numpy as np\n'), ((20805, 20913), 'koger_tracking.filter_tracks_without_new_points', 
'ktf.filter_tracks_without_new_points', (['raw_track_list', 'distance', 'row_ind', 'col_ind', 'active_list', 'frame_ind'], {}), '(raw_track_list, distance, row_ind,\n col_ind, active_list, frame_ind)\n', (20841, 20913), True, 'import koger_tracking as ktf\n'), ((21103, 21218), 'koger_tracking.fix_tracks_with_small_points', 'ktf.fix_tracks_with_small_points', (['raw_track_list', 'distance', 'row_ind', 'col_ind', 'active_list', 'new_sizes', 'frame_ind'], {}), '(raw_track_list, distance, row_ind, col_ind,\n active_list, new_sizes, frame_ind)\n', (21135, 21218), True, 'import koger_tracking as ktf\n'), ((21496, 21593), 'koger_tracking.filter_bad_assigns', 'ktf.filter_bad_assigns', (['raw_track_list', 'active_list', 'distance', 'max_distance', 'row_ind', 'col_ind'], {}), '(raw_track_list, active_list, distance, max_distance,\n row_ind, col_ind)\n', (21518, 21593), True, 'import koger_tracking as ktf\n'), ((28086, 28116), 'numpy.tan', 'np.tan', (['(horizontal_fov_rad / 2)'], {}), '(horizontal_fov_rad / 2)\n', (28092, 28116), True, 'import numpy as np\n'), ((32057, 32085), 'numpy.nanmin', 'np.nanmin', (["track['rects']", '(1)'], {}), "(track['rects'], 1)\n", (32066, 32085), True, 'import numpy as np\n'), ((46923, 46991), 'sklearn.neighbors.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': '(error_std * kde_bw_scale)'}), "(kernel='gaussian', bandwidth=error_std * kde_bw_scale)\n", (46936, 46991), False, 'from sklearn.neighbors import KernelDensity\n'), ((47418, 47434), 'numpy.exp', 'np.exp', (['log_dens'], {}), '(log_dens)\n', (47424, 47434), True, 'import numpy as np\n'), ((47669, 47685), 'numpy.exp', 'np.exp', (['log_dens'], {}), '(log_dens)\n', (47675, 47685), True, 'import numpy as np\n'), ((16077, 16138), 'numpy.load', 'np.load', (['contours_files[contours_file_ind]'], {'allow_pickle': '(True)'}), '(contours_files[contours_file_ind], allow_pickle=True)\n', (16084, 16138), True, 'import numpy as np\n'), ((19159, 19179), 'numpy.log', 
'np.log', (['(distance + 1)'], {}), '(distance + 1)\n', (19165, 19179), True, 'import numpy as np\n'), ((22292, 22320), 'os.path.dirname', 'os.path.dirname', (['tracks_file'], {}), '(tracks_file)\n', (22307, 22320), False, 'import os\n'), ((32167, 32194), 'scipy.signal.find_peaks', 'signal.find_peaks', (['max_edge'], {}), '(max_edge)\n', (32184, 32194), False, 'from scipy import signal\n'), ((32266, 32293), 'numpy.nanmean', 'np.nanmean', (['min_edge[peaks]'], {}), '(min_edge[peaks])\n', (32276, 32293), True, 'import numpy as np\n'), ((32350, 32370), 'numpy.nanmean', 'np.nanmean', (['max_edge'], {}), '(max_edge)\n', (32360, 32370), True, 'import numpy as np\n'), ((16862, 16900), 'numpy.array', 'np.array', (['raw_track_list'], {'dtype': 'object'}), '(raw_track_list, dtype=object)\n', (16870, 16900), True, 'import numpy as np\n'), ((32123, 32141), 'numpy.isnan', 'np.isnan', (['min_edge'], {}), '(min_edge)\n', (32131, 32141), True, 'import numpy as np\n'), ((16441, 16479), 'numpy.array', 'np.array', (['raw_track_list'], {'dtype': 'object'}), '(raw_track_list, dtype=object)\n', (16449, 16479), True, 'import numpy as np\n'), ((16280, 16309), 'os.path.splitext', 'os.path.splitext', (['tracks_file'], {}), '(tracks_file)\n', (16296, 16309), False, 'import os\n')] |
import unittest
import numpy as np
# TODO check correctness for toy data
# TODO check ignore data
class TestMalis(unittest.TestCase):
def test_malis_2d(self):
from affogato.affinities import compute_affinities
from affogato.learning import compute_malis_2d
shape = (100, 100)
labels = np.random.randint(0, 100, size=shape)
offsets = [[-1, 0], [0, -1]]
affs, _ = compute_affinities(labels, offsets)
affs += 0.1 * np.random.randn(*affs.shape)
loss, grads = compute_malis_2d(affs, labels, offsets)
self.assertEqual(grads.shape, affs.shape)
self.assertNotEqual(loss, 0)
self.assertFalse(np.allclose(grads, 0))
def test_malis_3d(self):
from affogato.affinities import compute_affinities
from affogato.learning import compute_malis_3d
shape = (32, 64, 64)
labels = np.random.randint(0, 1000, size=shape)
offsets = [[-1, 0, 0], [0, -1, 0], [0, 0, -1]]
affs, _ = compute_affinities(labels, offsets)
affs += 0.1 * np.random.randn(*affs.shape)
loss, grads = compute_malis_3d(affs, labels, offsets)
self.assertEqual(grads.shape, affs.shape)
self.assertNotEqual(loss, 0)
self.assertFalse(np.allclose(grads, 0))
def seg2edges(self, segmentation):
from scipy.ndimage import convolve
gx = convolve(segmentation + 1, np.array([-1., 1.]).reshape(1, 2))
gy = convolve(segmentation + 1, np.array([-1., 1.]).reshape(2, 1))
return ((gx ** 2 + gy ** 2) > 0)
def test_malis_2d_gradient_descent(self):
from affogato.learning import compute_malis_2d
from affogato.segmentation import connected_components
shape = (100, 100)
labels = np.zeros(shape)
for i in range(10):
for j in range(10):
labels[10 * i:10 * (i + 1), 10 * j:10 * (j + 1)] = 10 * i + j + 1
affs = 0.5 * np.ones((2, 100, 100))
offsets = [[-1, 0], [0, -1]]
for epoch in range(40):
loss, grads = compute_malis_2d(affs, labels, offsets)
affs -= 10000 * grads
affs = np.clip(affs, 0, 1)
labels1, _ = connected_components(affs, 0.5)
self.assertEqual(grads.shape, affs.shape)
self.assertTrue(np.allclose(grads, 0))
self.assertEqual(loss, 0)
edges1 = self.seg2edges(labels1)
edges2 = self.seg2edges(labels)
self.assertTrue(np.allclose(edges1, edges2))
def _run_tests():
    """Script entry point: run the malis test suite."""
    unittest.main()


if __name__ == '__main__':
    _run_tests()
| [
"numpy.clip",
"numpy.allclose",
"numpy.ones",
"affogato.segmentation.connected_components",
"affogato.learning.compute_malis_3d",
"affogato.learning.compute_malis_2d",
"numpy.array",
"numpy.random.randint",
"affogato.affinities.compute_affinities",
"numpy.zeros",
"unittest.main",
"numpy.random... | [((2537, 2552), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2550, 2552), False, 'import unittest\n'), ((324, 361), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {'size': 'shape'}), '(0, 100, size=shape)\n', (341, 361), True, 'import numpy as np\n'), ((417, 452), 'affogato.affinities.compute_affinities', 'compute_affinities', (['labels', 'offsets'], {}), '(labels, offsets)\n', (435, 452), False, 'from affogato.affinities import compute_affinities\n'), ((526, 565), 'affogato.learning.compute_malis_2d', 'compute_malis_2d', (['affs', 'labels', 'offsets'], {}), '(affs, labels, offsets)\n', (542, 565), False, 'from affogato.learning import compute_malis_2d\n'), ((891, 929), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': 'shape'}), '(0, 1000, size=shape)\n', (908, 929), True, 'import numpy as np\n'), ((1003, 1038), 'affogato.affinities.compute_affinities', 'compute_affinities', (['labels', 'offsets'], {}), '(labels, offsets)\n', (1021, 1038), False, 'from affogato.affinities import compute_affinities\n'), ((1112, 1151), 'affogato.learning.compute_malis_3d', 'compute_malis_3d', (['affs', 'labels', 'offsets'], {}), '(affs, labels, offsets)\n', (1128, 1151), False, 'from affogato.learning import compute_malis_3d\n'), ((1770, 1785), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1778, 1785), True, 'import numpy as np\n'), ((2205, 2236), 'affogato.segmentation.connected_components', 'connected_components', (['affs', '(0.5)'], {}), '(affs, 0.5)\n', (2225, 2236), False, 'from affogato.segmentation import connected_components\n'), ((475, 503), 'numpy.random.randn', 'np.random.randn', (['*affs.shape'], {}), '(*affs.shape)\n', (490, 503), True, 'import numpy as np\n'), ((678, 699), 'numpy.allclose', 'np.allclose', (['grads', '(0)'], {}), '(grads, 0)\n', (689, 699), True, 'import numpy as np\n'), ((1061, 1089), 'numpy.random.randn', 'np.random.randn', (['*affs.shape'], {}), '(*affs.shape)\n', (1076, 
1089), True, 'import numpy as np\n'), ((1264, 1285), 'numpy.allclose', 'np.allclose', (['grads', '(0)'], {}), '(grads, 0)\n', (1275, 1285), True, 'import numpy as np\n'), ((1950, 1972), 'numpy.ones', 'np.ones', (['(2, 100, 100)'], {}), '((2, 100, 100))\n', (1957, 1972), True, 'import numpy as np\n'), ((2070, 2109), 'affogato.learning.compute_malis_2d', 'compute_malis_2d', (['affs', 'labels', 'offsets'], {}), '(affs, labels, offsets)\n', (2086, 2109), False, 'from affogato.learning import compute_malis_2d\n'), ((2163, 2182), 'numpy.clip', 'np.clip', (['affs', '(0)', '(1)'], {}), '(affs, 0, 1)\n', (2170, 2182), True, 'import numpy as np\n'), ((2312, 2333), 'numpy.allclose', 'np.allclose', (['grads', '(0)'], {}), '(grads, 0)\n', (2323, 2333), True, 'import numpy as np\n'), ((2475, 2502), 'numpy.allclose', 'np.allclose', (['edges1', 'edges2'], {}), '(edges1, edges2)\n', (2486, 2502), True, 'import numpy as np\n'), ((1410, 1431), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (1418, 1431), True, 'import numpy as np\n'), ((1485, 1506), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {}), '([-1.0, 1.0])\n', (1493, 1506), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import csv
import random
import os
import codecs
import pickle
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# In[32]:
class TravelingSalesmanProblem:
    """TSP instance backed by two CSV files under Input/: a pairwise cost
    matrix ("Input/<name>.csv") and city coordinates ("Input/<coordenadas>.csv")."""

    def __init__(self, name, coordenadas):
        # initialize instance variables:
        self.name = name                # basename of the cost-matrix CSV under Input/
        self.locations = []             # one (x, y) coordinate array per city
        self.distances = []             # full symmetric pairwise-distance matrix
        self.tspSize = 0                # number of cities in the problem
        self.coordenadas = coordenadas  # basename of the coordinates CSV under Input/
        # initialize the data:
        self.__initData()

    def __len__(self):
        return self.tspSize  # how many cities the problem has

    def __initData(self):
        # build the data from scratch only if it is not already loaded
        if not self.locations or not self.distances:
            self.__createData()
        self.tspSize = len(self.locations)

    def __createData(self):
        """Load the cost matrix and the city coordinates from the CSV files."""
        self.locations = []
        # self.locations = [[i,j] for i in range (self.tspSize) for j in range (self.tspSize)]
        # cost-matrix dataframe (header=None: the file is a pure numeric matrix)
        file = pd.read_csv("Input/"+self.name+".csv", sep=",", header=None)
        file = file.iloc[0:, 0:]
        # coordinates: columns 4:6 hold the coordinate pair; the first row is dropped
        # NOTE(review): the 0:601 slice caps the instance at ~600 cities — confirm
        # this matches the data file's layout.
        coordenadas = pd.read_csv("Input/"+self.coordenadas+".csv")
        coordenadas = coordenadas.iloc[0:601, 4:6]
        coordenadas = coordenadas.iloc[1:]
        for fila in range(len(coordenadas.index)):
            self.locations.append(np.asarray(coordenadas.iloc[fila], dtype=np.float32))
        # problem size
        self.tspSize = len(self.locations)
        # print the problem data
        # print("tamaño={}".format(self.tspSize), "locations={}".format(self.locations))
        # create the matrix filled with 0 first
        self.distances = [[0] * self.tspSize for _ in range(self.tspSize)]
        # mirror the precomputed distances into the new matrix (only the
        # triangle is read from the file; both halves are filled here)
        for i in range(self.tspSize):
            for j in range(i+1, self.tspSize):
                # copy the distance from the dataframe into the new matrix
                distance = file[i][j]
                self.distances[i][j] = distance
                self.distances[j][i] = distance
                # print("{}, {}: location1 = {}, location2 = {} => distance = {}".format(
                #     i, j, self.locations[i], self.locations[j], distance))
        # print(self.distances)

        # # serialize locations and distances:
        # pickle.dump(self.locations, open(os.path.join("tsp-data", self.name + "-loc.pickle"), "wb"))
        # pickle.dump(self.distances, open(os.path.join("tsp-data", self.name + "-dist.pickle"), "wb"))

    def getTotalDistance(self, ciudad):
        """Return the total length of the closed tour visiting `ciudad` in order.

        ciudad: list of the city indices that make up the route.
        """
        # distance between the last and the first city (closes the tour)
        distance = self.distances[ciudad[-1]][ciudad[0]]
        # add the distance between each pair of consequtive cities:
        for i in range(len(ciudad) - 1):
            distance += self.distances[ciudad[i]][ciudad[i + 1]]
        return distance

    def plotData(self, indices):
        """plots the path described by the given indices of the cities

        :param indices: A list of ordered city indices describing the given path.
        :return: the resulting plot
        """
        # plot the dots representing the cities:
        plt.scatter(*zip(*self.locations), marker='.', color='red')

        # create a list of the corresponding city locations:
        locs = [self.locations[i] for i in indices]
        locs.append(locs[0])

        # plot a line between each pair of consequtive cities:
        plt.plot(*zip(*locs), linestyle='-', color='blue')
        return plt
def main(verbose=False):
    """Build the TSP instance and report/plot a solution.

    NOTE(review): `solucionoptima` is an empty list here, and both
    `getTotalDistance` and `plotData` index into it (`ciudad[-1]`,
    `locs[0]`), so running this as-is raises IndexError — a route must
    be filled in before calling.
    """
    tsp = TravelingSalesmanProblem("cost_matrix", "demanda_bodegas")
    solucionoptima = []
    if verbose:
        print("Soluciòn òptima=", solucionoptima)
        print("Soluciòn òptima=", tsp.getTotalDistance(solucionoptima))
    plotear = tsp.plotData(solucionoptima)
    plotear.show()
| [
"numpy.asarray",
"pandas.read_csv"
] | [((1002, 1066), 'pandas.read_csv', 'pd.read_csv', (["('Input/' + self.name + '.csv')"], {'sep': '""","""', 'header': 'None'}), "('Input/' + self.name + '.csv', sep=',', header=None)\n", (1013, 1066), True, 'import pandas as pd\n'), ((1141, 1190), 'pandas.read_csv', 'pd.read_csv', (["('Input/' + self.coordenadas + '.csv')"], {}), "('Input/' + self.coordenadas + '.csv')\n", (1152, 1190), True, 'import pandas as pd\n'), ((1368, 1420), 'numpy.asarray', 'np.asarray', (['coordenadas.iloc[fila]'], {'dtype': 'np.float32'}), '(coordenadas.iloc[fila], dtype=np.float32)\n', (1378, 1420), True, 'import numpy as np\n')] |
"""
In this file, we will define the compressor of new videos.
Current plan:
1. we will try using force_key_frames to do new encoding.
2. We assume the other module gives a meta-data format that we use to encode / decode
"""
from loaders.seattle_loader import SeattleLoader
from eva_storage.jvc.ffmpeg_commands import FfmpegCommands
from eva_storage.jvc.preprocessor import Preprocessor
import os
import numpy as np
class Encoder:
    """Re-encode a video so that representative frames become forced key (I) frames.

    Works from a list of frame indices: the indices are converted to the
    source video's presentation timestamps, ffmpeg is asked to force key
    frames at those timestamps, and the resulting I-frame positions are
    saved alongside the new video.
    """

    def __init__(self):
        pass

    def indices2timestamps(self, rep_indices, frame_info):
        """Convert frame indices into presentation timestamps.

        :param rep_indices: iterable of frame indices to convert
        :param frame_info: ffprobe-style dict with a 'frames' list whose
            entries carry a 'pkt_pts_time' value
        :return: list of timestamps, one per index, in the same order
        """
        return [frame_info['frames'][idx]['pkt_pts_time'] for idx in rep_indices]

    def run(self, images, rep_indices, load_directory, video_save_directory, iframe_save_directory):
        """Encode `images` into a new video with key frames at `rep_indices`.

        :param images: frames we are forming into the compressed format
        :param rep_indices: indices of frames that will be hardcoded as I-frames
        :param load_directory: path of the source video (used to read timestamps)
        :param video_save_directory: path of the re-encoded output video
        :param iframe_save_directory: path of the saved I-frame index array (.npy)
        """
        print(f"saving newly encoded video to: {video_save_directory}")
        print(f"saving i frame information video to: {iframe_save_directory}")
        frame_info = FfmpegCommands.get_frameinfo(load_directory)
        timestamps_list = self.indices2timestamps(rep_indices, frame_info)
        FfmpegCommands.force_keyframes(images, timestamps_list, video_save_directory, framerate=60)
        self.save_iframe_indices(video_save_directory, iframe_save_directory)
        return

    def save_iframe_indices(self, video_directory, save_directory):
        """Extract the I-frame indices of `video_directory` and save them as .npy."""
        # np.asarray handles both list and ndarray returns uniformly
        iframe_indices = np.asarray(FfmpegCommands.get_iframe_indices(video_directory))
        os.makedirs(os.path.dirname(save_directory), exist_ok=True)
        np.save(save_directory, iframe_indices)
if __name__ == "__main__":
    """
    Encoding pipeline from a regular seattle video
    """
    loader = SeattleLoader()
    video_directory = '/nethome/jbang36/eva_jaeho/data/seattle/seattle2_15000.mp4'
    images, meta_data = loader.load_images(video_directory)

    # preprocess the video to find the representative frames
    preprocessor = Preprocessor()
    video_filename = os.path.basename(video_directory)
    video_filename = video_filename.split('.')[0]
    rep_indices = preprocessor.run(images, video_filename)

    base_directory = os.path.dirname(video_directory)
    new_video_directory = os.path.join(base_directory, 'seattle2_15000_jvc.mp4')
    # BUG FIX: Encoder.run takes five arguments (the i-frame save path was
    # missing here, which made this call raise TypeError)
    iframe_save_directory = os.path.join(base_directory, 'seattle2_15000_jvc_iframes.npy')

    encoder = Encoder()
    encoder.run(images, rep_indices, video_directory, new_video_directory, iframe_save_directory)
| [
"eva_storage.jvc.ffmpeg_commands.FfmpegCommands.get_iframe_indices",
"os.makedirs",
"eva_storage.jvc.preprocessor.Preprocessor",
"eva_storage.jvc.ffmpeg_commands.FfmpegCommands.force_keyframes",
"loaders.seattle_loader.SeattleLoader",
"os.path.dirname",
"numpy.array",
"os.path.basename",
"eva_storag... | [((2737, 2752), 'loaders.seattle_loader.SeattleLoader', 'SeattleLoader', ([], {}), '()\n', (2750, 2752), False, 'from loaders.seattle_loader import SeattleLoader\n'), ((2948, 2962), 'eva_storage.jvc.preprocessor.Preprocessor', 'Preprocessor', ([], {}), '()\n', (2960, 2962), False, 'from eva_storage.jvc.preprocessor import Preprocessor\n'), ((2984, 3017), 'os.path.basename', 'os.path.basename', (['video_directory'], {}), '(video_directory)\n', (3000, 3017), False, 'import os\n'), ((1876, 1920), 'eva_storage.jvc.ffmpeg_commands.FfmpegCommands.get_frameinfo', 'FfmpegCommands.get_frameinfo', (['load_directory'], {}), '(load_directory)\n', (1904, 1920), False, 'from eva_storage.jvc.ffmpeg_commands import FfmpegCommands\n'), ((2041, 2136), 'eva_storage.jvc.ffmpeg_commands.FfmpegCommands.force_keyframes', 'FfmpegCommands.force_keyframes', (['images', 'timestamps_list', 'video_save_directory'], {'framerate': '(60)'}), '(images, timestamps_list,\n video_save_directory, framerate=60)\n', (2071, 2136), False, 'from eva_storage.jvc.ffmpeg_commands import FfmpegCommands\n'), ((2333, 2383), 'eva_storage.jvc.ffmpeg_commands.FfmpegCommands.get_iframe_indices', 'FfmpegCommands.get_iframe_indices', (['video_directory'], {}), '(video_directory)\n', (2366, 2383), False, 'from eva_storage.jvc.ffmpeg_commands import FfmpegCommands\n'), ((2498, 2529), 'os.path.dirname', 'os.path.dirname', (['save_directory'], {}), '(save_directory)\n', (2513, 2529), False, 'import os\n'), ((2538, 2573), 'os.makedirs', 'os.makedirs', (['dirname'], {'exist_ok': '(True)'}), '(dirname, exist_ok=True)\n', (2549, 2573), False, 'import os\n'), ((2582, 2621), 'numpy.save', 'np.save', (['save_directory', 'iframe_indices'], {}), '(save_directory, iframe_indices)\n', (2589, 2621), True, 'import numpy as np\n'), ((3169, 3201), 'os.path.dirname', 'os.path.dirname', (['video_directory'], {}), '(video_directory)\n', (3184, 3201), False, 'import os\n'), ((2454, 2478), 'numpy.array', 'np.array', 
(['iframe_indices'], {}), '(iframe_indices)\n', (2462, 2478), True, 'import numpy as np\n')] |
from http.cookies import CookieError
import numpy as np
import pandas as pd
from panel import state
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from scipy.sparse import csr_matrix
from scipy.spatial.distance import squareform
from group_lasso import LogisticGroupLasso
from tqdm import tqdm
import warnings
from .utils.plot import plot_regularization_path, plot_classifier_complexity_vs_accuracy
from ._configs import *
__all__ = ["Classifier","prepare_training_dataset"]
def prepare_training_dataset(descriptors, states_labels, n_configs, regex_filter = None, states_subset=None):
    """Sample training configurations from each metastable state.

    Args:
        descriptors (pandas.DataFrame): per-frame feature values.
        states_labels (pandas.DataFrame or numpy.ndarray): per-frame state
            labels, optionally with a boolean 'selection' column/field.
        n_configs (int): number of points to sample for each metastable state.
        regex_filter (str, optional): regex to filter the features. Defaults to None (keep all).
        states_subset (list, optional): metastable states to sample.
            Defaults to None, which takes all states except 'undefined'.

    Returns:
        (configurations, labels), features_names
    """
    assert len(descriptors) == len(states_labels), "Length mismatch between descriptors and states_labels."

    if regex_filter is not None:
        features = descriptors.filter(regex=regex_filter).columns.values
    else:
        features = descriptors.columns.values

    config_list = []
    labels = []

    if isinstance(states_labels, pd.DataFrame):
        # BUG FIX: work on a copy so the caller's DataFrame is not mutated
        # by the 'selection' / 'labels' assignments below.
        states_labels = states_labels.copy()
    elif isinstance(states_labels, np.ndarray):
        states_labels = np.squeeze(states_labels)
        columns = ['labels']
        if states_labels.ndim == 2:
            columns.append('selection')
        states_labels = pd.DataFrame(data=states_labels, columns=columns)
    else:
        raise TypeError(
            f"{states_labels}: Accepted types are 'pandas.Dataframe' or 'numpy.ndarray' "
        )

    if not ('selection' in states_labels):
        states_labels['selection'] = np.ones(len(states_labels), dtype=bool)

    # convert labels in string
    states_labels['labels'] = states_labels['labels'].astype(str)

    if states_subset is None:
        states_subset = states_labels['labels'].unique()
        states_subset = states_subset[states_subset != 'undefined' ]

    for label in states_subset:
        # select the frames belonging to this state
        df = descriptors.loc[ (states_labels['labels'] == label) & (states_labels['selection'] == True)]
        # sample descriptors, with replacement only if strictly necessary
        replace = False
        if n_configs > len(df):
            warnings.warn("The asked number of samples is higher than the possible unique values. Sampling with replacement")
            replace = True
        if regex_filter is not None:
            config_i = df.filter(regex=regex_filter).sample(n=n_configs, replace=replace).values
        else:
            config_i = df.sample(n=n_configs, replace=replace).values
        config_list.append(config_i)
        labels.extend([label]*n_configs)

    labels = np.array(labels)
    configurations = np.vstack(config_list)
    return (configurations, labels), features
class Classifier():
    """One-vs-rest sparse logistic classifier (L1 or group lasso) over state labels.

    Models are fitted along a path of regularization strengths; for each
    strength the coefficients, validation accuracy and number of selected
    features are stored, and the dataset can be pruned to the selection.
    """

    def __init__(self, dataset, features_names, rescale=True, test_size=0.25):
        """
        Args:
            dataset: tuple (X, labels) of configurations and per-sample labels.
            features_names: array of feature names, one per column of X.
            rescale: if True, standardize X to zero mean and unit variance.
            test_size: fraction of the samples held out for validation.
        """
        self._X, self._labels = dataset
        # map arbitrary label values onto contiguous integer class ids
        _tmp_classes, self._labels = np.unique(self._labels, return_inverse=True)
        self.classes = dict()
        for _i, _class in enumerate(_tmp_classes):
            self.classes[_i] = f"{_class}"
        self._rescale = rescale
        self._test_size = test_size
        if self._rescale:
            scaler = StandardScaler(with_mean=True)
            scaler.fit(self._X)
            self._X = scaler.transform(self._X)
        self._train_in, self._val_in, self._train_out, self._val_out = train_test_split(self._X, self._labels, test_size=self._test_size)
        self._n_samples = self._train_in.shape[0]
        self.features = features_names
        self._computed = False

    def compute(self, reg, max_iter = 100, quadratic_kernel=False, groups=None, warm_start = True):
        """Fit one-vs-rest models for every regularization value in `reg`.

        Args:
            reg: scalar or iterable of regularization strengths.
            max_iter: maximum number of solver iterations.
            quadratic_kernel: if True, lift the features with the quadratic kernel map.
            groups: optional per-feature group labels; enables group lasso.
            warm_start: reuse the previous solution along the path (L1 path only).
        """
        if self._computed:
            warnings.warn("Warning: deleting old computed data")
            self._purge()
        if hasattr(reg, '__iter__') == False:
            reg = np.array([reg])
        else:
            # BUG FIX: a plain Python list would break `(reg*self._n_samples)**-1`
            # below (list*int repeats the list); normalize to an ndarray.
            reg = np.asarray(reg)
        _num_reg = len(reg)
        _n_basins = len(np.unique(self._train_out))

        self._reg = reg
        if quadratic_kernel:
            train_in, val_in = quadratic_kernel_featuremap(self._train_in), quadratic_kernel_featuremap(self._val_in)
        else:
            train_in = self._train_in
            val_in = self._val_in
        _n_features = train_in.shape[1]

        if groups is not None:
            groups_names, groups = np.unique(groups, return_inverse=True)
            if quadratic_kernel:
                assert len(groups) == train_in.shape[1], "Length of group array does not match quadratic features number."
            else:
                assert len(groups) == len(self.features), "Length of group array does not match features number."
            _is_group = True
            _reg_name = 'estimator__group_reg'
            tmp_model = LogisticGroupLasso(groups, group_reg = reg[0], l1_reg=0, n_iter=max_iter, supress_warning=True, scale_reg='none', warm_start=False)
            model = OneVsRestClassifier(tmp_model, n_jobs=2)
        else:
            _is_group = False
            _reg_name = 'C'
            # liblinear's C is the inverse of the per-sample regularization strength
            reg = (reg*self._n_samples)**-1
            model = LogisticRegression(penalty='l1', C=reg[0], solver='liblinear', multi_class='ovr', fit_intercept=False, max_iter=max_iter, warm_start=warm_start)

        coeffs = np.empty((_num_reg, _n_basins, _n_features))
        crossval = np.empty((_num_reg,))
        _classes_labels = np.empty((_num_reg, _n_basins), dtype=np.int_)

        for reg_idx in tqdm(range(len(reg)), desc='Optimizing Lasso Estimator'):
            model.set_params(**{_reg_name: reg[reg_idx]})
            model.fit(train_in,self._train_out)
            crossval[reg_idx] = model.score(val_in,self._val_out)
            _classes_labels[reg_idx] = model.classes_.astype(int)
            if _is_group:
                assert _n_basins == model.classes_.shape[0]
                tmp_coeffs = np.empty((_n_basins, _n_features))
                for est_idx, _e in enumerate(model.estimators_):
                    tmp_coeffs[est_idx] = _e.coef_[:,0]
                coeffs[reg_idx] = tmp_coeffs
            else:
                coeffs[reg_idx] = model.coef_

        self._quadratic_kernel=quadratic_kernel
        self._coeffs = coeffs
        self._crossval = crossval
        self._classes_labels = _classes_labels
        self._groups = groups
        if _is_group:
            self._groups_names = groups_names
            self._groups_mask = [
                self._groups == u
                for u in np.unique(self._groups)
            ]
        self._computed = True

        # Count number of features for each regularization value.
        num_feat = np.empty((_num_reg,),dtype=int)
        for i,reg in enumerate(self._reg):
            selected = self._get_selected(reg, feature_mode=False)
            unique_idxs = set()
            for state in selected.values():
                for data in state:
                    unique_idxs.add(data[0])
            num_feat[i] = len(unique_idxs)
        self._num_feat = num_feat

    def _purge(self):
        """Drop every attribute produced by a previous `compute` call."""
        if __DEV__:
            print("DEV >>> Purging old data")
        if self._computed:
            del self._quadratic_kernel
            del self._reg
            del self._coeffs
            del self._crossval
            del self._classes_labels
            del self._num_feat
            if self._groups is not None:
                del self._groups_names
                del self._groups_mask
            del self._groups
        self._computed = False

    def _closest_reg_idx(self, reg):
        """Index of the computed regularization value closest to `reg`."""
        assert self._computed, "You have to run Classifier.compute first."
        return np.argmin(np.abs(self._reg - reg))

    def _get_selected(self, reg, feature_mode=False):
        """Return, per state, the (index, weight, name) triples of the
        selected features (or groups, unless feature_mode=True)."""
        reg_idx = self._closest_reg_idx(reg)
        coefficients = self._coeffs[reg_idx]
        _classes = self._classes_labels[reg_idx]
        selected = dict()
        group_mode = (not feature_mode) and (self._groups is not None)
        for idx, coef in enumerate(coefficients):
            state_name = self.classes[_classes[idx]]
            if group_mode:
                coef = np.array([np.linalg.norm(coef[b])**2 for b in self._groups_mask])
            else:
                coef = coef**2
            nrm = np.sum(coef)
            if nrm < __EPS__:
                selected[state_name] = []
            else:
                coef = csr_matrix(coef/nrm)
                sort_perm = np.argsort(coef.data)[::-1]
                names = []
                for idx in coef.indices:
                    if group_mode:
                        names.append(self._groups_names[idx])
                    else:
                        if self._quadratic_kernel:
                            names.append(decode_quadratic_features(idx, self.features))
                        else:
                            names.append(self.features[idx])
                #idx, weight, name
                names = np.array(names)
                selected[state_name]= list(zip(coef.indices[sort_perm], coef.data[sort_perm], names[sort_perm]))
        #If only two states the learned models are the same.
        if len(selected.keys()) == 2:
            del_key = list(selected.keys())[0]
            selected.pop(del_key)
        return selected

    def get_accuracy(self):
        """Validation accuracy for every regularization value."""
        return self._crossval

    def get_num_features(self):
        """Number of selected features for every regularization value."""
        return self._num_feat

    def feature_summary(self, reg):
        """Per-state selected features (never groups) for the given `reg`."""
        return self._get_selected(reg, feature_mode=True)

    def print_selected(self, reg):
        """Pretty-print accuracy and selected features/groups for `reg`."""
        selected = self._get_selected(reg)
        print(f"Accuracy: {int(self._crossval[self._closest_reg_idx(reg)]*100)}%")
        for state in selected.keys():
            state_name = 'State ' + f'{state}' + ':'
            print(state_name)
            for row in selected[state]:
                print("    " + row[2])

    def prune(self, reg, overwrite=False):
        """Keep only the features selected at `reg`.

        Args:
            reg: regularization value whose selection is used.
            overwrite: if True, prune this instance in place; otherwise
                return a new, pruned Classifier.
        """
        selected = self._get_selected(reg)
        if self._quadratic_kernel:
            # BUG FIX: the exception was previously constructed but never raised
            raise AttributeError("Pruning is not possible on classifiers trained with quadratic kernels.")
        unique_idxs = set()
        for state in selected.values():
            for data in state:
                unique_idxs.add(data[0])

        if self._groups is not None:
            mask = np.logical_or.reduce([self._groups_mask[idx] for idx in unique_idxs])
        else:
            mask = np.array([False]*len(self.features))
            for idx in unique_idxs:
                mask[idx] = True
        if overwrite:
            self._train_in = self._train_in[:, mask]
            self._val_in = self._val_in[:, mask]
            self._X = self._X[:, mask]
            self.features = self.features[mask]
            self._purge()
        else:
            X = self._X[:, mask]
            dset = (X, self._labels)
            pruned_features = self.features[mask]
            return Classifier(dset, pruned_features, self._rescale, self._test_size)

    def plot_regularization_path(self, reg):
        """Plot the coefficients along the regularization path around `reg`."""
        return plot_regularization_path(self, reg)

    def plot(self):
        """Plot classifier complexity versus validation accuracy."""
        return plot_classifier_complexity_vs_accuracy(self)

    def save(self, filename):
        raise NotImplementedError("Saving is not implemented yet.")
def quadratic_kernel_featuremap(X):
    """Lift each row x to [diag(xx^T), sqrt(2) * upper-triangle(xx^T)].

    A constant bias feature 1 is appended to every row before the lift, so
    the output dimension is (d+1) + (d+1)*d/2 for d input features.
    """
    n_pts = X.shape[0]
    aug_dim = X.shape[1] + 1
    out_dim = aug_dim + aug_dim * (aug_dim - 1) // 2
    augmented = np.hstack([X, np.ones((n_pts, 1))])
    result = np.empty((n_pts, out_dim), dtype=np.float64)
    for row, x in enumerate(augmented):
        gram = np.outer(x, x)
        diagonal = np.diag(gram)
        # condensed upper triangle of the off-diagonal part, scaled by sqrt(2)
        off_diag = squareform((gram - np.diag(diagonal)) * np.sqrt(2))
        result[row] = np.r_[diagonal, off_diag]
    return result
def decode_quadratic_features(idx, features_names):
    """Translate a quadratic-feature column index back into a readable name.

    Indices below len(features_names) are squared ("a || a") terms; larger
    indices are cross terms ("a || b"), where pairs involving the appended
    bias feature collapse to the plain feature name.
    """
    num_feats = features_names.shape[0]
    if idx < num_feats:
        # diagonal (squared) term of an original feature
        return f"{features_names[idx]} || {features_names[idx]}"
    if idx > num_feats:
        rows, cols = np.triu_indices(num_feats + 1, k = 1)
        offset = idx - num_feats - 1
        i, j = rows[offset], cols[offset]
        if i == num_feats:
            return f"{features_names[j]}"
        if j == num_feats:
            return f"{features_names[i]}"
        return f"{features_names[i]} || {features_names[j]}"
    # idx == num_feats: the bias-squared term carries no feature name
    return ''
"numpy.sqrt",
"numpy.argsort",
"numpy.array",
"numpy.linalg.norm",
"numpy.empty",
"numpy.vstack",
"sklearn.multiclass.OneVsRestClassifier",
"pandas.DataFrame",
"warnings.warn",
"scipy.sparse.csr_matrix",
"numpy.abs",
"scipy.spatial.distance.squareform",
"numpy.ones",
"numpy.triu_indices",
... | [((3179, 3195), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3187, 3195), True, 'import numpy as np\n'), ((3217, 3239), 'numpy.vstack', 'np.vstack', (['config_list'], {}), '(config_list)\n', (3226, 3239), True, 'import numpy as np\n'), ((11864, 11938), 'numpy.empty', 'np.empty', (['(n_pts, n_feats + n_feats * (n_feats - 1) // 2)'], {'dtype': 'np.float_'}), '((n_pts, n_feats + n_feats * (n_feats - 1) // 2), dtype=np.float_)\n', (11872, 11938), True, 'import numpy as np\n'), ((3463, 3507), 'numpy.unique', 'np.unique', (['self._labels'], {'return_inverse': '(True)'}), '(self._labels, return_inverse=True)\n', (3472, 3507), True, 'import numpy as np\n'), ((3931, 3997), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self._X', 'self._labels'], {'test_size': 'self._test_size'}), '(self._X, self._labels, test_size=self._test_size)\n', (3947, 3997), False, 'from sklearn.model_selection import train_test_split\n'), ((5806, 5850), 'numpy.empty', 'np.empty', (['(_num_reg, _n_basins, _n_features)'], {}), '((_num_reg, _n_basins, _n_features))\n', (5814, 5850), True, 'import numpy as np\n'), ((5870, 5891), 'numpy.empty', 'np.empty', (['(_num_reg,)'], {}), '((_num_reg,))\n', (5878, 5891), True, 'import numpy as np\n'), ((5918, 5964), 'numpy.empty', 'np.empty', (['(_num_reg, _n_basins)'], {'dtype': 'np.int_'}), '((_num_reg, _n_basins), dtype=np.int_)\n', (5926, 5964), True, 'import numpy as np\n'), ((7172, 7204), 'numpy.empty', 'np.empty', (['(_num_reg,)'], {'dtype': 'int'}), '((_num_reg,), dtype=int)\n', (7180, 7204), True, 'import numpy as np\n'), ((12013, 12027), 'numpy.outer', 'np.outer', (['x', 'x'], {}), '(x, x)\n', (12021, 12027), True, 'import numpy as np\n'), ((12042, 12054), 'numpy.diag', 'np.diag', (['mat'], {}), '(mat)\n', (12049, 12054), True, 'import numpy as np\n'), ((12121, 12136), 'scipy.spatial.distance.squareform', 'squareform', (['mat'], {}), '(mat)\n', (12131, 12136), False, 'from scipy.spatial.distance import 
squareform\n'), ((1743, 1768), 'numpy.squeeze', 'np.squeeze', (['states_labels'], {}), '(states_labels)\n', (1753, 1768), True, 'import numpy as np\n'), ((1898, 1947), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'states_labels', 'columns': 'columns'}), '(data=states_labels, columns=columns)\n', (1910, 1947), True, 'import pandas as pd\n'), ((2725, 2848), 'warnings.warn', 'warnings.warn', (['"""The asked number of samples is higher than the possible unique values. Sampling with replacement"""'], {}), "(\n 'The asked number of samples is higher than the possible unique values. Sampling with replacement'\n )\n", (2738, 2848), False, 'import warnings\n'), ((3748, 3778), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(True)'}), '(with_mean=True)\n', (3762, 3778), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4268, 4320), 'warnings.warn', 'warnings.warn', (['"""Warning: deleting old computed data"""'], {}), "('Warning: deleting old computed data')\n", (4281, 4320), False, 'import warnings\n'), ((4411, 4426), 'numpy.array', 'np.array', (['[reg]'], {}), '([reg])\n', (4419, 4426), True, 'import numpy as np\n'), ((4479, 4505), 'numpy.unique', 'np.unique', (['self._train_out'], {}), '(self._train_out)\n', (4488, 4505), True, 'import numpy as np\n'), ((4872, 4910), 'numpy.unique', 'np.unique', (['groups'], {'return_inverse': '(True)'}), '(groups, return_inverse=True)\n', (4881, 4910), True, 'import numpy as np\n'), ((5299, 5432), 'group_lasso.LogisticGroupLasso', 'LogisticGroupLasso', (['groups'], {'group_reg': 'reg[0]', 'l1_reg': '(0)', 'n_iter': 'max_iter', 'supress_warning': '(True)', 'scale_reg': '"""none"""', 'warm_start': '(False)'}), "(groups, group_reg=reg[0], l1_reg=0, n_iter=max_iter,\n supress_warning=True, scale_reg='none', warm_start=False)\n", (5317, 5432), False, 'from group_lasso import LogisticGroupLasso\n'), ((5452, 5492), 'sklearn.multiclass.OneVsRestClassifier', 'OneVsRestClassifier', (['tmp_model'], 
{'n_jobs': '(2)'}), '(tmp_model, n_jobs=2)\n', (5471, 5492), False, 'from sklearn.multiclass import OneVsRestClassifier\n'), ((5642, 5791), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l1"""', 'C': 'reg[0]', 'solver': '"""liblinear"""', 'multi_class': '"""ovr"""', 'fit_intercept': '(False)', 'max_iter': 'max_iter', 'warm_start': 'warm_start'}), "(penalty='l1', C=reg[0], solver='liblinear', multi_class=\n 'ovr', fit_intercept=False, max_iter=max_iter, warm_start=warm_start)\n", (5660, 5791), False, 'from sklearn.linear_model import LogisticRegression\n'), ((8177, 8200), 'numpy.abs', 'np.abs', (['(self._reg - reg)'], {}), '(self._reg - reg)\n', (8183, 8200), True, 'import numpy as np\n'), ((8788, 8800), 'numpy.sum', 'np.sum', (['coef'], {}), '(coef)\n', (8794, 8800), True, 'import numpy as np\n'), ((10815, 10884), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['[self._groups_mask[idx] for idx in unique_idxs]'], {}), '([self._groups_mask[idx] for idx in unique_idxs])\n', (10835, 10884), True, 'import numpy as np\n'), ((11952, 11966), 'numpy.ones', 'np.ones', (['n_pts'], {}), '(n_pts)\n', (11959, 11966), True, 'import numpy as np\n'), ((12091, 12101), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (12098, 12101), True, 'import numpy as np\n'), ((12515, 12550), 'numpy.triu_indices', 'np.triu_indices', (['(num_feats + 1)'], {'k': '(1)'}), '(num_feats + 1, k=1)\n', (12530, 12550), True, 'import numpy as np\n'), ((6400, 6434), 'numpy.empty', 'np.empty', (['(_n_basins, _n_features)'], {}), '((_n_basins, _n_features))\n', (6408, 6434), True, 'import numpy as np\n'), ((8914, 8936), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(coef / nrm)'], {}), '(coef / nrm)\n', (8924, 8936), False, 'from scipy.sparse import csr_matrix\n'), ((9472, 9487), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (9480, 9487), True, 'import numpy as np\n'), ((12076, 12089), 'numpy.diag', 'np.diag', (['diag'], {}), '(diag)\n', (12083, 12089), 
True, 'import numpy as np\n'), ((7017, 7040), 'numpy.unique', 'np.unique', (['self._groups'], {}), '(self._groups)\n', (7026, 7040), True, 'import numpy as np\n'), ((8963, 8984), 'numpy.argsort', 'np.argsort', (['coef.data'], {}), '(coef.data)\n', (8973, 8984), True, 'import numpy as np\n'), ((8664, 8687), 'numpy.linalg.norm', 'np.linalg.norm', (['coef[b]'], {}), '(coef[b])\n', (8678, 8687), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from model.config import cfg
from utils.cython_bbox import bbox_overlaps
try:
import cPickle as pickle
except ImportError:
import pickle
# Just return the ground truth boxes for a single image
def compute_target(memory_size, gt_boxes, feat_stride):
    """Normalize ground-truth boxes of a single image onto the memory grid.

    :param memory_size: [H/16, W/16] shape of the memory
    :param gt_boxes: [N, 5] array of (x1, y1, x2, y2, class)
    :param feat_stride: stride between image pixels and memory cells
    :return: (rois in (y1, x1, y2, x2) order, batch ids, overlaps, labels)
    """
    norm_h = (memory_size[0] - 1.) * feat_stride
    norm_w = (memory_size[1] - 1.) * feat_stride
    n_boxes = gt_boxes.shape[0]
    # keep (N, 1) column shapes so concatenation yields an (N, 4) matrix
    y1 = gt_boxes[:, [1]] / norm_h
    x1 = gt_boxes[:, [0]] / norm_w
    y2 = gt_boxes[:, [3]] / norm_h
    x2 = gt_boxes[:, [2]] / norm_w
    rois = np.concatenate((y1, x1, y2, x2), axis=1)
    # ground truth of a single image: batch id 0, perfect overlap by definition
    batch_ids = np.zeros(n_boxes, dtype=np.int32)
    roi_overlaps = np.ones(n_boxes, dtype=np.float32)
    labels = gt_boxes[:, 4].astype(np.int32)
    return rois, batch_ids, roi_overlaps, labels
# Also return the reverse index of rois
def compute_target_memory(memory_size, rois, feat_stride):
    """Normalize RoIs onto the memory grid and build their inverse mappings.

    :param memory_size: [H/16, W/16], shape of memory
    :param rois: [N, 5], for (batch_id, x1, y1, x2, y2)
    :param feat_stride: stride between image pixels and memory cells (16)
    :return: (normalized rois, batch ids, inverse rois, inverse batch ids)
    """
    span_h = memory_size[0] - 1.
    span_w = memory_size[1] - 1.
    roi_count = rois.shape[0]
    assert np.all(rois[:, 0] == 0), 'only support single image per batch.'

    # corner coordinates in memory-grid units, kept as (N, 1) columns
    x1, y1, x2, y2 = (rois[:, [c]] / feat_stride for c in (1, 2, 3, 4))

    # (y1, x1, y2, x2) order, normalized to [0, 1] over the grid extent
    n_rois = np.hstack((y1, x1, y2, x2))
    n_rois[:, 0::2] /= span_h
    n_rois[:, 1::2] /= span_w
    batch_ids = np.zeros(roi_count, dtype=np.int32)

    # inverse mapping: the full grid box shifted and scaled back onto each RoI
    inv_rois = np.empty_like(n_rois)
    inv_rois[:, 0:2] = 0.
    inv_rois[:, 2] = span_h
    inv_rois[:, 3] = span_w
    inv_rois[:, 0::2] -= y1
    inv_rois[:, 1::2] -= x1
    # normalize coordinates (guard against degenerate boxes with cfg.EPS)
    inv_rois[:, 0::2] /= np.maximum(y2 - y1, cfg.EPS)
    inv_rois[:, 1::2] /= np.maximum(x2 - x1, cfg.EPS)
    inv_batch_ids = np.arange(roi_count, dtype=np.int32)

    return n_rois, batch_ids, inv_rois, inv_batch_ids
def compute_rel_rois(num_rel, rois, relations):
    """
    union subject boxes and object boxes given a set of rois and relations
    """
    rel_rois = np.zeros([num_rel, 5])
    for k, (sub_idx, obj_idx) in enumerate(relations):
        # subject and object must come from the same image
        assert rois[sub_idx, 0] == rois[obj_idx, 0]
        rel_rois[k, 0] = rois[sub_idx, 0]
        # union box: elementwise min of the top-left, max of the bottom-right
        rel_rois[k, 1:3] = np.minimum(rois[sub_idx, 1:3], rois[obj_idx, 1:3])
        rel_rois[k, 3:5] = np.maximum(rois[sub_idx, 3:5], rois[obj_idx, 3:5])
    return rel_rois
# Update weights for the target
def update_weights(labels, cls_prob):
    """Weight each target by how poorly it is classified, focusing on hard examples."""
    n = labels.shape[0]
    # probability the model assigned to the correct class of each sample
    correct_prob = cls_prob[np.arange(n), labels]
    weights = 1. - correct_prob
    # confident predictions (score >= 1 - BETA) are clipped down to the floor BETA
    weights[correct_prob >= 1. - cfg.MEM.BETA] = cfg.MEM.BETA
    # normalize so the weights sum to one (cfg.EPS guards division by zero)
    weights /= np.maximum(np.sum(weights), cfg.EPS)
    return weights
| [
"numpy.ones",
"numpy.minimum",
"numpy.hstack",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty_like",
"numpy.maximum",
"numpy.all",
"numpy.arange"
] | [((708, 735), 'numpy.hstack', 'np.hstack', (['(y1, x1, y2, x2)'], {}), '((y1, x1, y2, x2))\n', (717, 735), True, 'import numpy as np\n'), ((753, 785), 'numpy.zeros', 'np.zeros', (['num_gt'], {'dtype': 'np.int32'}), '(num_gt, dtype=np.int32)\n', (761, 785), True, 'import numpy as np\n'), ((846, 879), 'numpy.ones', 'np.ones', (['num_gt'], {'dtype': 'np.float32'}), '(num_gt, dtype=np.float32)\n', (853, 879), True, 'import numpy as np\n'), ((896, 936), 'numpy.array', 'np.array', (['gt_boxes[:, 4]'], {'dtype': 'np.int32'}), '(gt_boxes[:, 4], dtype=np.int32)\n', (904, 936), True, 'import numpy as np\n'), ((1416, 1439), 'numpy.all', 'np.all', (['(rois[:, 0] == 0)'], {}), '(rois[:, 0] == 0)\n', (1422, 1439), True, 'import numpy as np\n'), ((1664, 1691), 'numpy.hstack', 'np.hstack', (['(y1, x1, y2, x2)'], {}), '((y1, x1, y2, x2))\n', (1673, 1691), True, 'import numpy as np\n'), ((1773, 1806), 'numpy.zeros', 'np.zeros', (['num_roi'], {'dtype': 'np.int32'}), '(num_roi, dtype=np.int32)\n', (1781, 1806), True, 'import numpy as np\n'), ((1843, 1864), 'numpy.empty_like', 'np.empty_like', (['n_rois'], {}), '(n_rois)\n', (1856, 1864), True, 'import numpy as np\n'), ((2067, 2095), 'numpy.maximum', 'np.maximum', (['(y2 - y1)', 'cfg.EPS'], {}), '(y2 - y1, cfg.EPS)\n', (2077, 2095), True, 'import numpy as np\n'), ((2122, 2150), 'numpy.maximum', 'np.maximum', (['(x2 - x1)', 'cfg.EPS'], {}), '(x2 - x1, cfg.EPS)\n', (2132, 2150), True, 'import numpy as np\n'), ((2174, 2208), 'numpy.arange', 'np.arange', (['num_roi'], {'dtype': 'np.int32'}), '(num_roi, dtype=np.int32)\n', (2183, 2208), True, 'import numpy as np\n'), ((2429, 2451), 'numpy.zeros', 'np.zeros', (['[num_rel, 5]'], {}), '([num_rel, 5])\n', (2437, 2451), True, 'import numpy as np\n'), ((3120, 3137), 'numpy.arange', 'np.arange', (['num_gt'], {}), '(num_gt)\n', (3129, 3137), True, 'import numpy as np\n'), ((3355, 3370), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3361, 3370), True, 'import numpy as np\n'), ((2734, 
2768), 'numpy.minimum', 'np.minimum', (['sub_roi[0]', 'obj_roi[0]'], {}), '(sub_roi[0], obj_roi[0])\n', (2744, 2768), True, 'import numpy as np\n'), ((2791, 2825), 'numpy.minimum', 'np.minimum', (['sub_roi[1]', 'obj_roi[1]'], {}), '(sub_roi[1], obj_roi[1])\n', (2801, 2825), True, 'import numpy as np\n'), ((2848, 2882), 'numpy.maximum', 'np.maximum', (['sub_roi[2]', 'obj_roi[2]'], {}), '(sub_roi[2], obj_roi[2])\n', (2858, 2882), True, 'import numpy as np\n'), ((2905, 2939), 'numpy.maximum', 'np.maximum', (['sub_roi[3]', 'obj_roi[3]'], {}), '(sub_roi[3], obj_roi[3])\n', (2915, 2939), True, 'import numpy as np\n')] |
"""
Test the cost function from the problem 1010
"""
import numpy as np
import sympy as sp
import quadpy
import unittest
from opttrj.costtime import cCostTime
from itertools import tee
class cMyTest(unittest.TestCase):
    """Smoke test for the time cost function of problem 1010."""

    def __init__(self, *args, **kwargs):
        super(cMyTest, self).__init__(*args, **kwargs)
        # Fixed 6-D waypoints (one row per waypoint) defining the trajectory.
        self.wp_ = np.array([[0.8981, -1.4139, -1.7304, -3.0529, -1.239, -9.391],
                            [0.1479, -1.6466, -1.8270, -2.523, -1.1025, -9.3893],
                            [-0.539, -1.6466, -1.8270, -2.5237, -1.1091, -9.389],
                            [-0.9361, -2.0998, -1.5694, -2.523, -0.769, -9.39756],
                            [-1.0740, -2.6962, -0.6919, -2.5144, -0.9305, -9.722]])
        self.N_ = self.wp_.shape[0] - 1  # number of trajectory segments
        self.dim_ = 6                    # state dimension

    def testRun(self):
        # Evaluate the cost and its gradient a few times on random durations.
        cost = cCostTime(self.wp_)
        durations = np.random.rand(self.N_)
        grad = np.zeros((self.N_,))
        for _ in range(5):
            cost(durations)
            cost.gradient(durations, grad)
def main():
unittest.main()
if __name__ == '__main__':
main()
| [
"opttrj.costtime.cCostTime",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"unittest.main"
] | [((990, 1005), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1003, 1005), False, 'import unittest\n'), ((341, 634), 'numpy.array', 'np.array', (['[[0.8981, -1.4139, -1.7304, -3.0529, -1.239, -9.391], [0.1479, -1.6466, -\n 1.827, -2.523, -1.1025, -9.3893], [-0.539, -1.6466, -1.827, -2.5237, -\n 1.1091, -9.389], [-0.9361, -2.0998, -1.5694, -2.523, -0.769, -9.39756],\n [-1.074, -2.6962, -0.6919, -2.5144, -0.9305, -9.722]]'], {}), '([[0.8981, -1.4139, -1.7304, -3.0529, -1.239, -9.391], [0.1479, -\n 1.6466, -1.827, -2.523, -1.1025, -9.3893], [-0.539, -1.6466, -1.827, -\n 2.5237, -1.1091, -9.389], [-0.9361, -2.0998, -1.5694, -2.523, -0.769, -\n 9.39756], [-1.074, -2.6962, -0.6919, -2.5144, -0.9305, -9.722]])\n', (349, 634), True, 'import numpy as np\n'), ((788, 807), 'opttrj.costtime.cCostTime', 'cCostTime', (['self.wp_'], {}), '(self.wp_)\n', (797, 807), False, 'from opttrj.costtime import cCostTime\n'), ((824, 847), 'numpy.random.rand', 'np.random.rand', (['self.N_'], {}), '(self.N_)\n', (838, 847), True, 'import numpy as np\n'), ((863, 883), 'numpy.zeros', 'np.zeros', (['(self.N_,)'], {}), '((self.N_,))\n', (871, 883), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import os
import glob
def calibrate():
CHECKERBOARD = (6,9)
subpix_criteria = (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC+cv2.fisheye.CALIB_CHECK_COND+cv2.fisheye.CALIB_FIX_SKEW
objp = np.zeros((1, CHECKERBOARD[0]*CHECKERBOARD[1], 3), np.float32)
objp[0,:,:2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
_img_shape = None
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('*.png')
print(images)
for fname in images:
img = cv2.imread(fname)
if _img_shape == None:
_img_shape = img.shape[:2]
else:
assert _img_shape == img.shape[:2], "All images must share the same size."
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH+cv2.CALIB_CB_FAST_CHECK+cv2.CALIB_CB_NORMALIZE_IMAGE)
# If found, add object points, image points (after refining them)
if ret == True:
objpoints.append(objp)
cv2.cornerSubPix(gray,corners,(3,3),(-1,-1),subpix_criteria)
imgpoints.append(corners)
N_OK = len(objpoints)
K = np.zeros((3, 3))
D = np.zeros((4, 1))
rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
rms, _, _, _, _ = \
cv2.fisheye.calibrate(
objpoints,
imgpoints,
gray.shape[::-1],
K,
D,
rvecs,
tvecs,
calibration_flags,
(cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
)
print("Found " + str(N_OK) + " valid images for calibration")
print("DIM=" + str(_img_shape[::-1]))
print("K=np.array(" + str(K.tolist()) + ")")
print("D=np.array(" + str(D.tolist()) + ")")
DIM=_img_shape[::-1]
K=np.array(K)
D=np.array(D)
return K, D, DIM
def undistort_fisheye(img, K=np.array([[637.8931714029114, 0.0, 509.67125143385334], [0.0, 636.4000140079311, 371.2613659540199], [0.0, 0.0, 1.0]]), D=np.array([[-0.02628723220492124], [-0.1740869162806197], [0.11587794888959864], [0.041124156040405195]]), DIM=(1016, 760)):
img = rotate_image(img, -3)
h,w = img.shape[:2]
cam = np.eye(3)
DIM = (int(w * 1.1), int(h * 1.1))
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, cam, K, DIM, cv2.CV_16SC2)
undistorted_img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_DEFAULT)
undistorted_img = undistorted_img[0:816, 90:987]
return undistorted_img
def undi(img, K=np.array([[637.8931714029114, 0.0, 509.67125143385334], [0.0, 636.4000140079311, 371.2613659540199], [0.0, 0.0, 1.0]]), D=np.array([[-0.02628723220492124], [-0.1740869162806197], [0.11587794888959864], [0.041124156040405195]]), DIM=(1016, 760)):
h,w = img.shape[:2]
cam = np.eye(3)
newcammatrix, _ = cv2.getOptimalNewCameraMatrix(K, D, DIM[::-1], 1, DIM[::-1])
return cv2.undistort(img, K, D, None, newcammatrix)
def undistort(img, K=np.array([[637.8931714029114, 0.0, 509.67125143385334], [0.0, 636.4000140079311, 371.2613659540199], [0.0, 0.0, 1.0]]), D=np.array([[-0.02628723220492124], [-0.1740869162806197], [0.11587794888959864], [0.041124156040405195]]), DIM=(1016, 760)):
return cv2.undistort(img, K, D, None, K)
# calibrate camera, output parameter, and undistort val.jpg
def rotate_image(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
if __name__ == "__main__":
img = cv2.imread("./TestPics/cube/DT_imaged1.png")
cv2.imshow("pic", undistort_fisheye(img))
img = undistort_fisheye(img)
cv2.imwrite("./TestPics/cube/UDT_imaged1.png", img)
cv2.waitKey(0) | [
"cv2.imwrite",
"numpy.eye",
"cv2.warpAffine",
"cv2.remap",
"cv2.undistort",
"numpy.array",
"numpy.zeros",
"cv2.fisheye.calibrate",
"cv2.getOptimalNewCameraMatrix",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.findChessboardCorners",
"cv2.fisheye.initUndistortRectifyMap",
"cv2.getRotationMatrix2D",
... | [((317, 380), 'numpy.zeros', 'np.zeros', (['(1, CHECKERBOARD[0] * CHECKERBOARD[1], 3)', 'np.float32'], {}), '((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)\n', (325, 380), True, 'import numpy as np\n'), ((599, 617), 'glob.glob', 'glob.glob', (['"""*.png"""'], {}), "('*.png')\n", (608, 617), False, 'import glob\n'), ((1402, 1418), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1410, 1418), True, 'import numpy as np\n'), ((1428, 1444), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (1436, 1444), True, 'import numpy as np\n'), ((1627, 1801), 'cv2.fisheye.calibrate', 'cv2.fisheye.calibrate', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'K', 'D', 'rvecs', 'tvecs', 'calibration_flags', '(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-06)'], {}), '(objpoints, imgpoints, gray.shape[::-1], K, D, rvecs,\n tvecs, calibration_flags, (cv2.TERM_CRITERIA_EPS + cv2.\n TERM_CRITERIA_MAX_ITER, 30, 1e-06))\n', (1648, 1801), False, 'import cv2\n'), ((2161, 2172), 'numpy.array', 'np.array', (['K'], {}), '(K)\n', (2169, 2172), True, 'import numpy as np\n'), ((2180, 2191), 'numpy.array', 'np.array', (['D'], {}), '(D)\n', (2188, 2191), True, 'import numpy as np\n'), ((2246, 2369), 'numpy.array', 'np.array', (['[[637.8931714029114, 0.0, 509.67125143385334], [0.0, 636.4000140079311, \n 371.2613659540199], [0.0, 0.0, 1.0]]'], {}), '([[637.8931714029114, 0.0, 509.67125143385334], [0.0, \n 636.4000140079311, 371.2613659540199], [0.0, 0.0, 1.0]])\n', (2254, 2369), True, 'import numpy as np\n'), ((2368, 2477), 'numpy.array', 'np.array', (['[[-0.02628723220492124], [-0.1740869162806197], [0.11587794888959864], [\n 0.041124156040405195]]'], {}), '([[-0.02628723220492124], [-0.1740869162806197], [\n 0.11587794888959864], [0.041124156040405195]])\n', (2376, 2477), True, 'import numpy as np\n'), ((2561, 2570), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2567, 2570), True, 'import numpy as np\n'), ((2629, 2697), 
'cv2.fisheye.initUndistortRectifyMap', 'cv2.fisheye.initUndistortRectifyMap', (['K', 'D', 'cam', 'K', 'DIM', 'cv2.CV_16SC2'], {}), '(K, D, cam, K, DIM, cv2.CV_16SC2)\n', (2664, 2697), False, 'import cv2\n'), ((2721, 2815), 'cv2.remap', 'cv2.remap', (['img', 'map1', 'map2'], {'interpolation': 'cv2.INTER_LINEAR', 'borderMode': 'cv2.BORDER_DEFAULT'}), '(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.\n BORDER_DEFAULT)\n', (2730, 2815), False, 'import cv2\n'), ((2910, 3033), 'numpy.array', 'np.array', (['[[637.8931714029114, 0.0, 509.67125143385334], [0.0, 636.4000140079311, \n 371.2613659540199], [0.0, 0.0, 1.0]]'], {}), '([[637.8931714029114, 0.0, 509.67125143385334], [0.0, \n 636.4000140079311, 371.2613659540199], [0.0, 0.0, 1.0]])\n', (2918, 3033), True, 'import numpy as np\n'), ((3032, 3141), 'numpy.array', 'np.array', (['[[-0.02628723220492124], [-0.1740869162806197], [0.11587794888959864], [\n 0.041124156040405195]]'], {}), '([[-0.02628723220492124], [-0.1740869162806197], [\n 0.11587794888959864], [0.041124156040405195]])\n', (3040, 3141), True, 'import numpy as np\n'), ((3192, 3201), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3198, 3201), True, 'import numpy as np\n'), ((3225, 3285), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['K', 'D', 'DIM[::-1]', '(1)', 'DIM[::-1]'], {}), '(K, D, DIM[::-1], 1, DIM[::-1])\n', (3254, 3285), False, 'import cv2\n'), ((3298, 3342), 'cv2.undistort', 'cv2.undistort', (['img', 'K', 'D', 'None', 'newcammatrix'], {}), '(img, K, D, None, newcammatrix)\n', (3311, 3342), False, 'import cv2\n'), ((3367, 3490), 'numpy.array', 'np.array', (['[[637.8931714029114, 0.0, 509.67125143385334], [0.0, 636.4000140079311, \n 371.2613659540199], [0.0, 0.0, 1.0]]'], {}), '([[637.8931714029114, 0.0, 509.67125143385334], [0.0, \n 636.4000140079311, 371.2613659540199], [0.0, 0.0, 1.0]])\n', (3375, 3490), True, 'import numpy as np\n'), ((3489, 3598), 'numpy.array', 'np.array', (['[[-0.02628723220492124], 
[-0.1740869162806197], [0.11587794888959864], [\n 0.041124156040405195]]'], {}), '([[-0.02628723220492124], [-0.1740869162806197], [\n 0.11587794888959864], [0.041124156040405195]])\n', (3497, 3598), True, 'import numpy as np\n'), ((3625, 3658), 'cv2.undistort', 'cv2.undistort', (['img', 'K', 'D', 'None', 'K'], {}), '(img, K, D, None, K)\n', (3638, 3658), False, 'import cv2\n'), ((3826, 3875), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'angle', '(1.0)'], {}), '(image_center, angle, 1.0)\n', (3849, 3875), False, 'import cv2\n'), ((3888, 3962), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'rot_mat', 'image.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR'}), '(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n', (3902, 3962), False, 'import cv2\n'), ((4019, 4063), 'cv2.imread', 'cv2.imread', (['"""./TestPics/cube/DT_imaged1.png"""'], {}), "('./TestPics/cube/DT_imaged1.png')\n", (4029, 4063), False, 'import cv2\n'), ((4150, 4201), 'cv2.imwrite', 'cv2.imwrite', (['"""./TestPics/cube/UDT_imaged1.png"""', 'img'], {}), "('./TestPics/cube/UDT_imaged1.png', img)\n", (4161, 4201), False, 'import cv2\n'), ((4207, 4221), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4218, 4221), False, 'import cv2\n'), ((678, 695), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (688, 695), False, 'import cv2\n'), ((887, 924), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (899, 924), False, 'import cv2\n'), ((988, 1124), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', 'CHECKERBOARD', '(cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.\n CALIB_CB_NORMALIZE_IMAGE)'], {}), '(gray, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH +\n cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)\n', (1013, 1124), False, 'import cv2\n'), ((1459, 1496), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.float64'}), '((1, 1, 3), dtype=np.float64)\n', (1467, 1496), 
True, 'import numpy as np\n'), ((1533, 1570), 'numpy.zeros', 'np.zeros', (['(1, 1, 3)'], {'dtype': 'np.float64'}), '((1, 1, 3), dtype=np.float64)\n', (1541, 1570), True, 'import numpy as np\n'), ((1266, 1332), 'cv2.cornerSubPix', 'cv2.cornerSubPix', (['gray', 'corners', '(3, 3)', '(-1, -1)', 'subpix_criteria'], {}), '(gray, corners, (3, 3), (-1, -1), subpix_criteria)\n', (1282, 1332), False, 'import cv2\n'), ((3779, 3807), 'numpy.array', 'np.array', (['image.shape[1::-1]'], {}), '(image.shape[1::-1])\n', (3787, 3807), True, 'import numpy as np\n')] |
import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def plot(data, cluster_value, k):
x_val = data[:,0]
y_val = data[:,1]
colors = cm.rainbow(np.linspace(0,1,k))
for i in range(k):
x = x_val[cluster_value[:] == i]
y = y_val[cluster_value[:] == i]
color = colors[i,:]
plt.scatter(x,y, c=color)
plt.show()
def read_in(filename):
data = np.genfromtxt(filename, delimiter=None)
return data
def kmeans(data, k):
cluster_value = np.random.randint(0,k, data.shape[0])
cluster_mean = np.zeros((k,2))
repeat = True
previous_loss = 2e10
while (repeat == True):
for i in range(10):
for n in range(k):
cluster_mean[n,:] = np.mean(data[cluster_value[:] == n], axis=0)
data_distance = np.zeros((data.shape[0], k))
loss = 0
for i in range(data.shape[0]):
for j in range(k):
data_distance[i,j] = np.linalg.norm(data[i] - cluster_mean[j])
for i in range(data.shape[0]):
loss += data_distance[i,cluster_value[i]]
if (previous_loss - loss < 1): #algorithm has converged
repeat = False
previous_loss = loss
for i in range(data.shape[0]):
cluster_value[i] = np.argmin(data_distance[i,:])
return cluster_value, previous_loss
def cmeans(data, c, m): #fuzzy c-means
one = np.ones(c)
cluster = np.zeros(data.shape[0])
cluster_value = np.zeros((data.shape[0], c))
previous_loss = 2e10
for i in range(data.shape[0]):
cluster_value[i,:] = np.random.dirichlet(one)
repeat = True
while(repeat == True): #repeat until convergence is met
centroid = np.zeros((c,2))
for i in range(c):
for j in range(data.shape[0]):
centroid[i,:] += np.multiply(np.power(cluster_value[j,i], m), data[j,:])
sum = 0
for j in range(data.shape[0]):
sum += np.power(cluster_value[j,i], m)
centroid[i,:] = np.divide(centroid[i,:], sum)
for i in range(data.shape[0]):
for j in range(c):
sum = 0
temp = np.abs(np.linalg.norm(data[i,:] - centroid[j,:]))
for k in range(c):
temp2 = np.abs(np.linalg.norm(data[i,:] - centroid[k,:]))
val = temp / temp2
val = np.power(val, (2/(m-1)))
sum += val
cluster_value[i,j] = 1 / sum
for i in range(data.shape[0]):
cluster[i] = np.argmax(cluster_value[i,:])
loss = 0
for i in range(data.shape[0]):
loss += np.linalg.norm(data[i,:] - centroid[int(cluster[i])]) #calculate loss
if (previous_loss - loss < 1):
repeat = False
previous_loss = loss
return cluster, loss
def main():
data = read_in('data/cluster_dataset.txt')
k = 3
loss = np.NaN
cluster_value = None
for i in range(10):
new_cluster_value, new_loss = cmeans(data, k, 3)
if (np.isnan(loss)):
loss = new_loss
cluster_value = new_cluster_value
if (new_loss < loss):
loss = new_loss
cluster_value = new_cluster_value
print("c-means loss: ")
print(loss)
plot(data, cluster_value, k)
loss = np.NaN
for i in range(10):
new_cluster_value, new_loss = kmeans(data, k)
if (np.isnan(loss)):
loss = new_loss
cluster_value = new_cluster_value
if (new_loss < loss):
loss = new_loss
cluster_value = new_cluster_value
print("k-means loss: ")
print(loss)
plot(data, cluster_value, k)
if __name__ == '__main__':
main()
| [
"numpy.mean",
"numpy.ones",
"numpy.power",
"numpy.linalg.norm",
"numpy.argmax",
"numpy.random.randint",
"numpy.zeros",
"numpy.linspace",
"numpy.random.dirichlet",
"matplotlib.pyplot.scatter",
"numpy.isnan",
"numpy.argmin",
"numpy.genfromtxt",
"numpy.divide",
"matplotlib.pyplot.show"
] | [((382, 392), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (390, 392), True, 'import matplotlib.pyplot as plt\n'), ((428, 467), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': 'None'}), '(filename, delimiter=None)\n', (441, 467), True, 'import numpy as np\n'), ((525, 563), 'numpy.random.randint', 'np.random.randint', (['(0)', 'k', 'data.shape[0]'], {}), '(0, k, data.shape[0])\n', (542, 563), True, 'import numpy as np\n'), ((582, 598), 'numpy.zeros', 'np.zeros', (['(k, 2)'], {}), '((k, 2))\n', (590, 598), True, 'import numpy as np\n'), ((1480, 1490), 'numpy.ones', 'np.ones', (['c'], {}), '(c)\n', (1487, 1490), True, 'import numpy as np\n'), ((1505, 1528), 'numpy.zeros', 'np.zeros', (['data.shape[0]'], {}), '(data.shape[0])\n', (1513, 1528), True, 'import numpy as np\n'), ((1549, 1577), 'numpy.zeros', 'np.zeros', (['(data.shape[0], c)'], {}), '((data.shape[0], c))\n', (1557, 1577), True, 'import numpy as np\n'), ((191, 211), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'k'], {}), '(0, 1, k)\n', (202, 211), True, 'import numpy as np\n'), ((352, 378), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': 'color'}), '(x, y, c=color)\n', (363, 378), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1691), 'numpy.random.dirichlet', 'np.random.dirichlet', (['one'], {}), '(one)\n', (1686, 1691), True, 'import numpy as np\n'), ((1789, 1805), 'numpy.zeros', 'np.zeros', (['(c, 2)'], {}), '((c, 2))\n', (1797, 1805), True, 'import numpy as np\n'), ((3154, 3168), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (3162, 3168), True, 'import numpy as np\n'), ((3534, 3548), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (3542, 3548), True, 'import numpy as np\n'), ((839, 867), 'numpy.zeros', 'np.zeros', (['(data.shape[0], k)'], {}), '((data.shape[0], k))\n', (847, 867), True, 'import numpy as np\n'), ((2110, 2140), 'numpy.divide', 'np.divide', (['centroid[i, :]', 'sum'], {}), '(centroid[i, :], sum)\n', (2119, 2140), True, 
'import numpy as np\n'), ((2651, 2681), 'numpy.argmax', 'np.argmax', (['cluster_value[i, :]'], {}), '(cluster_value[i, :])\n', (2660, 2681), True, 'import numpy as np\n'), ((766, 810), 'numpy.mean', 'np.mean', (['data[cluster_value[:] == n]'], {'axis': '(0)'}), '(data[cluster_value[:] == n], axis=0)\n', (773, 810), True, 'import numpy as np\n'), ((1361, 1391), 'numpy.argmin', 'np.argmin', (['data_distance[i, :]'], {}), '(data_distance[i, :])\n', (1370, 1391), True, 'import numpy as np\n'), ((2050, 2082), 'numpy.power', 'np.power', (['cluster_value[j, i]', 'm'], {}), '(cluster_value[j, i], m)\n', (2058, 2082), True, 'import numpy as np\n'), ((1008, 1049), 'numpy.linalg.norm', 'np.linalg.norm', (['(data[i] - cluster_mean[j])'], {}), '(data[i] - cluster_mean[j])\n', (1022, 1049), True, 'import numpy as np\n'), ((1920, 1952), 'numpy.power', 'np.power', (['cluster_value[j, i]', 'm'], {}), '(cluster_value[j, i], m)\n', (1928, 1952), True, 'import numpy as np\n'), ((2265, 2308), 'numpy.linalg.norm', 'np.linalg.norm', (['(data[i, :] - centroid[j, :])'], {}), '(data[i, :] - centroid[j, :])\n', (2279, 2308), True, 'import numpy as np\n'), ((2486, 2512), 'numpy.power', 'np.power', (['val', '(2 / (m - 1))'], {}), '(val, 2 / (m - 1))\n', (2494, 2512), True, 'import numpy as np\n'), ((2378, 2421), 'numpy.linalg.norm', 'np.linalg.norm', (['(data[i, :] - centroid[k, :])'], {}), '(data[i, :] - centroid[k, :])\n', (2392, 2421), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
# Author : Camey
# DateTime : 2021/12/1 7:47 下午
# Description :
"""
import math
from COMMON.model import MLP
import torch
import torch.nn as nn
import os
import numpy as np
import torch.optim as optim
from COMMON.memory import ReplayBuffer
import random
class DQN:
def __init__(self, cfg, state_dim, action_dim):
self.state_dim = state_dim
self.action_dim = action_dim
self.device = cfg.device
self.gamma = cfg.gamma
self.frame_idx = 0 # 用于epsilon的衰减计数
self.epsilon = lambda frame_idx: cfg.epsilon_end + \
(cfg.epsilon_start - cfg.epsilon_end) * math.exp(-1. * frame_idx / cfg.epsilon_decay)
self.batch_size = cfg.batch_size
self.policy_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
self.target_net = MLP(state_dim, action_dim, hidden_dim=cfg.hidden_dim).to(self.device)
# 行为函数和评估函数
for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()): # 复制参数
target_param.data.copy_(param.data)
self.optimizer = optim.Adam(self.policy_net.parameters(), lr=cfg.lr)
self.memory = ReplayBuffer(cfg.memory_capacity)
def choose_action(self, state):
self.frame_idx += 1
if random.random() > self.epsilon(self.frame_idx):
with torch.no_grad():
state = torch.tensor([state], device=self.device, dtype=torch.float32)
q_values = self.policy_net(state)
action = q_values.max(1)[1].item() # 选择Q值最大的动作
else:
action = random.randrange(self.action_dim)
return action
def predict(self, state):
with torch.no_grad():
state = torch.tensor([state],device=self.device, dtype=torch.float32)
q_values = self.policy_net(state)
action = q_values.max(1)[1].item()
return action
def update(self):
if len(self.memory) < self.batch_size:
return
state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample(self.batch_size)
# 转为张量
state_batch = torch.tensor(state_batch, device=self.device, dtype=torch.float)
action_batch = torch.tensor(action_batch, device=self.device).unsqueeze(1)
reward_batch = torch.tensor(reward_batch, device=self.device, dtype=torch.float)
next_state_batch = torch.tensor(next_state_batch, device=self.device, dtype=torch.float)
done_batch = torch.tensor(np.float32(done_batch), device=self.device)
q_values = self.policy_net(state_batch).gather(dim=1, index=action_batch)
next_q_values = self.target_net(next_state_batch).max(1)[0].detach()
# 计算期望的Q值,对于终止状态,done_batch=1,对应的expected_q_value等于reward
expected_q_values = reward_batch + self.gamma * next_q_values * (1 - done_batch)
loss = nn.MSELoss()(q_values, expected_q_values.unsqueeze(1))
#优化模型
self.optimizer.zero_grad()
loss.backward()
for param in self.policy_net.parameters():
param.grad.data.clamp(-1, 1)
self.optimizer.step()
def save(self, path):
torch.save(self.target_net.state_dict(), os.path.join(path, "dqn_checkpoint.pth"))
def load(self, path):
self.target_net.load_state_dict(torch.load(os.path.join(path,"dqn_checkpoint.pth")))
for target_param, param in zip(self.target_net.parameters(), self.policy_net.parameters()):
param.data.copy_(target_param.data) | [
"random.randrange",
"os.path.join",
"COMMON.memory.ReplayBuffer",
"torch.tensor",
"torch.nn.MSELoss",
"COMMON.model.MLP",
"torch.no_grad",
"random.random",
"numpy.float32",
"math.exp"
] | [((1203, 1236), 'COMMON.memory.ReplayBuffer', 'ReplayBuffer', (['cfg.memory_capacity'], {}), '(cfg.memory_capacity)\n', (1215, 1236), False, 'from COMMON.memory import ReplayBuffer\n'), ((2194, 2258), 'torch.tensor', 'torch.tensor', (['state_batch'], {'device': 'self.device', 'dtype': 'torch.float'}), '(state_batch, device=self.device, dtype=torch.float)\n', (2206, 2258), False, 'import torch\n'), ((2365, 2430), 'torch.tensor', 'torch.tensor', (['reward_batch'], {'device': 'self.device', 'dtype': 'torch.float'}), '(reward_batch, device=self.device, dtype=torch.float)\n', (2377, 2430), False, 'import torch\n'), ((2458, 2527), 'torch.tensor', 'torch.tensor', (['next_state_batch'], {'device': 'self.device', 'dtype': 'torch.float'}), '(next_state_batch, device=self.device, dtype=torch.float)\n', (2470, 2527), False, 'import torch\n'), ((1314, 1329), 'random.random', 'random.random', ([], {}), '()\n', (1327, 1329), False, 'import random\n'), ((1632, 1665), 'random.randrange', 'random.randrange', (['self.action_dim'], {}), '(self.action_dim)\n', (1648, 1665), False, 'import random\n'), ((1733, 1748), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1746, 1748), False, 'import torch\n'), ((1770, 1832), 'torch.tensor', 'torch.tensor', (['[state]'], {'device': 'self.device', 'dtype': 'torch.float32'}), '([state], device=self.device, dtype=torch.float32)\n', (1782, 1832), False, 'import torch\n'), ((2562, 2584), 'numpy.float32', 'np.float32', (['done_batch'], {}), '(done_batch)\n', (2572, 2584), True, 'import numpy as np\n'), ((2936, 2948), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2946, 2948), True, 'import torch.nn as nn\n'), ((3263, 3303), 'os.path.join', 'os.path.join', (['path', '"""dqn_checkpoint.pth"""'], {}), "(path, 'dqn_checkpoint.pth')\n", (3275, 3303), False, 'import os\n'), ((762, 815), 'COMMON.model.MLP', 'MLP', (['state_dim', 'action_dim'], {'hidden_dim': 'cfg.hidden_dim'}), '(state_dim, action_dim, hidden_dim=cfg.hidden_dim)\n', (765, 815), 
False, 'from COMMON.model import MLP\n'), ((858, 911), 'COMMON.model.MLP', 'MLP', (['state_dim', 'action_dim'], {'hidden_dim': 'cfg.hidden_dim'}), '(state_dim, action_dim, hidden_dim=cfg.hidden_dim)\n', (861, 911), False, 'from COMMON.model import MLP\n'), ((1379, 1394), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1392, 1394), False, 'import torch\n'), ((1420, 1482), 'torch.tensor', 'torch.tensor', (['[state]'], {'device': 'self.device', 'dtype': 'torch.float32'}), '([state], device=self.device, dtype=torch.float32)\n', (1432, 1482), False, 'import torch\n'), ((2282, 2328), 'torch.tensor', 'torch.tensor', (['action_batch'], {'device': 'self.device'}), '(action_batch, device=self.device)\n', (2294, 2328), False, 'import torch\n'), ((3383, 3423), 'os.path.join', 'os.path.join', (['path', '"""dqn_checkpoint.pth"""'], {}), "(path, 'dqn_checkpoint.pth')\n", (3395, 3423), False, 'import os\n'), ((648, 694), 'math.exp', 'math.exp', (['(-1.0 * frame_idx / cfg.epsilon_decay)'], {}), '(-1.0 * frame_idx / cfg.epsilon_decay)\n', (656, 694), False, 'import math\n')] |
import numpy
import scipy
import scipy.special
from clean import *
from synthesis import *
from simulate import *
from matplotlib import pylab
from arl.parameters import crocodile_path
def aaf_ns(a, m, c):
"""
"""
r=numpy.hypot(*ucs(a))
return scipy.special.pro_ang1(m,m,c,r)
if 1:
import os
vlas=numpy.genfromtxt(crocodile_path("test/VLA_A_hor_xyz.txt"), delimiter=",")
vobs=genuv(vlas, numpy.arange(0,numpy.pi,0.1) , numpy.pi/4)
yy=genvis(vobs/5, 0.01, 0.01)
if 1:
majorcycle(2*0.025, 2*15000, vobs/5 , yy, 0.1, 5, 100, 250000)
if 0: # some other testing code bits
mg=exmid(numpy.fft.fftshift(numpy.fft.fft2(aaf(a, 0, 3))),5)
ws=numpy.arange( p[:,2].min(), p[:,2].max(), wstep)
wr=zip(ws[:-1], ws[1:]) + [ (ws[-1], p[:,2].max() ) ]
yy=genvis(vobs/5, 0.001, 0.001)
d,p,_=doimg(2*0.025, 2*15000, vobs/5, yy, lambda *x: wslicimg(*x, wstep=250))
pylab.matshow(p[740:850,740:850]); pylab.colorbar(); pylab.show()
x=numpy.zeros_like(d)
x[1050,1050]=1
xuv=numpy.fft.fftshift(numpy.fft.fft2(numpy.fft.ifftshift(x)))
| [
"matplotlib.pylab.colorbar",
"scipy.special.pro_ang1",
"arl.parameters.crocodile_path",
"matplotlib.pylab.show",
"numpy.fft.ifftshift",
"numpy.zeros_like",
"matplotlib.pylab.matshow",
"numpy.arange"
] | [((265, 299), 'scipy.special.pro_ang1', 'scipy.special.pro_ang1', (['m', 'm', 'c', 'r'], {}), '(m, m, c, r)\n', (287, 299), False, 'import scipy\n'), ((920, 954), 'matplotlib.pylab.matshow', 'pylab.matshow', (['p[740:850, 740:850]'], {}), '(p[740:850, 740:850])\n', (933, 954), False, 'from matplotlib import pylab\n'), ((955, 971), 'matplotlib.pylab.colorbar', 'pylab.colorbar', ([], {}), '()\n', (969, 971), False, 'from matplotlib import pylab\n'), ((973, 985), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (983, 985), False, 'from matplotlib import pylab\n'), ((992, 1011), 'numpy.zeros_like', 'numpy.zeros_like', (['d'], {}), '(d)\n', (1008, 1011), False, 'import numpy\n'), ((346, 386), 'arl.parameters.crocodile_path', 'crocodile_path', (['"""test/VLA_A_hor_xyz.txt"""'], {}), "('test/VLA_A_hor_xyz.txt')\n", (360, 386), False, 'from arl.parameters import crocodile_path\n'), ((424, 454), 'numpy.arange', 'numpy.arange', (['(0)', 'numpy.pi', '(0.1)'], {}), '(0, numpy.pi, 0.1)\n', (436, 454), False, 'import numpy\n'), ((1073, 1095), 'numpy.fft.ifftshift', 'numpy.fft.ifftshift', (['x'], {}), '(x)\n', (1092, 1095), False, 'import numpy\n')] |
"""
Copyright (R) @huawei.com, all rights reserved
-*- coding:utf-8 -*-
"""
import cv2 as cv
import numpy as np
import os
import time
import constants
import acl_resource
import acl_model
MODEL_WIDTH = 513
MODEL_HEIGHT = 513
INPUT_DIR = './data/'
OUTPUT_DIR = './outputs/'
MODEL_PATH = './model/deeplabv3_plus.om'
def preprocess(picPath):
"""preprocess"""
#read img
bgr_img = cv.imread(picPath)
print(bgr_img.shape)
#get img shape
orig_shape = bgr_img.shape[:2]
#resize img
img = cv.resize(bgr_img, (MODEL_WIDTH, MODEL_HEIGHT)).astype(np.int8)
# save memory C_CONTIGUOUS mode
if not img.flags['C_CONTIGUOUS']:
img = np.ascontiguousarray(img)
return orig_shape, img
def postprocess(result_list, pic, orig_shape, pic_path):
"""postprocess"""
result_img = result_list[0].reshape(513, 513)
result_img = result_img.astype('uint8')
orig_img = cv.imread(pic_path)
img = cv.merge((result_img, result_img, result_img))
bgr_img = cv.resize(img, (orig_shape[1], orig_shape[0]))
bgr_img = (bgr_img + 255)
output_pic = os.path.join(OUTPUT_DIR, "out_" + pic)
cv.imwrite(output_pic, bgr_img)
def main():
"""main"""
#create output directory
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
#acl init
aclresource = acl_resource.AclResource()
aclresource.init()
#load model
model = acl_model.Model(aclresource, MODEL_PATH)
src_dir = os.listdir(INPUT_DIR)
#infer picture
for pic in src_dir:
#read picture
pic_path = os.path.join(INPUT_DIR, pic)
#get pic data
orig_shape, l_data = preprocess(pic_path)
#inference
result_list = model.execute([l_data])
#postprocess
postprocess(result_list, pic, orig_shape, pic_path)
print("Execute end")
if __name__ == '__main__':
main()
| [
"cv2.imwrite",
"cv2.merge",
"acl_resource.AclResource",
"os.listdir",
"os.path.exists",
"os.path.join",
"acl_model.Model",
"numpy.ascontiguousarray",
"os.mkdir",
"cv2.resize",
"cv2.imread"
] | [((390, 408), 'cv2.imread', 'cv.imread', (['picPath'], {}), '(picPath)\n', (399, 408), True, 'import cv2 as cv\n'), ((912, 931), 'cv2.imread', 'cv.imread', (['pic_path'], {}), '(pic_path)\n', (921, 931), True, 'import cv2 as cv\n'), ((942, 988), 'cv2.merge', 'cv.merge', (['(result_img, result_img, result_img)'], {}), '((result_img, result_img, result_img))\n', (950, 988), True, 'import cv2 as cv\n'), ((1003, 1049), 'cv2.resize', 'cv.resize', (['img', '(orig_shape[1], orig_shape[0])'], {}), '(img, (orig_shape[1], orig_shape[0]))\n', (1012, 1049), True, 'import cv2 as cv\n'), ((1097, 1135), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', "('out_' + pic)"], {}), "(OUTPUT_DIR, 'out_' + pic)\n", (1109, 1135), False, 'import os\n'), ((1140, 1171), 'cv2.imwrite', 'cv.imwrite', (['output_pic', 'bgr_img'], {}), '(output_pic, bgr_img)\n', (1150, 1171), True, 'import cv2 as cv\n'), ((1331, 1357), 'acl_resource.AclResource', 'acl_resource.AclResource', ([], {}), '()\n', (1355, 1357), False, 'import acl_resource\n'), ((1410, 1450), 'acl_model.Model', 'acl_model.Model', (['aclresource', 'MODEL_PATH'], {}), '(aclresource, MODEL_PATH)\n', (1425, 1450), False, 'import acl_model\n'), ((1465, 1486), 'os.listdir', 'os.listdir', (['INPUT_DIR'], {}), '(INPUT_DIR)\n', (1475, 1486), False, 'import os\n'), ((669, 694), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (689, 694), True, 'import numpy as np\n'), ((1241, 1267), 'os.path.exists', 'os.path.exists', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (1255, 1267), False, 'import os\n'), ((1277, 1297), 'os.mkdir', 'os.mkdir', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (1285, 1297), False, 'import os\n'), ((1572, 1600), 'os.path.join', 'os.path.join', (['INPUT_DIR', 'pic'], {}), '(INPUT_DIR, pic)\n', (1584, 1600), False, 'import os\n'), ((516, 563), 'cv2.resize', 'cv.resize', (['bgr_img', '(MODEL_WIDTH, MODEL_HEIGHT)'], {}), '(bgr_img, (MODEL_WIDTH, MODEL_HEIGHT))\n', (525, 563), True, 'import cv2 as cv\n')] |
#%% [markdown]
# # Evaluate clustering concordance
#%%
import datetime
import pprint
import time
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from anytree import LevelOrderGroupIter, Node, NodeMixin, Walker
from matplotlib.colors import ListedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.metrics import adjusted_rand_score, rand_score
from giskard.plot import stacked_barplot
from graspologic.align import OrthogonalProcrustes, SeedlessProcrustes
from graspologic.cluster import DivisiveCluster
from graspologic.embed import (
AdjacencySpectralEmbed,
OmnibusEmbed,
select_dimension,
selectSVD,
)
from graspologic.plot import pairplot
from graspologic.utils import (
augment_diagonal,
binarize,
is_fully_connected,
multigraph_lcc_intersection,
pass_to_ranks,
to_laplacian,
)
from pkg.data import load_adjacency, load_maggot_graph, load_node_meta
from pkg.io import savefig
from pkg.plot import set_theme
from pkg.utils import get_paired_inds, get_paired_subgraphs, set_warnings
from src.visualization import CLASS_COLOR_DICT as palette
from src.visualization import adjplot # TODO fix graspologic version and replace here
from pkg.flow import signal_flow
from pkg.utils import get_paired_inds
set_warnings()
t0 = time.time()
def stashfig(name, **kwargs):
foldername = "cluster_concordance"
savefig(name, foldername=foldername, **kwargs)
set_theme()
#%% [markdown]
# ### Load the data
#%%
mg = load_maggot_graph()
mg = mg[mg.nodes["paper_clustered_neurons"] | mg.nodes["accessory_neurons"]]
mg.to_largest_connected_component()
mg.fix_pairs()
mg.nodes["sf"] = signal_flow(mg.sum.adj)
mg.nodes["_inds"] = range(len(mg.nodes))
lp_inds, rp_inds = get_paired_inds(mg.nodes)
(mg.nodes.iloc[lp_inds]["pair"] == mg.nodes.iloc[rp_inds].index).all()
#%% [markdown]
# ## Evaluate cluster concordance between same or different hemispheres
#%% [markdown]
# ## Run multiple rounds of embedding/clustering each hemisphere independently
#%%
def preprocess_adjs(adjs, method="ase"):
"""Preprocessing necessary prior to embedding a graph, opetates on a list
Parameters
----------
adjs : list of adjacency matrices
[description]
method : str, optional
[description], by default "ase"
Returns
-------
[type]
[description]
"""
adjs = [pass_to_ranks(a) for a in adjs]
adjs = [a + 1 / a.size for a in adjs]
if method == "ase":
adjs = [augment_diagonal(a) for a in adjs]
elif method == "lse": # haven't really used much. a few params to look at here
adjs = [to_laplacian(a) for a in adjs]
return adjs
def svd(X, n_components=None):
return selectSVD(X, n_components=n_components, algorithm="full")[0]
n_omni_components = 8 # this is used for all of the embedings initially
n_svd_components = 16 # this is for the last step
method = "ase" # one could also do LSE
n_init = 1
cluster_kws = dict(affinity=["euclidean", "manhattan", "cosine"])
rows = []
for side in ["left", "right"]:
# TODO this is ignoring the contralateral connections!
print("Preprocessing...")
# side_mg = mg[mg.nodes[side]]
# side_mg.to_largest_connected_component()
# adj = side_mg.sum.adj
if side == "left":
inds = lp_inds
else:
inds = rp_inds
adj = mg.sum.adj[np.ix_(inds, inds)]
embed_adj = preprocess_adjs([adj])[0]
# svd_embed = svd(embed_adj)
print("Embedding...")
latent = AdjacencySpectralEmbed(n_components=8, concat=True).fit_transform(
embed_adj
)
print("Clustering...")
for init in range(n_init):
print(f"Init {init}")
dc = DivisiveCluster(max_level=10, min_split=16, cluster_kws=cluster_kws)
hier_pred_labels = dc.fit_predict(latent)
row = {
"hier_pred_labels": hier_pred_labels,
"nodes": mg.nodes.iloc[inds].copy(),
"init": init,
"side": side,
}
rows.append(row)
#%%
comparison_rows = []
max_level = 7
for i, row1 in enumerate(rows):
labels1 = row1["hier_pred_labels"]
nodes1 = row1["nodes"]
for j, row2 in enumerate(rows):
if i > j:
labels2 = row2["hier_pred_labels"]
nodes2 = row2["nodes"]
print((nodes1["pair"] == nodes2.index).all())
for level in range(1, max_level):
_, flat_labels1 = np.unique(labels1[:, :level], return_inverse=True)
_, flat_labels2 = np.unique(labels2[:, :level], return_inverse=True)
ari = adjusted_rand_score(flat_labels1, flat_labels2)
row = {
"source": i,
"target": j,
"source_side": row1["side"],
"target_side": row2["side"],
"metric_val": ari,
"metric": "ARI",
"level": level,
}
comparison_rows.append(row)
ri = rand_score(flat_labels1, flat_labels2)
row = row.copy()
row["metric_val"] = ri
row["metric"] = "RI"
comparison_rows.append(row)
comparison_results = pd.DataFrame(comparison_rows)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
sns.lineplot(
data=comparison_results,
y="metric_val",
hue="metric",
x="level",
style="metric",
markers=True,
)
stashfig("pairwise-metrics-by-level")
#%%
class MetaNode(NodeMixin):
def __init__(self, name, parent=None, children=None, meta=None):
super().__init__()
self.name = name
self.parent = parent
if children:
self.children = children
self.meta = meta
def hierarchical_mean(self, key):
if self.is_leaf:
meta = self.meta
var = meta[key]
return np.mean(var)
else:
children = self.children
child_vars = [child.hierarchical_mean(key) for child in children]
return np.mean(child_vars)
def make_node(label, node_map):
if label not in node_map:
node = MetaNode(label)
node_map[label] = node
else:
node = node_map[label]
return node
def apply_flat_labels(meta, hier_pred_labels):
cluster_meta = meta.copy()
n_levels = hier_pred_labels.shape[1]
last_max = -1
cluster_map = {}
for level in range(n_levels):
uni_pred_labels, indicator = np.unique(
hier_pred_labels[:, :level], axis=0, return_inverse=True
)
uni_pred_labels = [tuple(u) for u in uni_pred_labels]
labels = indicator + last_max + 1
level_map = dict(
zip(np.arange(len(uni_pred_labels)) + last_max + 1, uni_pred_labels)
)
cluster_meta[f"lvl{level}_labels"] = labels
last_max = np.max(labels)
cluster_map.update(level_map)
return cluster_meta, cluster_map
def get_x_y(xs, ys, orient):
if orient == "h":
return xs, ys
elif orient == "v":
return (ys, xs)
def plot_dendrogram(
root,
ax=None,
index_key="_inds",
orient="h",
linewidth=0.7,
cut=None,
max_level=None,
):
if max_level is None:
max_level = root.height
for node in (root.descendants) + (root,):
node.y = node.hierarchical_mean(index_key)
node.x = node.depth
walker = Walker()
walked = []
for node in root.leaves:
upwards, common, downwards = walker.walk(node, root)
curr_node = node
for up_node in (upwards) + (root,):
edge = (curr_node, up_node)
if edge not in walked:
xs = [curr_node.x, up_node.x]
ys = [curr_node.y, up_node.y]
xs, ys = get_x_y(xs, ys, orient)
ax.plot(
xs,
ys,
linewidth=linewidth,
color="black",
alpha=1,
)
walked.append(edge)
curr_node = up_node
y_max = node.meta[index_key].max()
y_min = node.meta[index_key].min()
xs = [node.x, node.x, node.x + 1, node.x + 1]
ys = [node.y - linewidth * 2, node.y + linewidth * 2, y_max + 1, y_min]
xs, ys = get_x_y(xs, ys, orient)
ax.fill(xs, ys, facecolor="black")
if orient == "h":
ax.set(xlim=(-1, max_level + 1))
if cut is not None:
ax.axvline(cut - 1, linewidth=1, color="grey", linestyle=":")
elif orient == "v":
ax.set(ylim=(max_level + 1, -1))
if cut is not None:
ax.axhline(cut - 1, linewidth=1, color="grey", linestyle=":")
ax.axis("off")
return ax
def plot_colorstrip(
meta, colors_var, ax=None, orient="v", index_key="_inds", palette="tab10"
):
if ax is None:
ax = plt.gca()
color_data = meta[colors_var]
uni_classes = list(np.unique(color_data))
indicator = np.full((meta[index_key].max() + 1, 1), np.nan)
# Create the color dictionary
if isinstance(palette, dict):
color_dict = palette
elif isinstance(palette, str):
color_dict = dict(
zip(uni_classes, sns.color_palette(palette, len(uni_classes)))
)
# Make the colormap
class_map = dict(zip(uni_classes, range(len(uni_classes))))
color_sorted = list(map(color_dict.get, uni_classes))
lc = ListedColormap(color_sorted)
for idx, row in meta.iterrows():
indicator[row[index_key]] = class_map[row[colors_var]]
if orient == "v":
indicator = indicator.T
sns.heatmap(
indicator,
cmap=lc,
cbar=False,
yticklabels=False,
xticklabels=False,
ax=ax,
square=False,
)
return ax
def sort_meta(meta, groups=[], group_orders=[], item_order=[]):
# create new columns in the dataframe that correspond to the sorting order
total_sort_by = []
for group in groups:
for group_order in group_orders:
if group_order == "size":
class_values = meta.groupby(group).size()
else:
class_values = meta.groupby(group)[group_order].mean()
meta[f"_{group}_group_{group_order}"] = meta[group].map(class_values)
total_sort_by.append(f"_{group}_group_{group_order}")
total_sort_by.append(group)
total_sort_by += item_order
meta = meta.sort_values(total_sort_by, kind="mergesort", ascending=False)
return meta
max_level = 7
sorters = []
item_orderers = ["merge_class", "pair"]
groupers = [f"lvl{i}_labels" for i in range(max_level)]
sorters += groupers
sorters += item_orderers
gap = 30
def preprocess_meta(meta, hier_labels, max_level=None):
if max_level is None:
max_level = hier_labels.shape[1]
# sorting
meta, cluster_map = apply_flat_labels(meta, hier_labels)
# meta = meta.sort_values(sorters, kind="mergesort")
meta = sort_meta(
meta, groups=groupers, group_orders=["sf"], item_order=item_orderers
)
# add a new dummy variable to keep track of index
meta["_inds"] = range(len(meta))
# based on the grouping and the gap specification, map this onto the positional index
ordered_lowest_clusters = meta[f"lvl{max_level-1}_labels"].unique()
gap_map = dict(
zip(ordered_lowest_clusters, np.arange(len(ordered_lowest_clusters)) * gap)
)
gap_vec = meta[f"lvl{max_level-1}_labels"].map(gap_map)
meta["_inds"] += gap_vec
return meta, cluster_map
def construct_meta_tree(meta, groupers, cluster_map):
inv_cluster_map = {v: k for k, v in cluster_map.items()}
node_map = {}
for grouper in groupers[::-1][:-1]:
level_labels = meta[grouper].unique()
for label in level_labels:
node = make_node(label, node_map)
node.meta = meta[meta[grouper] == label]
barcode = cluster_map[label]
parent_label = inv_cluster_map[barcode[:-1]]
if parent_label is not None:
parent = make_node(parent_label, node_map)
node.parent = parent
parent.meta = meta
root = parent
return root
# #%%
# fig, dend_ax = plt.subplots(1, 1, figsize=(10, 4))
# orient = "v"
# plot_dendrogram(root, ax=dend_ax, index_key="_inds", orient=orient)
# divider = make_axes_locatable(dend_ax)
# color_ax = divider.append_axes("bottom", size="30%", pad=0, sharex=dend_ax)
# plot_colorstrip(meta, "merge_class", ax=color_ax, orient=orient, palette=palette)
# stashfig("test-dendrogram-bars", format="pdf")
class MatrixGrid:
def __init__(
self,
data=None,
row_meta=None,
col_meta=None,
plot_type="heatmap",
col_group=None, # single string, list of string, or np.ndarray
row_group=None, # can also represent a clustering?
col_group_order="size",
row_group_order="size",
col_dendrogram=None, # can this just be true false?
row_dendrogram=None,
col_item_order=None, # single string, list of string, or np.ndarray
row_item_order=None,
col_colors=None, # single string, list of string, or np.ndarray
row_colors=None,
col_palette="tab10",
row_palette="tab10",
col_ticks=True,
row_ticks=True,
col_tick_pad=None,
row_tick_pad=None,
ax=None,
figsize=(10, 10),
gap=False,
):
self.data = data
self.row_meta = row_meta
self.col_meta = col_meta
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
else:
fig = ax.figure
divider = make_axes_locatable(ax)
self.fig = fig
self.ax = ax
self.divider = divider
self.top_axs = []
self.left_axs = []
self.bottom_axs = []
self.right_axs = []
def sort_values(self):
raise NotImplementedError()
def append_axes(self, side, size="10%", pad=0):
kws = {}
if side in ["top", "bottom"]:
kws["sharex"] = self.ax
elif side in ["left", "right"]:
kws["sharey"] = self.ax
ax = self.divider.append_axes(side, size=size, pad=pad, **kws)
if side == "top":
self.top_axs.append(ax)
elif side == "bottom":
self.bottom_axs.append(ax)
elif side == "left":
self.left_axs.append(ax)
elif side == "right":
self.right_axs.append(ax)
return ax
colors_var = "merge_class"
pad = 0.01
data = np.eye(len(lp_inds))
matrixgrid = MatrixGrid(data)
ax = matrixgrid.ax
top_colors_ax = matrixgrid.append_axes("top", size="10%", pad=pad)
top_dendrogram_ax = matrixgrid.append_axes("top", size="20%", pad=pad)
left_colors_ax = matrixgrid.append_axes("left", size="10%", pad=pad)
left_dendrogram_ax = matrixgrid.append_axes("left", size="20%", pad=pad)
labels1 = rows[0]["hier_pred_labels"]
labels2 = rows[1]["hier_pred_labels"]
cluster1_meta = rows[0]["nodes"].copy()
cluster2_meta = rows[1]["nodes"].copy()
print((cluster1_meta["pair"] == cluster2_meta.index).all())
cluster1_meta["_inds"] = range(len(cluster1_meta))
cluster2_meta["_inds"] = range(len(cluster2_meta))
cluster1_meta, cluster1_map = preprocess_meta(
cluster1_meta, labels1, max_level=max_level
)
cluster2_meta, cluster2_map = preprocess_meta(
cluster2_meta, labels2, max_level=max_level
)
from graspologic.plot.plot_matrix import scattermap
# data = data.reindex(index=cluster1_meta.index, columns=cluster2_meta.index)
height = cluster1_meta["_inds"].max() + 1
width = cluster2_meta["_inds"].max() + 1
data = np.zeros((height, width))
for row_ind in range(len(cluster1_meta)):
i = cluster1_meta.loc[cluster1_meta.index[row_ind], "_inds"]
j = cluster2_meta.loc[
cluster1_meta.loc[cluster1_meta.index[row_ind], "pair"], "_inds"
]
data[i, j] = 1
scattermap(data, ax=ax, sizes=(4, 4))
for side in ["left", "right", "top", "bottom"]:
ax.spines[side].set_visible(True)
cluster1_meta_tree = construct_meta_tree(cluster1_meta, groupers, cluster1_map)
cluster2_meta_tree = construct_meta_tree(cluster2_meta, groupers, cluster2_map)
plot_colorstrip(
cluster1_meta, colors_var, ax=left_colors_ax, orient="h", palette=palette
)
plot_dendrogram(cluster1_meta_tree, ax=left_dendrogram_ax, orient="h")
plot_colorstrip(
cluster2_meta, colors_var, ax=top_colors_ax, orient="v", palette=palette
)
plot_dendrogram(cluster2_meta_tree, ax=top_dendrogram_ax, orient="v")
stashfig("full-confusion-mat", format="pdf")
stashfig("full-confusion-mat", format="png")
#%%
level = 6
hier_labels1 = rows[0]["hier_pred_labels"]
hier_labels2 = rows[1]["hier_pred_labels"]
cluster1_meta = rows[0]["nodes"].copy()
cluster2_meta = rows[1]["nodes"].copy()
cluster1_meta, cluster1_map = apply_flat_labels(cluster1_meta, hier_labels1)
cluster2_meta, cluster2_map = apply_flat_labels(cluster2_meta, hier_labels2)
labels1 = cluster1_meta[f"lvl{level}_labels"].values
labels2 = cluster2_meta[f"lvl{level}_labels"].values
from sklearn.metrics import confusion_matrix
from graspologic.utils import remap_labels
from giskard.plot import confusionplot, stacked_barplot
from mpl_toolkits.axes_grid1 import make_axes_locatable
from giskard.plot import soft_axis_off, axis_on
# stacked_barplot()
labels2 = remap_labels(labels1, labels2)
cluster2_meta[f"lvl{level}_labels"] = labels2
ax, conf_mat = confusionplot(
labels1,
labels2,
annot=False,
xticklabels=False,
yticklabels=False,
return_confusion_matrix=True,
title=False,
normalize="true",
)
axis_on(ax)
divider = make_axes_locatable(ax)
def plot_stacked_bars(groups, colors, index, ax=None, normalize=False, orient="h"):
if ax is None:
ax = plt.gca()
counts_by_cluster = pd.crosstab(
index=groups,
columns=colors,
)
for i, (cluster_idx, row) in enumerate(counts_by_cluster.iterrows()):
row /= row.sum()
i = np.squeeze(np.argwhere(index == cluster_idx))
stacked_barplot(
row,
center=i + 0.5,
palette=palette,
ax=ax,
orient=orient,
)
ax.set(xticks=[], yticks=[])
soft_axis_off(ax)
return ax
left_ax = divider.append_axes("left", size="20%", pad=0.01, sharey=ax)
plot_stacked_bars(
cluster1_meta[f"lvl{level}_labels"],
cluster1_meta["merge_class"],
conf_mat.index,
normalize=True,
orient="h",
)
top_ax = divider.append_axes("top", size="20%", pad=0.02, sharex=ax)
plot_stacked_bars(
cluster2_meta[f"lvl{level}_labels"],
cluster2_meta["merge_class"],
conf_mat.columns,
normalize=True,
orient="v",
)
ax.set_xlabel("Right clustering")
left_ax.set_ylabel("Left clustering")
top_ax.set_title("Confusion matrix (row normalized)")
stashfig(f"confusion-lvl{level}") | [
"graspologic.plot.plot_matrix.scattermap",
"anytree.Walker",
"sklearn.metrics.adjusted_rand_score",
"pkg.utils.set_warnings",
"pkg.data.load_maggot_graph",
"giskard.plot.confusionplot",
"numpy.mean",
"graspologic.utils.pass_to_ranks",
"giskard.plot.stacked_barplot",
"numpy.ix_",
"matplotlib.colo... | [((1341, 1355), 'pkg.utils.set_warnings', 'set_warnings', ([], {}), '()\n', (1353, 1355), False, 'from pkg.utils import get_paired_inds, get_paired_subgraphs, set_warnings\n'), ((1363, 1374), 'time.time', 'time.time', ([], {}), '()\n', (1372, 1374), False, 'import time\n'), ((1499, 1510), 'pkg.plot.set_theme', 'set_theme', ([], {}), '()\n', (1508, 1510), False, 'from pkg.plot import set_theme\n'), ((1556, 1575), 'pkg.data.load_maggot_graph', 'load_maggot_graph', ([], {}), '()\n', (1573, 1575), False, 'from pkg.data import load_adjacency, load_maggot_graph, load_node_meta\n'), ((1721, 1744), 'pkg.flow.signal_flow', 'signal_flow', (['mg.sum.adj'], {}), '(mg.sum.adj)\n', (1732, 1744), False, 'from pkg.flow import signal_flow\n'), ((1805, 1830), 'pkg.utils.get_paired_inds', 'get_paired_inds', (['mg.nodes'], {}), '(mg.nodes)\n', (1820, 1830), False, 'from pkg.utils import get_paired_inds\n'), ((5297, 5326), 'pandas.DataFrame', 'pd.DataFrame', (['comparison_rows'], {}), '(comparison_rows)\n', (5309, 5326), True, 'import pandas as pd\n'), ((5337, 5371), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (5349, 5371), True, 'import matplotlib.pyplot as plt\n'), ((5372, 5485), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'comparison_results', 'y': '"""metric_val"""', 'hue': '"""metric"""', 'x': '"""level"""', 'style': '"""metric"""', 'markers': '(True)'}), "(data=comparison_results, y='metric_val', hue='metric', x=\n 'level', style='metric', markers=True)\n", (5384, 5485), True, 'import seaborn as sns\n'), ((15763, 15788), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (15771, 15788), True, 'import numpy as np\n'), ((16022, 16059), 'graspologic.plot.plot_matrix.scattermap', 'scattermap', (['data'], {'ax': 'ax', 'sizes': '(4, 4)'}), '(data, ax=ax, sizes=(4, 4))\n', (16032, 16059), False, 'from graspologic.plot.plot_matrix import scattermap\n'), ((17454, 
17484), 'graspologic.utils.remap_labels', 'remap_labels', (['labels1', 'labels2'], {}), '(labels1, labels2)\n', (17466, 17484), False, 'from graspologic.utils import remap_labels\n'), ((17547, 17695), 'giskard.plot.confusionplot', 'confusionplot', (['labels1', 'labels2'], {'annot': '(False)', 'xticklabels': '(False)', 'yticklabels': '(False)', 'return_confusion_matrix': '(True)', 'title': '(False)', 'normalize': '"""true"""'}), "(labels1, labels2, annot=False, xticklabels=False, yticklabels\n =False, return_confusion_matrix=True, title=False, normalize='true')\n", (17560, 17695), False, 'from giskard.plot import confusionplot, stacked_barplot\n'), ((17726, 17737), 'giskard.plot.axis_on', 'axis_on', (['ax'], {}), '(ax)\n', (17733, 17737), False, 'from giskard.plot import soft_axis_off, axis_on\n'), ((17748, 17771), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (17767, 17771), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((1450, 1496), 'pkg.io.savefig', 'savefig', (['name'], {'foldername': 'foldername'}), '(name, foldername=foldername, **kwargs)\n', (1457, 1496), False, 'from pkg.io import savefig\n'), ((7483, 7491), 'anytree.Walker', 'Walker', ([], {}), '()\n', (7489, 7491), False, 'from anytree import LevelOrderGroupIter, Node, NodeMixin, Walker\n'), ((9513, 9541), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['color_sorted'], {}), '(color_sorted)\n', (9527, 9541), False, 'from matplotlib.colors import ListedColormap\n'), ((9703, 9810), 'seaborn.heatmap', 'sns.heatmap', (['indicator'], {'cmap': 'lc', 'cbar': '(False)', 'yticklabels': '(False)', 'xticklabels': '(False)', 'ax': 'ax', 'square': '(False)'}), '(indicator, cmap=lc, cbar=False, yticklabels=False, xticklabels=\n False, ax=ax, square=False)\n', (9714, 9810), True, 'import seaborn as sns\n'), ((17924, 17965), 'pandas.crosstab', 'pd.crosstab', ([], {'index': 'groups', 'columns': 'colors'}), '(index=groups, columns=colors)\n', 
(17935, 17965), True, 'import pandas as pd\n'), ((2444, 2460), 'graspologic.utils.pass_to_ranks', 'pass_to_ranks', (['a'], {}), '(a)\n', (2457, 2460), False, 'from graspologic.utils import augment_diagonal, binarize, is_fully_connected, multigraph_lcc_intersection, pass_to_ranks, to_laplacian\n'), ((2784, 2841), 'graspologic.embed.selectSVD', 'selectSVD', (['X'], {'n_components': 'n_components', 'algorithm': '"""full"""'}), "(X, n_components=n_components, algorithm='full')\n", (2793, 2841), False, 'from graspologic.embed import AdjacencySpectralEmbed, OmnibusEmbed, select_dimension, selectSVD\n'), ((3430, 3448), 'numpy.ix_', 'np.ix_', (['inds', 'inds'], {}), '(inds, inds)\n', (3436, 3448), True, 'import numpy as np\n'), ((3757, 3825), 'graspologic.cluster.DivisiveCluster', 'DivisiveCluster', ([], {'max_level': '(10)', 'min_split': '(16)', 'cluster_kws': 'cluster_kws'}), '(max_level=10, min_split=16, cluster_kws=cluster_kws)\n', (3772, 3825), False, 'from graspologic.cluster import DivisiveCluster\n'), ((6549, 6616), 'numpy.unique', 'np.unique', (['hier_pred_labels[:, :level]'], {'axis': '(0)', 'return_inverse': '(True)'}), '(hier_pred_labels[:, :level], axis=0, return_inverse=True)\n', (6558, 6616), True, 'import numpy as np\n'), ((6931, 6945), 'numpy.max', 'np.max', (['labels'], {}), '(labels)\n', (6937, 6945), True, 'import numpy as np\n'), ((8954, 8963), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8961, 8963), True, 'import matplotlib.pyplot as plt\n'), ((9022, 9043), 'numpy.unique', 'np.unique', (['color_data'], {}), '(color_data)\n', (9031, 9043), True, 'import numpy as np\n'), ((13782, 13805), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (13801, 13805), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((17890, 17899), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (17897, 17899), True, 'import matplotlib.pyplot as plt\n'), ((18154, 18229), 'giskard.plot.stacked_barplot', 
'stacked_barplot', (['row'], {'center': '(i + 0.5)', 'palette': 'palette', 'ax': 'ax', 'orient': 'orient'}), '(row, center=i + 0.5, palette=palette, ax=ax, orient=orient)\n', (18169, 18229), False, 'from giskard.plot import confusionplot, stacked_barplot\n'), ((18346, 18363), 'giskard.plot.soft_axis_off', 'soft_axis_off', (['ax'], {}), '(ax)\n', (18359, 18363), False, 'from giskard.plot import soft_axis_off, axis_on\n'), ((2558, 2577), 'graspologic.utils.augment_diagonal', 'augment_diagonal', (['a'], {}), '(a)\n', (2574, 2577), False, 'from graspologic.utils import augment_diagonal, binarize, is_fully_connected, multigraph_lcc_intersection, pass_to_ranks, to_laplacian\n'), ((3564, 3615), 'graspologic.embed.AdjacencySpectralEmbed', 'AdjacencySpectralEmbed', ([], {'n_components': '(8)', 'concat': '(True)'}), '(n_components=8, concat=True)\n', (3586, 3615), False, 'from graspologic.embed import AdjacencySpectralEmbed, OmnibusEmbed, select_dimension, selectSVD\n'), ((5954, 5966), 'numpy.mean', 'np.mean', (['var'], {}), '(var)\n', (5961, 5966), True, 'import numpy as np\n'), ((6115, 6134), 'numpy.mean', 'np.mean', (['child_vars'], {}), '(child_vars)\n', (6122, 6134), True, 'import numpy as np\n'), ((13685, 13721), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 10)'}), '(1, 1, figsize=(10, 10))\n', (13697, 13721), True, 'import matplotlib.pyplot as plt\n'), ((18111, 18144), 'numpy.argwhere', 'np.argwhere', (['(index == cluster_idx)'], {}), '(index == cluster_idx)\n', (18122, 18144), True, 'import numpy as np\n'), ((2693, 2708), 'graspologic.utils.to_laplacian', 'to_laplacian', (['a'], {}), '(a)\n', (2705, 2708), False, 'from graspologic.utils import augment_diagonal, binarize, is_fully_connected, multigraph_lcc_intersection, pass_to_ranks, to_laplacian\n'), ((4492, 4542), 'numpy.unique', 'np.unique', (['labels1[:, :level]'], {'return_inverse': '(True)'}), '(labels1[:, :level], return_inverse=True)\n', (4501, 4542), True, 'import numpy as 
np\n'), ((4577, 4627), 'numpy.unique', 'np.unique', (['labels2[:, :level]'], {'return_inverse': '(True)'}), '(labels2[:, :level], return_inverse=True)\n', (4586, 4627), True, 'import numpy as np\n'), ((4650, 4697), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['flat_labels1', 'flat_labels2'], {}), '(flat_labels1, flat_labels2)\n', (4669, 4697), False, 'from sklearn.metrics import adjusted_rand_score, rand_score\n'), ((5082, 5120), 'sklearn.metrics.rand_score', 'rand_score', (['flat_labels1', 'flat_labels2'], {}), '(flat_labels1, flat_labels2)\n', (5092, 5120), False, 'from sklearn.metrics import adjusted_rand_score, rand_score\n')] |
#coding=utf-8
import pandas as pd
import glob
import numpy as np
def accumulate():
global num
num+=1
df1 = pd.read_csv(filename)
coef1=df2.iat[num-1,2]
coef2=df2.iat[num-1,3]
pre=coef1*df1['Pre']
tmp=coef2*df1['Tmp']
pre_list.append(np.std(pre))
tmp_list.append(np.std(tmp))
maize_list.append(np.std(df1['Value']))
if __name__ == '__main__':
base_dir = r'F:\crop-climate\maize&cru\linear-additive\*.csv'
filelist = glob.glob(base_dir)
pre_list = []
tmp_list = []
maize_list = []
num=0
x_ref=59
df2 = pd.read_csv(r'F:\crop-climate\regression\mlr\linear-additive.csv',index_col=False)#读回归系数
result_file = r'F:\crop-climate\IAV2.csv'
fresult = open(result_file, 'w')
cols_result = ['lat','pre','pre_std','tmp','tmp_std']
fresult.write(','.join(cols_result) + '\n')#写第一列的标签
for filename in filelist:
grid_id=filename[-10:-4]
x=int(grid_id)//1000
if x<=x_ref:#添加元素
accumulate()
else:#计算平均值,写入文件,并开始新一轮的累加
lat=89.75-(x_ref-4.5)/2
m1=np.mean(pre_list)
m2=np.mean(tmp_list)
u1=np.std(pre_list)
u2=np.std(tmp_list)
line= '%f,%f,%f,%f,%f,' % (lat,m1,u1,m2,u2)
fresult.write(line + '\n')
x_ref+=10
pre_list=[]
tmp_list=[]
accumulate()
lat=89.75-(x_ref-4.5)/2#最后一个维度写入文件
m1=np.mean(pre_list)
m2=np.mean(tmp_list)
u1=np.std(pre_list)
u2=np.std(tmp_list)
line= '%f,%f,%f,%f,%f,' % (lat,m1,u1,m2,u2)
fresult.write(line + '\n')
fresult.close()
print(np.mean(maize_list))
| [
"numpy.mean",
"glob.glob",
"pandas.read_csv",
"numpy.std"
] | [((130, 151), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (141, 151), True, 'import pandas as pd\n'), ((500, 519), 'glob.glob', 'glob.glob', (['base_dir'], {}), '(base_dir)\n', (509, 519), False, 'import glob\n'), ((615, 705), 'pandas.read_csv', 'pd.read_csv', (['"""F:\\\\crop-climate\\\\regression\\\\mlr\\\\linear-additive.csv"""'], {'index_col': '(False)'}), "('F:\\\\crop-climate\\\\regression\\\\mlr\\\\linear-additive.csv',\n index_col=False)\n", (626, 705), True, 'import pandas as pd\n'), ((1531, 1548), 'numpy.mean', 'np.mean', (['pre_list'], {}), '(pre_list)\n', (1538, 1548), True, 'import numpy as np\n'), ((1557, 1574), 'numpy.mean', 'np.mean', (['tmp_list'], {}), '(tmp_list)\n', (1564, 1574), True, 'import numpy as np\n'), ((1583, 1599), 'numpy.std', 'np.std', (['pre_list'], {}), '(pre_list)\n', (1589, 1599), True, 'import numpy as np\n'), ((1608, 1624), 'numpy.std', 'np.std', (['tmp_list'], {}), '(tmp_list)\n', (1614, 1624), True, 'import numpy as np\n'), ((283, 294), 'numpy.std', 'np.std', (['pre'], {}), '(pre)\n', (289, 294), True, 'import numpy as np\n'), ((317, 328), 'numpy.std', 'np.std', (['tmp'], {}), '(tmp)\n', (323, 328), True, 'import numpy as np\n'), ((353, 373), 'numpy.std', 'np.std', (["df1['Value']"], {}), "(df1['Value'])\n", (359, 373), True, 'import numpy as np\n'), ((1738, 1757), 'numpy.mean', 'np.mean', (['maize_list'], {}), '(maize_list)\n', (1745, 1757), True, 'import numpy as np\n'), ((1153, 1170), 'numpy.mean', 'np.mean', (['pre_list'], {}), '(pre_list)\n', (1160, 1170), True, 'import numpy as np\n'), ((1187, 1204), 'numpy.mean', 'np.mean', (['tmp_list'], {}), '(tmp_list)\n', (1194, 1204), True, 'import numpy as np\n'), ((1221, 1237), 'numpy.std', 'np.std', (['pre_list'], {}), '(pre_list)\n', (1227, 1237), True, 'import numpy as np\n'), ((1254, 1270), 'numpy.std', 'np.std', (['tmp_list'], {}), '(tmp_list)\n', (1260, 1270), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
import torch
from data_management import IPDataset
from operators import (
Fourier,
RadialMaskFunc,
TVAnalysisPeriodic,
noise_gaussian,
to_complex,
unprep_fft_channel,
)
from reconstruction_methods import admm_l1_rec_diag, grid_search
# ----- load configuration -----
import config # isort:skip
# ------ setup ----------
device = torch.device("cuda")
file_name = "grid_search_l1_fourier_"
save_path = os.path.join(config.RESULTS_PATH, "grid_search_l1")
# ----- operators --------
mask_func = RadialMaskFunc(config.n, 40)
mask = unprep_fft_channel(mask_func((1, 1) + config.n + (1,)))
OpA = Fourier(mask)
OpTV = TVAnalysisPeriodic(config.n, device=device)
# ----- load test data --------
samples = range(50, 100)
test_data = IPDataset("test", config.DATA_PATH)
X_0 = torch.stack([test_data[s][0] for s in samples])
X_0 = to_complex(X_0.to(device))
# ----- noise setup --------
noise_min = 1e-3
noise_max = 0.08
noise_steps = 50
noise_rel = torch.tensor(
np.logspace(np.log10(noise_min), np.log10(noise_max), num=noise_steps)
).float()
# add extra noise levels 0.00 and 0.16 for tabular evaluation
noise_rel = (
torch.cat(
[torch.zeros(1).float(), noise_rel, 0.16 * torch.ones(1).float()]
)
.float()
.to(device)
)
def meas_noise(y, noise_level):
return noise_gaussian(y, noise_level)
# ----- set up reconstruction method and grid params --------
def _reconstruct(y, lam, rho):
x, _ = admm_l1_rec_diag(
y,
OpA,
OpTV,
OpA.adj(y),
OpTV(OpA.adj(y)),
lam,
rho,
iter=1000,
silent=True,
)
return x
# parameter search grid
grid = {
"lam": np.logspace(-6, -1, 25),
"rho": np.logspace(-5, 1, 25),
}
def combine_results():
results = pd.DataFrame(
columns=["noise_rel", "grid_param", "err_min", "grid", "err"]
)
for idx in range(len(noise_rel)):
results_cur = pd.read_pickle(
os.path.join(save_path, file_name + str(idx) + ".pkl")
)
results.loc[idx] = results_cur.loc[idx]
os.makedirs(save_path, exist_ok=True)
results.to_pickle(os.path.join(save_path, file_name + "all.pkl"))
return results
# ------ perform grid search ---------
if __name__ == "__main__":
idx_noise = (int(os.environ.get("SGE_TASK_ID")) - 1,)
for idx in idx_noise:
noise_level = noise_rel[idx] * OpA(X_0).norm(
p=2, dim=(-2, -1), keepdim=True
)
Y_ref = meas_noise(OpA(X_0), noise_level)
grid_param, err_min, err = grid_search(X_0, Y_ref, _reconstruct, grid)
results = pd.DataFrame(
columns=["noise_rel", "grid_param", "err_min", "grid", "err"]
)
results.loc[idx] = {
"noise_rel": noise_rel[idx],
"grid_param": grid_param,
"err_min": err_min,
"grid": grid,
"err": err,
}
os.makedirs(save_path, exist_ok=True)
results.to_pickle(
os.path.join(save_path, file_name + str(idx) + ".pkl")
)
| [
"reconstruction_methods.grid_search",
"operators.noise_gaussian",
"numpy.log10",
"os.makedirs",
"torch.ones",
"operators.TVAnalysisPeriodic",
"torch.stack",
"os.path.join",
"operators.Fourier",
"data_management.IPDataset",
"os.environ.get",
"operators.RadialMaskFunc",
"pandas.DataFrame",
"... | [((409, 429), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (421, 429), False, 'import torch\n'), ((481, 532), 'os.path.join', 'os.path.join', (['config.RESULTS_PATH', '"""grid_search_l1"""'], {}), "(config.RESULTS_PATH, 'grid_search_l1')\n", (493, 532), False, 'import os\n'), ((573, 601), 'operators.RadialMaskFunc', 'RadialMaskFunc', (['config.n', '(40)'], {}), '(config.n, 40)\n', (587, 601), False, 'from operators import Fourier, RadialMaskFunc, TVAnalysisPeriodic, noise_gaussian, to_complex, unprep_fft_channel\n'), ((671, 684), 'operators.Fourier', 'Fourier', (['mask'], {}), '(mask)\n', (678, 684), False, 'from operators import Fourier, RadialMaskFunc, TVAnalysisPeriodic, noise_gaussian, to_complex, unprep_fft_channel\n'), ((692, 735), 'operators.TVAnalysisPeriodic', 'TVAnalysisPeriodic', (['config.n'], {'device': 'device'}), '(config.n, device=device)\n', (710, 735), False, 'from operators import Fourier, RadialMaskFunc, TVAnalysisPeriodic, noise_gaussian, to_complex, unprep_fft_channel\n'), ((806, 841), 'data_management.IPDataset', 'IPDataset', (['"""test"""', 'config.DATA_PATH'], {}), "('test', config.DATA_PATH)\n", (815, 841), False, 'from data_management import IPDataset\n'), ((848, 895), 'torch.stack', 'torch.stack', (['[test_data[s][0] for s in samples]'], {}), '([test_data[s][0] for s in samples])\n', (859, 895), False, 'import torch\n'), ((1368, 1398), 'operators.noise_gaussian', 'noise_gaussian', (['y', 'noise_level'], {}), '(y, noise_level)\n', (1382, 1398), False, 'from operators import Fourier, RadialMaskFunc, TVAnalysisPeriodic, noise_gaussian, to_complex, unprep_fft_channel\n'), ((1740, 1763), 'numpy.logspace', 'np.logspace', (['(-6)', '(-1)', '(25)'], {}), '(-6, -1, 25)\n', (1751, 1763), True, 'import numpy as np\n'), ((1776, 1798), 'numpy.logspace', 'np.logspace', (['(-5)', '(1)', '(25)'], {}), '(-5, 1, 25)\n', (1787, 1798), True, 'import numpy as np\n'), ((1841, 1916), 'pandas.DataFrame', 'pd.DataFrame', ([], 
{'columns': "['noise_rel', 'grid_param', 'err_min', 'grid', 'err']"}), "(columns=['noise_rel', 'grid_param', 'err_min', 'grid', 'err'])\n", (1853, 1916), True, 'import pandas as pd\n'), ((2137, 2174), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (2148, 2174), False, 'import os\n'), ((2197, 2243), 'os.path.join', 'os.path.join', (['save_path', "(file_name + 'all.pkl')"], {}), "(save_path, file_name + 'all.pkl')\n", (2209, 2243), False, 'import os\n'), ((2613, 2656), 'reconstruction_methods.grid_search', 'grid_search', (['X_0', 'Y_ref', '_reconstruct', 'grid'], {}), '(X_0, Y_ref, _reconstruct, grid)\n', (2624, 2656), False, 'from reconstruction_methods import admm_l1_rec_diag, grid_search\n'), ((2676, 2751), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['noise_rel', 'grid_param', 'err_min', 'grid', 'err']"}), "(columns=['noise_rel', 'grid_param', 'err_min', 'grid', 'err'])\n", (2688, 2751), True, 'import pandas as pd\n'), ((2983, 3020), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (2994, 3020), False, 'import os\n'), ((1052, 1071), 'numpy.log10', 'np.log10', (['noise_min'], {}), '(noise_min)\n', (1060, 1071), True, 'import numpy as np\n'), ((1073, 1092), 'numpy.log10', 'np.log10', (['noise_max'], {}), '(noise_max)\n', (1081, 1092), True, 'import numpy as np\n'), ((2356, 2385), 'os.environ.get', 'os.environ.get', (['"""SGE_TASK_ID"""'], {}), "('SGE_TASK_ID')\n", (2370, 2385), False, 'import os\n'), ((1221, 1235), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1232, 1235), False, 'import torch\n'), ((1263, 1276), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (1273, 1276), False, 'import torch\n')] |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import itertools
import six
import numpy as np
from tqdm import tqdm
from lxml import etree
# pylint: disable=old-style-class
class ImageAnnotation:
    """A single image together with its annotated objects and ignore regions."""

    def __init__(self, image_path, objects=None, ignore_regs=None):
        self.image_path = image_path
        # Falsy arguments (None or empty) collapse to fresh empty lists;
        # a non-empty caller list is kept by reference, not copied.
        self.objects = objects or []
        self.ignore_regs = ignore_regs or []

    def __len__(self):
        """Number of annotated objects."""
        return len(self.objects)

    def __getitem__(self, item):
        """Index straight into the object list."""
        return self.objects[item]
def bbox_to_string(bbox):
    """Serialise bbox coordinates as a space-separated string of integers."""
    parts = []
    for coord in bbox:
        # Accept ints, floats or numeric strings; truncate toward zero.
        parts.append(str(int(float(coord))))
    return ' '.join(parts)
# pylint: disable=invalid-name
def is_empty_bbox(x, y, w, h):
    """Return True if any bbox coordinate is the -1 "unset" sentinel.

    Rewritten without the numpy round-trip: the original allocated an
    ndarray just to test four scalars and returned np.bool_; a generator
    over a tuple is cheaper and returns a plain bool (truth-value
    compatible with the old behaviour).
    """
    return any(coord == -1 for coord in (x, y, w, h))
def write_annotation(annotation, filename, is_canonical=False):
    """
    Write annotation to an OpenCV-storage style XML file.

    :param annotation: dict mapping frame id -> ImageAnnotation
    :param filename: output XML path
    :param is_canonical: if True, omit the image path and keep only the
        canonical object tags (bbox/type/quality/id/visibility)
    """
    root = etree.Element('opencv_storage')
    # Frames are emitted sorted by image path; tags are zero-padded to 6 digits.
    for frame in tqdm(sorted(annotation, key=lambda x: annotation[x].image_path), desc='Converting to ' + filename):
        image = etree.SubElement(root, 'image' + str(frame).zfill(6))
        if not is_canonical:
            image_path = etree.SubElement(image, 'path')
            image_path.text = annotation[frame].image_path
        if annotation[frame].ignore_regs:
            # Flatten the list of regions into one space-separated string.
            ignore_regions = etree.SubElement(image, 'ignore_reg')
            ignore_regions.text = " ".join(
                map(str, [" ".join(map(str, val)) for val in annotation[frame].ignore_regs]))
        canonical_tags = ("bbox", "type", "quality", "id", "visibility")
        for obj_idx, obj in enumerate(annotation[frame]):
            # Element name uses the object's own id when present, else its index.
            obj_element = etree.SubElement(image, 'object' + str(obj.get("id", obj_idx)).zfill(6))
            for key, value in six.iteritems(obj):
                if not is_canonical or key in canonical_tags:
                    obj_feature = etree.SubElement(obj_element, key)
                    obj_feature.text = str(value)
    with open(filename, 'wb') as output:
        output.write(etree.tostring(root, pretty_print=True, xml_declaration=True, encoding='utf-8'))
def read_object_info(xml_root):
    """Collect child-element texts into a dict keyed by tag.

    A tag occurring once maps to its single text string; a repeated tag
    maps to the list of all its texts.
    """
    info = {}
    # np.unique both de-duplicates and sorts the tag names.
    for tag in np.unique([child.tag for child in xml_root]):
        texts = [node.text for node in xml_root.findall(tag)]
        info[tag] = texts[0] if len(texts) == 1 else texts
    return info
def convert_object_info(converters, obj_info):
    """Apply per-key converter callables to obj_info in place; return it.

    Keys present in `converters` but missing from `obj_info` are ignored.
    """
    for key in converters.keys() & obj_info.keys():
        obj_info[key] = converters[key](obj_info[key])
    return obj_info
def chunkwise(t, size=2):
    """Yield non-overlapping tuples of `size` consecutive items from t.

    A trailing partial chunk is silently dropped (zip stops at the
    shortest iterator).
    """
    # All slots share one iterator, so each zip step consumes `size` items.
    iterators = [iter(t)] * size
    return zip(*iterators)
def read_regions(text):
    """Parse a space-separated string of ints into [x, y, w, h] quadruples.

    Returns None when `text` is None (missing <ignore_reg> element).
    """
    if text is None:
        return None
    numbers = [int(token) for token in text.split(" ")]
    # Group the flat int list four at a time (chunking inlined here).
    it = iter(numbers)
    return [list(quad) for quad in zip(*[it] * 4)]
def read_annotation(filename):
    """
    Read an annotation XML file (the format produced by write_annotation).

    :param filename: path of the XML file
    :return: dict mapping integer frame id -> ImageAnnotation
    """
    tree = etree.parse(filename)
    root = tree.getroot()
    objects = {}
    for frame in tqdm(root, desc='Reading ' + filename):
        current_objects = []
        # Child tags look like 'object000012'; everything else is metadata.
        for obj in frame:
            if obj.tag.startswith("object"):
                current_objects.append(read_object_info(obj))
        # Canonical files have no <path>; fall back to the frame tag itself.
        path_tag = frame.find("path")
        if path_tag is not None:
            image_path = path_tag.text
        else:
            image_path = frame.tag
        ignore_reg_tag = frame.find("ignore_reg")
        if ignore_reg_tag is not None:
            ignore_regions = read_regions(ignore_reg_tag.text)
        else:
            ignore_regions = None
        # Frame tags are 'image' + zero-padded number; extract the number.
        frame_id = int(re.search(r"image([0-9]+)", frame.tag).group(1))
        objects[frame_id] = ImageAnnotation(image_path, current_objects, ignore_regions)
    return objects
| [
"lxml.etree.Element",
"numpy.unique",
"lxml.etree.SubElement",
"lxml.etree.parse",
"numpy.asarray",
"tqdm.tqdm",
"numpy.any",
"six.iteritems",
"lxml.etree.tostring",
"re.search"
] | [((1376, 1400), 'numpy.asarray', 'np.asarray', (['[x, y, w, h]'], {}), '([x, y, w, h])\n', (1386, 1400), True, 'import numpy as np\n'), ((1412, 1430), 'numpy.any', 'np.any', (['(bbox == -1)'], {}), '(bbox == -1)\n', (1418, 1430), True, 'import numpy as np\n'), ((1545, 1576), 'lxml.etree.Element', 'etree.Element', (['"""opencv_storage"""'], {}), "('opencv_storage')\n", (1558, 1576), False, 'from lxml import etree\n'), ((2873, 2921), 'numpy.unique', 'np.unique', (['[element.tag for element in xml_root]'], {}), '([element.tag for element in xml_root])\n', (2882, 2921), True, 'import numpy as np\n'), ((3698, 3719), 'lxml.etree.parse', 'etree.parse', (['filename'], {}), '(filename)\n', (3709, 3719), False, 'from lxml import etree\n'), ((3781, 3819), 'tqdm.tqdm', 'tqdm', (['root'], {'desc': "('Reading ' + filename)"}), "(root, desc='Reading ' + filename)\n", (3785, 3819), False, 'from tqdm import tqdm\n'), ((1820, 1851), 'lxml.etree.SubElement', 'etree.SubElement', (['image', '"""path"""'], {}), "(image, 'path')\n", (1836, 1851), False, 'from lxml import etree\n'), ((2437, 2455), 'six.iteritems', 'six.iteritems', (['obj'], {}), '(obj)\n', (2450, 2455), False, 'import six\n'), ((2701, 2780), 'lxml.etree.tostring', 'etree.tostring', (['root'], {'pretty_print': '(True)', 'xml_declaration': '(True)', 'encoding': '"""utf-8"""'}), "(root, pretty_print=True, xml_declaration=True, encoding='utf-8')\n", (2715, 2780), False, 'from lxml import etree\n'), ((1991, 2028), 'lxml.etree.SubElement', 'etree.SubElement', (['image', '"""ignore_reg"""'], {}), "(image, 'ignore_reg')\n", (2007, 2028), False, 'from lxml import etree\n'), ((2553, 2587), 'lxml.etree.SubElement', 'etree.SubElement', (['obj_element', 'key'], {}), '(obj_element, key)\n', (2569, 2587), False, 'from lxml import etree\n'), ((4368, 4405), 're.search', 're.search', (['"""image([0-9]+)"""', 'frame.tag'], {}), "('image([0-9]+)', frame.tag)\n", (4377, 4405), False, 'import re\n')] |
# -*- coding: utf-8 -*-
"""MexicoData.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Hqi63fZVwhGI1iNAEi8GMywCf2amMLNf
"""
import pandas as pd
# Load the raw Mexican COVID-19 case export (Latin-1 encoded government CSV).
df = pd.read_csv("/content/200918COVID19MEXICO.csv", encoding = "ISO-8859-1")
df.head(10)
# Translate the Spanish column names to English.
# NOTE(review): both 'NACIONALIDAD' and "PAIS_NACIONALIDAD" map to
# 'nationality', producing duplicate columns after the rename; both
# duplicates are dropped just below, so the net effect is benign.
df = df.rename(columns = {'FECHA_ACTUALIZACION': 'date',
                          'ID_REGISTRO':'id',
                          'ORIGEN': 'origin',
                          'SECTOR': 'sector',
                          'ENTIDAD_UM': 'care_unit_location',
                          'SEXO': 'sex',
                          'ENTIDAD_NAC': 'birth_state',
                          'ENTIDAD_RES': 'residence',
                          'MUNICIPIO_RES': 'city',
                          'TIPO_PACIENTE': 'patient_type',
                          'FECHA_INGRESO': 'entry_date',
                          'FECHA_SINTOMAS': 'date_begin_symptoms',
                          'FECHA_DEF': 'date_death',
                          'INTUBADO': 'intubed',
                          'NEUMONIA': 'pneumonia',
                          'EDAD': 'age',
                          'NACIONALIDAD': 'nationality',
                          'EMBARAZO': 'pregnancy',
                          'HABLA_LENGUA_INDIG': 'speaks_indig_language',
                          'DIABETES': 'diabete',
                          'EPOC': 'COPD',
                          "RESULTADO":"result",
                          "OTRO_CASO":"cov_contact",
                          "TABAQUISMO":"tobacco",
                          "RENAL_CRONICA":"chronic_kindney",
                          "OBESIDAD":"obesity",
                          "CARDIOVASCULAR":"cardiovascular",
                          "OTRA_COM":"other_diseases",
                          "HIPERTENSION":"hypertension",
                          "INMUSUPR":"immunosuppression",
                          "ASMA":"asthma",
                          "MIGRANTE":"migrante",
                          "PAIS_NACIONALIDAD":"nationality",
                          "PAIS_ORIGEN":"departure",
                          'UCI': 'ICU', })
# Drop identifier / date / location columns that are not used as model features.
df = df.drop(columns=['date','origin','sector','care_unit_location','birth_state','residence','city',
                      'patient_type','entry_date','date_begin_symptoms','nationality','speaks_indig_language',
                      'migrante','nationality','departure'])
df.columns
df.head(10)
# Keep only rows with definite answers.  NOTE(review): values >= 97 (and >= 3
# for cov_contact/result) appear to be "unknown/not applicable" sentinels in
# this dataset -- confirm against the official data dictionary.
filtered = df.loc[(df['intubed'] < 97) &
                  (df['pneumonia'] < 97) &
                  (df['pregnancy'] < 97) &
                  (df['diabete'] < 97) &
                  (df['COPD'] < 97) &
                  (df['asthma'] < 97) &
                  (df['immunosuppression'] < 97) &
                  (df['hypertension'] < 97) &
                  (df['other_diseases'] < 97) &
                  (df['cardiovascular'] < 97) &
                  (df['obesity'] < 97) &
                  (df['chronic_kindney'] < 97) &
                  (df['tobacco'] < 97) &
                  (df['cov_contact'] < 3) &
                  (df['result'] < 3) &
                  (df['ICU'] < 97)]
# '9999-99-99' is the sentinel for "no death date": such rows get death = 1
# (alive) and all others death = 2, matching the dataset's 1/2 convention.
filtered.loc[df.date_death == '9999-99-99', 'death'] = 1
filtered.loc[df.date_death != '9999-99-99', 'death'] = 2
filtered = filtered.drop(columns=['date_death'])
filtered["death"] = filtered["death"].astype(int)
# filtered.dtypes
filtered.reset_index(drop=True, inplace=True)
# Shift every 1/2-coded column down to 0/1; 'age' and 'id' are left untouched.
filtered.loc[:, (filtered.columns != 'age') & (filtered.columns != 'id')] = filtered.loc[:, (filtered.columns != 'age') & (filtered.columns != 'id')].apply(lambda col: col - 1)
filtered.head(10)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
# Features: every column except the row id and the target 'result'.
X = filtered.loc[:, (filtered.columns != 'id') & (filtered.columns != 'result')]
y = filtered.loc[:, filtered.columns == 'result']
# Fixed random_state keeps the split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
logisticRegr = LogisticRegression()
logisticRegr.fit(X_train, y_train)
predictions = logisticRegr.predict(X_test)
score = logisticRegr.score(X_test, y_test)
print('Accuracy of logistic regression classifier on test set: {:.4f}'.format(score))
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
import pickle

# Persist the trained model, then reload it to verify the round trip.
# Context managers guarantee the file handles are closed; the original
# leaked them by passing bare open() results straight to pickle.
with open("/content/preconditionLogReg.pkl", "wb") as model_file:
    pickle.dump(logisticRegr, model_file)
with open("/content/preconditionLogReg.pkl", "rb") as model_file:
    logisticRegr = pickle.load(model_file)
score = logisticRegr.score(X_test, y_test)
print(score)
from sklearn import svm
import numpy as np
# Same feature/target split as the logistic-regression section above.
X = filtered.loc[:, (filtered.columns != 'id') & (filtered.columns != 'result')]
y = filtered.loc[:, filtered.columns == 'result']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
svm_model = svm.SVC()
# ravel() flattens the single-column target frame into the 1-D array sklearn expects.
svm_model.fit(X_train, np.ravel(y_train))
predictions = svm_model.predict(X_test)
score = svm_model.score(X_test, y_test)
print('Accuracy of svm classifier on test set: {:.4f}'.format(score))
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
def precondition_risk(x):
    """Return the predicted probability of class 1 (positive result) for feature vector x.

    NOTE: the pickled model is reloaded on every call (kept to preserve the
    original behaviour); the file handle is now closed via a context manager
    instead of being leaked through a bare open().
    """
    with open("model/preconditionLogReg.pkl", "rb") as model_file:
        preconditionLogisticRegr = pickle.load(model_file)
    return preconditionLogisticRegr.predict_proba([x])[0][1]
precondition_risk(X_test.loc[0,:])
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"sklearn.linear_model.LogisticRegression",
"numpy.ravel",
"sklearn.svm.SVC"
] | [((223, 293), 'pandas.read_csv', 'pd.read_csv', (['"""/content/200918COVID19MEXICO.csv"""'], {'encoding': '"""ISO-8859-1"""'}), "('/content/200918COVID19MEXICO.csv', encoding='ISO-8859-1')\n", (234, 293), True, 'import pandas as pd\n'), ((4181, 4235), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)', 'random_state': '(0)'}), '(X, y, test_size=0.33, random_state=0)\n', (4197, 4235), False, 'from sklearn.model_selection import train_test_split\n'), ((4252, 4272), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (4270, 4272), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5009, 5063), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)', 'random_state': '(0)'}), '(X, y, test_size=0.33, random_state=0)\n', (5025, 5063), False, 'from sklearn.model_selection import train_test_split\n'), ((5077, 5086), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (5084, 5086), False, 'from sklearn import svm\n'), ((4536, 4578), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (4557, 4578), False, 'from sklearn.metrics import classification_report\n'), ((5110, 5127), 'numpy.ravel', 'np.ravel', (['y_train'], {}), '(y_train)\n', (5118, 5127), True, 'import numpy as np\n'), ((5336, 5378), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (5357, 5378), False, 'from sklearn.metrics import classification_report\n')] |
#! /bin/env python
import sys, os
import PIL
import numpy as np
from scipy import ndimage, misc
from copy import copy
from matplotlib.pyplot import plot, show
# Module to compute the area of an irregulare, closed, convex polygon.
# The x and y vectors are the vertices ; the last should equal the first
def poly_area(x, y):
    """Area of an irregular closed convex polygon via the shoelace formula.

    x, y -- vertex coordinate sequences; the last vertex should repeat
    the first so the polygon is closed.
    """
    twice_signed = sum(x0 * y1 - x1 * y0
                       for (x0, x1), (y0, y1) in zip(zip(x, x[1:]), zip(y, y[1:])))
    return abs(0.5 * twice_signed)
# Module to compute chain code of a feature's contour
# (represented by its pixel value) on a image.
# Inputs:
# image - 2d numpy array
# pixel_value - scalar containing the pixel value
# of feature on the image.
# Outputs:
# chaincode, locations - String containing the chain code
# of the feature's contour,
# and locations [X,Y] of the contour's pixels
#
def image2chain(image,pixel_value,
                FILL_HOLES=False,
                REMOVE_ISOLATED_PIXELS=False,
                VERBOSE=True):
    """Trace the contour of the feature `image == pixel_value` and return
    (chaincode, locations): an 8-direction Freeman chain-code string and the
    [X, Y] pixel positions of the contour.  Returns ("", []) on error."""
    # 8-connected neighbour offsets and their chain-code digits; both arrays
    # are rotated in lockstep during the walk so index 0 is always the
    # direction to probe first.
    ardir = np.array([[-1,0],[-1,1],[0,1],[1,1],[1,0],[1,-1],[0,-1],[-1,-1]])
    ccdir = np.array([0,7,6,5,4,3,2,1])
    if (image.ndim != 2):
        print ("Error: input array must have 2 dimensions!")
        return "",[]
    nX = image.shape[0] ; nY = image.shape[1]
    n = nX*nY
    mask = (image == pixel_value)
    # Optional pre-cleaning of the binary mask before tracing.
    if (REMOVE_ISOLATED_PIXELS): mask = closerec(mask)
    if (FILL_HOLES): mask = fill_holes(mask)
    # Find location of the starting pixel [X,Y]
    # It must be the leftmost-uppermost pixel belonging to the feature
    indices = np.where(mask)
    cc_x_pix = min(indices[0])
    cc_y_pix = max(indices[1][np.where(indices[0] == cc_x_pix)])
    chaincode="" ; locations = []
    xpix = int(cc_x_pix) ; ypix = int(cc_y_pix)
    loop = True ; niter=0
    while (loop):
        # Probe neighbours in the current rotation order; stop at the first
        # foreground pixel.
        for i,direction in enumerate(ardir):
            x = xpix + direction[0] ; y = ypix + direction[1]
            current_ccdir = ccdir[i]
            if (mask[x,y]): break
        chaincode += str(current_ccdir)
        locations.append([xpix,ypix])
        # if return to starting pixel, then stop
        if ([x,y] == [cc_x_pix,cc_y_pix]):
            locations.append([x,y])
            loop=False
        # assign new pixel position
        xpix = x ; ypix = y
        # rotate direction vector
        ishift = int(np.where(ccdir == (int(current_ccdir)+4)%8)[0])
        ardir = np.roll(ardir,7-ishift,axis=0) ; ccdir = np.roll(ccdir,7-ishift)
        niter+=1
        # Safety valve: a contour cannot be longer than the pixel count.
        if (niter > n):
            if (VERBOSE): print ("Error: can not compute chain code!")
            return "",[]
    return chaincode, locations
# Module to compute a feature's contour on a image from its chain code.
# Inputs:
# chain - string containing the chain code
# start_pixel - location [X,Y] of the starting pixel.
#
# Outputs:
# locations - the locations X and Y of the feature's contour.
#
def chain2image(chain, start_pixel=[0, 0],
                VERBOSE=True, CCLOCKWISE=False):
    """Rebuild contour pixel coordinates X, Y from a Freeman chain code.

    chain       -- string of digits 0-7 (8-connected directions)
    start_pixel -- [X, Y] of the first contour pixel (the mutable default
                   is safe: np.array() copies it, it is never mutated)
    VERBOSE     -- unused; kept for interface compatibility
    CCLOCKWISE  -- mirror the Y step to walk the chain the other way round

    Improvements: dropped the dead local `ccdir` (defined but never used
    here) and unpacked each step into dx, dy for clarity.
    """
    origin = np.array(start_pixel)
    steps = np.array([[-1, 0], [-1, 1], [0, 1], [1, 1],
                      [1, 0], [1, -1], [0, -1], [-1, -1]])
    if CCLOCKWISE:
        steps[:, 1] = -steps[:, 1]
    X, Y = [origin[0]], [origin[1]]
    for code in chain:
        dx, dy = steps[int(code)]
        X.append(np.int64(X[-1]) + dx)
        Y.append(np.int64(Y[-1]) + dy)
    return X, Y
# Module to apply a closing reconstruction operator on the input binary image
def closerec(image, open_structure=None, close_structure=None):
    """Closing-by-reconstruction: erode `image` down to the last non-empty
    seed, then propagate (geodesic dilation) that seed inside the original
    mask, keeping only the components that survive the longest erosion.

    image           -- 2-D array; non-boolean input is thresholded at > 0
    open_structure  -- structuring element for the repeated erosions
    close_structure -- structuring element for the propagation
    Returns a boolean array.

    Bug fix: the original never initialised `current_image0` before the
    loop, so an all-background input raised UnboundLocalError.  The seed
    is now initialised up front, making the empty case return all-False.
    """
    if not (isinstance(image[0, 0], np.bool_)):
        binary_image = image > 0
    else:
        binary_image = image.copy()
    # Erode repeatedly; `seed` always holds the last non-empty intermediate.
    seed = binary_image.copy()
    current = binary_image.copy()
    while (True in current):
        seed = current.copy()
        current = ndimage.morphology.binary_erosion(seed,
                                                    structure=open_structure)
    # Reconstruct every mask component that still contains a seed pixel.
    return ndimage.morphology.binary_propagation(
        seed, structure=close_structure, mask=binary_image)
# Apply ndimage.morphology.fill_holes module on the input binary image
def fill_holes(image, structure=None):
    """Fill interior holes of the binary foreground.

    Non-boolean input is thresholded at > 0 first; boolean input is
    copied so the caller's array is never modified.
    """
    if isinstance(image[0, 0], np.bool_):
        binary_image = image.copy()
    else:
        binary_image = image > 0
    return ndimage.morphology.binary_fill_holes(binary_image,
                                                structure=structure)
# Module to adjust automatically the contrast of an image using
# the histogram of its pixel's values.
def auto_contrast(image, low=0.02, high=0.99):
    """Clip `image` IN PLACE to the intensity range that keeps the central
    histogram mass between quantile fractions `low` and `high`; return it.

    image -- numeric ndarray, modified in place and returned
    low   -- fraction of pixels allowed below the lower clip level
    high  -- fraction of pixels allowed below the upper clip level

    Fixes: scipy.misc.bytescale was removed from SciPy, so the 8-bit
    quantisation is inlined with the same formula (linear map to [0, 255],
    +0.5 rounding, uint8 cast); a flat image now returns unchanged instead
    of dividing by zero.
    """
    max_val = np.max(image)
    min_val = np.min(image)
    span = max_val - min_val
    if span == 0:
        # Flat image: nothing to stretch (and avoids a divide-by-zero).
        return image
    imb = (np.clip((image - min_val) * (255.0 / span), 0, 255) + 0.5).astype(np.uint8)
    xh = np.array(range(257))
    histo = np.histogram(imb, bins=xh)
    xh = histo[1][0:-1]
    yh = histo[0]
    yhtot = np.sum(yh)
    # Walk the histogram until `low` of the total mass lies below lev_min.
    yh_i = 0.0
    i = 0
    if low <= 0.0:
        lev_min = min_val
    else:
        while yh_i < low * yhtot:
            yh_i += yh[i]
            i += 1
        lev_min = span * (float(xh[i - 1]) / 255.0) + min_val
    # Same walk from the start for the upper level.
    yh_i = 0.0
    i = 0
    if high >= 1.0:
        lev_max = max_val
    else:
        while yh_i < high * yhtot:
            yh_i += yh[i]
            i += 1
        lev_max = span * (float(xh[i - 1]) / 255.0) + min_val
    # Clamp in place to [lev_min, lev_max].
    image[np.where(image <= lev_min)] = lev_min
    image[np.where(image >= lev_max)] = lev_max
    return image
| [
"numpy.histogram",
"numpy.int64",
"numpy.roll",
"numpy.where",
"scipy.ndimage.morphology.binary_propagation",
"numpy.max",
"numpy.array",
"numpy.sum",
"scipy.misc.bytescale",
"numpy.min",
"scipy.ndimage.morphology.binary_fill_holes",
"scipy.ndimage.morphology.binary_erosion"
] | [((1109, 1194), 'numpy.array', 'np.array', (['[[-1, 0], [-1, 1], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1]]'], {}), '([[-1, 0], [-1, 1], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1]]\n )\n', (1117, 1194), True, 'import numpy as np\n'), ((1187, 1221), 'numpy.array', 'np.array', (['[0, 7, 6, 5, 4, 3, 2, 1]'], {}), '([0, 7, 6, 5, 4, 3, 2, 1])\n', (1195, 1221), True, 'import numpy as np\n'), ((1658, 1672), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (1666, 1672), True, 'import numpy as np\n'), ((3110, 3131), 'numpy.array', 'np.array', (['start_pixel'], {}), '(start_pixel)\n', (3118, 3131), True, 'import numpy as np\n'), ((3144, 3229), 'numpy.array', 'np.array', (['[[-1, 0], [-1, 1], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1]]'], {}), '([[-1, 0], [-1, 1], [0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1]]\n )\n', (3152, 3229), True, 'import numpy as np\n'), ((3222, 3256), 'numpy.array', 'np.array', (['[0, 7, 6, 5, 4, 3, 2, 1]'], {}), '([0, 7, 6, 5, 4, 3, 2, 1])\n', (3230, 3256), True, 'import numpy as np\n'), ((4160, 4264), 'scipy.ndimage.morphology.binary_propagation', 'ndimage.morphology.binary_propagation', (['current_image0'], {'structure': 'close_structure', 'mask': 'binary_image'}), '(current_image0, structure=\n close_structure, mask=binary_image)\n', (4197, 4264), False, 'from scipy import ndimage, misc\n'), ((4599, 4670), 'scipy.ndimage.morphology.binary_fill_holes', 'ndimage.morphology.binary_fill_holes', (['binary_image'], {'structure': 'structure'}), '(binary_image, structure=structure)\n', (4635, 4670), False, 'from scipy import ndimage, misc\n'), ((4915, 4928), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4921, 4928), True, 'import numpy as np\n'), ((4943, 4956), 'numpy.min', 'np.min', (['image'], {}), '(image)\n', (4949, 4956), True, 'import numpy as np\n'), ((4967, 4988), 'scipy.misc.bytescale', 'misc.bytescale', (['image'], {}), '(image)\n', (4981, 4988), False, 'from scipy import ndimage, misc\n'), 
((5031, 5057), 'numpy.histogram', 'np.histogram', (['imb'], {'bins': 'xh'}), '(imb, bins=xh)\n', (5043, 5057), True, 'import numpy as np\n'), ((5148, 5158), 'numpy.sum', 'np.sum', (['yh'], {}), '(yh)\n', (5154, 5158), True, 'import numpy as np\n'), ((5685, 5711), 'numpy.where', 'np.where', (['(image <= lev_min)'], {}), '(image <= lev_min)\n', (5693, 5711), True, 'import numpy as np\n'), ((5745, 5771), 'numpy.where', 'np.where', (['(image >= lev_max)'], {}), '(image >= lev_max)\n', (5753, 5771), True, 'import numpy as np\n'), ((2488, 2522), 'numpy.roll', 'np.roll', (['ardir', '(7 - ishift)'], {'axis': '(0)'}), '(ardir, 7 - ishift, axis=0)\n', (2495, 2522), True, 'import numpy as np\n'), ((2529, 2555), 'numpy.roll', 'np.roll', (['ccdir', '(7 - ishift)'], {}), '(ccdir, 7 - ishift)\n', (2536, 2555), True, 'import numpy as np\n'), ((3964, 4039), 'scipy.ndimage.morphology.binary_erosion', 'ndimage.morphology.binary_erosion', (['current_image0'], {'structure': 'open_structure'}), '(current_image0, structure=open_structure)\n', (3997, 4039), False, 'from scipy import ndimage, misc\n'), ((1734, 1766), 'numpy.where', 'np.where', (['(indices[0] == cc_x_pix)'], {}), '(indices[0] == cc_x_pix)\n', (1742, 1766), True, 'import numpy as np\n'), ((3380, 3395), 'numpy.int64', 'np.int64', (['X[-1]'], {}), '(X[-1])\n', (3388, 3395), True, 'import numpy as np\n'), ((3441, 3456), 'numpy.int64', 'np.int64', (['Y[-1]'], {}), '(Y[-1])\n', (3449, 3456), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch import nn
def batch_generator(texts, iterations=1, batch_size=32, device=None):
    """Yield `iterations` random padded batches of shape (batch_size, max_len).

    texts      -- indexable collection of token-id sequences; shuffled in place
    iterations -- number of batches; 0 means len(texts) // batch_size
    device     -- optional torch device each batch is moved to

    Fixes: the original wrapped already-built tensors in torch.tensor(),
    which copies the data and emits a UserWarning, and wrapped the sampled
    index in a tensor for no reason; both removed.
    """
    np.random.shuffle(texts)
    all_batches_number = len(texts) // batch_size
    if iterations == 0:
        iterations = all_batches_number
    print('Process {} iterations. {} available.'.format(iterations, all_batches_number))
    for _ in range(iterations):
        # Sample with replacement; as_tensor avoids copying tensor inputs.
        batch = [torch.as_tensor(texts[np.random.choice(len(texts))])
                 for _ in range(batch_size)]
        padded = nn.utils.rnn.pad_sequence(batch).long().transpose(0, 1)
        yield padded.to(device) if device is not None else padded
def neg_batch_generator(texts, neg_size=10, batch_size=32, device=None):
    """Endlessly yield negative-sample batches of shape (batch_size, neg_size, ...).

    texts -- array-like supporting fancy indexing with an index array
             (e.g. np.ndarray); each row is a token-id sequence.
    device -- optional torch device each batch is moved to.

    Fixes: indices are used as a plain numpy array (the torch.tensor wrap
    added nothing), and the redundant torch.tensor() around pad_sequence
    (extra copy + UserWarning) is removed.
    """
    while True:
        batch = [torch.as_tensor(texts[np.random.choice(len(texts), size=neg_size)])
                 for _ in range(batch_size)]
        padded = nn.utils.rnn.pad_sequence(batch).long().transpose(0, 1)
        yield padded.to(device) if device is not None else padded
"torch.tensor",
"torch.nn.utils.rnn.pad_sequence",
"numpy.random.shuffle"
] | [((129, 153), 'numpy.random.shuffle', 'np.random.shuffle', (['texts'], {}), '(texts)\n', (146, 153), True, 'import numpy as np\n'), ((612, 637), 'torch.tensor', 'torch.tensor', (['anchor_text'], {}), '(anchor_text)\n', (624, 637), False, 'import torch\n'), ((1207, 1232), 'torch.tensor', 'torch.tensor', (['anchor_text'], {}), '(anchor_text)\n', (1219, 1232), False, 'import torch\n'), ((824, 865), 'torch.nn.utils.rnn.pad_sequence', 'nn.utils.rnn.pad_sequence', (['resulted_batch'], {}), '(resulted_batch)\n', (849, 865), False, 'from torch import nn\n'), ((1419, 1460), 'torch.nn.utils.rnn.pad_sequence', 'nn.utils.rnn.pad_sequence', (['resulted_batch'], {}), '(resulted_batch)\n', (1444, 1460), False, 'from torch import nn\n'), ((702, 743), 'torch.nn.utils.rnn.pad_sequence', 'nn.utils.rnn.pad_sequence', (['resulted_batch'], {}), '(resulted_batch)\n', (727, 743), False, 'from torch import nn\n'), ((1297, 1338), 'torch.nn.utils.rnn.pad_sequence', 'nn.utils.rnn.pad_sequence', (['resulted_batch'], {}), '(resulted_batch)\n', (1322, 1338), False, 'from torch import nn\n')] |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the strategy class."""
import numpy as np
from tensorflow import keras
from aea.configurations.constants import DEFAULT_LEDGER
from aea.helpers.search.generic import (
AGENT_LOCATION_MODEL,
AGENT_REMOVE_SERVICE_MODEL,
AGENT_SET_SERVICE_MODEL,
SIMPLE_DATA_MODEL,
)
from aea.helpers.search.models import Description, Location, Query
from aea.skills.base import Model
DEFAULT_PRICE_PER_DATA_BATCH = 10
DEFAULT_BATCH_SIZE = 32
DEFAULT_SELLER_TX_FEE = 0
DEFAULT_BUYER_TX_FEE = 0
DEFAULT_CURRENCY_PBK = "FET"
DEFAULT_LEDGER_ID = DEFAULT_LEDGER
DEFAULT_LOCATION = {"longitude": 51.5194, "latitude": 0.1270}
DEFAULT_SERVICE_DATA = {"key": "dataset_id", "value": "fmnist"}
class Strategy(Model):
    """Strategy for an ML data-selling agent: OEF registration descriptions,
    trade terms, and sampling of the Fashion-MNIST training data it sells."""

    def __init__(self, **kwargs) -> None:
        """Initialize the strategy of the agent."""
        self.price_per_data_batch = kwargs.pop(
            "price_per_data_batch", DEFAULT_PRICE_PER_DATA_BATCH
        )
        self.batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE)
        self.seller_tx_fee = kwargs.pop("seller_tx_fee", DEFAULT_SELLER_TX_FEE)
        self.buyer_tx_fee = kwargs.pop("buyer_tx_fee", DEFAULT_BUYER_TX_FEE)
        self.currency_id = kwargs.pop("currency_id", DEFAULT_CURRENCY_PBK)
        self._ledger_id = kwargs.pop("ledger_id", DEFAULT_LEDGER_ID)
        self._is_ledger_tx = kwargs.pop("is_ledger_tx", False)
        location = kwargs.pop("location", DEFAULT_LOCATION)
        self._agent_location = {
            "location": Location(location["longitude"], location["latitude"])
        }
        self._set_service_data = kwargs.pop("service_data", DEFAULT_SERVICE_DATA)
        # Validate with an explicit raise instead of `assert`, which is
        # silently stripped when Python runs with -O.
        if not (
            len(self._set_service_data) == 2
            and "key" in self._set_service_data
            and "value" in self._set_service_data
        ):
            raise ValueError("service_data must contain keys `key` and `value`")
        self._remove_service_data = {"key": self._set_service_data["key"]}
        self._simple_service_data = {
            self._set_service_data["key"]: self._set_service_data["value"]
        }
        super().__init__(**kwargs)
        # loading ML dataset
        # TODO this should be parametrized
        (
            (self.train_x, self.train_y),
            (self.test_x, self.test_y),
        ) = keras.datasets.fashion_mnist.load_data()

    @property
    def ledger_id(self) -> str:
        """Get the ledger id."""
        return self._ledger_id

    @property
    def is_ledger_tx(self) -> bool:
        """Get whether transactions settle on-ledger (annotation fixed: bool, not str)."""
        return self._is_ledger_tx

    def get_location_description(self) -> Description:
        """
        Get the location description.

        :return: a description of the agent's location
        """
        description = Description(
            self._agent_location, data_model=AGENT_LOCATION_MODEL,
        )
        return description

    def get_register_service_description(self) -> Description:
        """
        Get the register service description.

        :return: a description of the offered services
        """
        description = Description(
            self._set_service_data, data_model=AGENT_SET_SERVICE_MODEL,
        )
        return description

    def get_service_description(self) -> Description:
        """
        Get the simple service description.

        :return: a description of the offered services
        """
        description = Description(
            self._simple_service_data, data_model=SIMPLE_DATA_MODEL,
        )
        return description

    def get_unregister_service_description(self) -> Description:
        """
        Get the unregister service description.

        :return: a description of the to be removed service
        """
        description = Description(
            self._remove_service_data, data_model=AGENT_REMOVE_SERVICE_MODEL,
        )
        return description

    def sample_data(self, n: int):
        """Sample n distinct rows (without replacement) from the training data."""
        idx = np.arange(self.train_x.shape[0])
        # Boolean mask keeps the sampled rows in their original order.
        mask = np.zeros_like(idx, dtype=bool)
        selected = np.random.choice(idx, n, replace=False)
        mask[selected] = True
        x_sample = self.train_x[mask]
        y_sample = self.train_y[mask]
        return x_sample, y_sample

    def is_matching_supply(self, query: Query) -> bool:
        """
        Check if the query matches the supply.

        :param query: the query
        :return: bool indiciating whether matches or not
        """
        service_desc = self.get_service_description()
        return query.check(service_desc)

    def generate_terms(self) -> Description:
        """
        Generate a proposal.

        :return: a description of the trade terms for one data batch
        """
        address = self.context.agent_addresses[self.ledger_id]
        proposal = Description(
            {
                "batch_size": self.batch_size,
                "price": self.price_per_data_batch,
                "seller_tx_fee": self.seller_tx_fee,
                "buyer_tx_fee": self.buyer_tx_fee,
                "currency_id": self.currency_id,
                "ledger_id": self.ledger_id,
                "address": address,
            }
        )
        return proposal

    def is_valid_terms(self, terms: Description) -> bool:
        """
        Check the terms are valid.

        :param terms: the terms
        :return: boolean
        """
        return terms == self.generate_terms()
| [
"tensorflow.keras.datasets.fashion_mnist.load_data",
"numpy.random.choice",
"aea.helpers.search.models.Description",
"numpy.zeros_like",
"aea.helpers.search.models.Location",
"numpy.arange"
] | [((3134, 3174), 'tensorflow.keras.datasets.fashion_mnist.load_data', 'keras.datasets.fashion_mnist.load_data', ([], {}), '()\n', (3172, 3174), False, 'from tensorflow import keras\n'), ((3602, 3668), 'aea.helpers.search.models.Description', 'Description', (['self._agent_location'], {'data_model': 'AGENT_LOCATION_MODEL'}), '(self._agent_location, data_model=AGENT_LOCATION_MODEL)\n', (3613, 3668), False, 'from aea.helpers.search.models import Description, Location, Query\n'), ((3931, 4002), 'aea.helpers.search.models.Description', 'Description', (['self._set_service_data'], {'data_model': 'AGENT_SET_SERVICE_MODEL'}), '(self._set_service_data, data_model=AGENT_SET_SERVICE_MODEL)\n', (3942, 4002), False, 'from aea.helpers.search.models import Description, Location, Query\n'), ((4254, 4322), 'aea.helpers.search.models.Description', 'Description', (['self._simple_service_data'], {'data_model': 'SIMPLE_DATA_MODEL'}), '(self._simple_service_data, data_model=SIMPLE_DATA_MODEL)\n', (4265, 4322), False, 'from aea.helpers.search.models import Description, Location, Query\n'), ((4594, 4671), 'aea.helpers.search.models.Description', 'Description', (['self._remove_service_data'], {'data_model': 'AGENT_REMOVE_SERVICE_MODEL'}), '(self._remove_service_data, data_model=AGENT_REMOVE_SERVICE_MODEL)\n', (4605, 4671), False, 'from aea.helpers.search.models import Description, Location, Query\n'), ((4811, 4843), 'numpy.arange', 'np.arange', (['self.train_x.shape[0]'], {}), '(self.train_x.shape[0])\n', (4820, 4843), True, 'import numpy as np\n'), ((4859, 4889), 'numpy.zeros_like', 'np.zeros_like', (['idx'], {'dtype': 'bool'}), '(idx, dtype=bool)\n', (4872, 4889), True, 'import numpy as np\n'), ((4910, 4949), 'numpy.random.choice', 'np.random.choice', (['idx', 'n'], {'replace': '(False)'}), '(idx, n, replace=False)\n', (4926, 4949), True, 'import numpy as np\n'), ((5644, 5891), 'aea.helpers.search.models.Description', 'Description', (["{'batch_size': self.batch_size, 'price': 
self.price_per_data_batch,\n 'seller_tx_fee': self.seller_tx_fee, 'buyer_tx_fee': self.buyer_tx_fee,\n 'currency_id': self.currency_id, 'ledger_id': self.ledger_id, 'address':\n address}"], {}), "({'batch_size': self.batch_size, 'price': self.\n price_per_data_batch, 'seller_tx_fee': self.seller_tx_fee,\n 'buyer_tx_fee': self.buyer_tx_fee, 'currency_id': self.currency_id,\n 'ledger_id': self.ledger_id, 'address': address})\n", (5655, 5891), False, 'from aea.helpers.search.models import Description, Location, Query\n'), ((2356, 2409), 'aea.helpers.search.models.Location', 'Location', (["location['longitude']", "location['latitude']"], {}), "(location['longitude'], location['latitude'])\n", (2364, 2409), False, 'from aea.helpers.search.models import Description, Location, Query\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat 28/05/2021
@author: Group 1
"""
import os
import folium
from folium.plugins import HeatMap
import branca
import numpy as np
import pandas as pd
import geopandas as gp
from psycopg2 import connect
from folium.plugins import MarkerCluster
from datetime import datetime
import geopy
# connect to database and catch data
# Connect to the database and fetch the whole telemetry table into a DataFrame.
conn = connect(host='localhost', port='5432', dbname='iUrbanDB', user='postgres', password='<PASSWORD>')
sql_command = "select * from tdata"
try:
    df = pd.read_sql(sql_command, conn)
except Exception as exc:
    # Narrowed from a bare `except:`; exit with a non-zero status on failure
    # (the original exit() reported success, status 0).
    print("load data from postgres failure !")
    raise SystemExit(1) from exc
# set some static values
originX=df.longitude[0]
originY=df.latitude[0]
X=df.longitude
Y=df.latitude
humidity=df.humidity
data=df[["latitude", "longitude", "humidity"]]
# Colour ramp used by the humidity colorbar (light to dark).
colorlist = ['#ffffff', '#b6f6ff', '#6eedfe', '#25e4ff', '#00e6f1', '#19ffbe', '#46fe9c',
             '#f6fa0f', '#ffde00', '#fbae00', '#ff7f00', '#ff5300', '#fe2200', '#ef0602', '#c20000', '#950101',
             '#640002']
def city_map():
    """Build the base map centred on the first record; save it to citymap.html."""
    base_map = folium.Map(location=[originY, originX], zoom_start=10, disable_3d=True)
    base_map.add_child(folium.LatLngPopup())
    base_map.save("citymap.html")
    return base_map
city_map()
def marker():
    """Draw one circle per record, colour-coded by humidity; save to markmap.html."""
    mark_map = city_map()
    colorbar = branca.colormap.StepColormap(
        colorlist, vmin=df.humidity.min(), vmax=df.humidity.max(), caption='humidity')
    for lat, lon, hum in zip(Y, X, humidity):
        folium.Circle(
            location=[lat, lon],
            radius=2,
            popup=hum,
            color=colorbar(hum),
            fill=True,
            fill_opacity=0.5,
        ).add_to(mark_map)
    mark_map.add_child(folium.LatLngPopup())
    mark_map.add_child(colorbar)
    mark_map.save("markmap.html")
    return mark_map
marker()
def heatmap():
    """Overlay a humidity heat-map layer on the base map and save it."""
    heat = city_map()
    layer = HeatMap(data)
    layer.add_to(heat)
    heat.add_child(folium.LatLngPopup())
    heat.save('heatmap.html')
    return heat


heatmap()
def clustered():
    """Group the measurement markers into zoom-dependent clusters and save."""
    cluster_map = city_map()
    cluster = MarkerCluster()
    cluster.add_to(cluster_map)
    for i in range(len(X)):
        pin = folium.Marker(
            location=[Y[i], X[i]],
            icon=None,
            popup=df.humidity[i],
        )
        pin.add_to(cluster)
    # NOTE(review): the original also re-adds the cluster via add_child after
    # add_to — kept for identical behaviour, though it looks redundant.
    cluster_map.add_child(cluster)
    cluster_map.add_child(folium.LatLngPopup())
    cluster_map.save("clustermap.html")
    return cluster_map


clustered()
def trade():
    """Plot the time-ordered trajectory of one named sensor as a red line.

    Start and end of the track get dedicated markers; the result is saved
    to ``trademap.html``.
    """
    track = df.loc[df['name'] == '<NAME>']
    track = track[["date", "time", "latitude", "longitude"]]
    # merge date and time into a single sortable timestamp column
    track['exact_time'] = track.apply(lambda x: x['date'] + " " + x['time'], axis=1)
    track = track.drop(['date'], axis=1).drop(['time'], axis=1)
    records = np.array(track).tolist()
    # sort chronologically; column 2 is the merged timestamp string
    ordered = sorted(records, key=lambda t: datetime.strptime(t[2], '%d/%m/%Y %H:%M:%S'))
    loc = [[point[0], point[1]] for point in ordered]
    route_map = city_map()
    line = folium.PolyLine(loc, weight=4, color='red', opacity=0.8)
    line.add_to(route_map)
    folium.Marker(loc[0], popup='<b>Starting Point</b>').add_to(route_map)
    folium.Marker(loc[-1], popup='<b>End Point</b>').add_to(route_map)
    route_map.add_child(folium.LatLngPopup())
    route_map.save("trademap.html")
    return route_map


trade()
def polygon():
    """Draw the shapefile polygons, all humidity circles and a geocoded
    3 km buffer around 'Larco Museum' on one map, saved to poly_map.html."""
    poly_map = gp.GeoDataFrame.from_file("selected.shp", encoding='utf-8')
    Poly_map = city_map()
    # Poly_map.choropleth(
    # geo_data=poly_map,
    # key_on= 'feature.properties.NAME_1',
    # fill_color='Red',
    # fill_opacity=0.05,
    # line_opacity=0.2)
    for _, r in poly_map.iterrows():
        # without simplifying the representation of each borough, the map might not be displayed
        # sim_geo = gpd.GeoSeries(r['geometry'])
        sim_geo = gp.GeoSeries(r['geometry']).simplify(tolerance=0.001)
        geo_j = sim_geo.to_json()
        geo_j = folium.GeoJson(data=geo_j,
                               style_function=lambda x: {'fillColor': 'orange'})
        folium.Popup(r['NAME_1']).add_to(geo_j)
        geo_j.add_to(Poly_map)
    # step colormap shared by the humidity circles below
    colorbar = branca.colormap.StepColormap(colorlist,vmin = df.humidity.min(),vmax = df.humidity.max(), caption= 'humidity')
    for i in range(len(X)):
        folium.Circle(
            location=[Y[i],X[i]],
            radius=2,
            popup=df.humidity[i],
            color=colorbar(df.humidity[i]),
            fill=True,
            fill_opacity=0.5
        ).add_to(Poly_map)
    # geocode the landmark, buffer 3000 in a metric CRS (EPSG:7855), then
    # re-project the buffer back to WGS84 for display
    position=gp.tools.geocode('Larco Museum', 'Nominatim' , user_agent='myuseragent')
    buffer=position.to_crs(epsg=7855).buffer(3000).to_crs(epsg=4326)
    folium.GeoJson(buffer).add_to(Poly_map)
    folium.GeoJson(position, tooltip=folium.GeoJsonTooltip(['address'])).add_to(Poly_map)
    Poly_map.add_child(folium.LatLngPopup())
    Poly_map.add_child(colorbar)
    Poly_map.save("poly_map.html")
    return Poly_map
polygon()
def geo_code():
    """Geocode 'Larco Museum', draw a 3 km buffer around it plus all
    humidity circles on the base map, and save to ``geocode_map.html``.

    Bug fix: the buffer layer was added to ``Poly_map``, a name that only
    exists as a local variable inside ``polygon()``, so calling this
    function raised ``NameError``. It is now added to this function's own
    map. The unused duplicate geocoding call (``position2``) was removed.
    """
    geocodemap = city_map()
    colorbar = branca.colormap.StepColormap(colorlist, vmin=df.humidity.min(), vmax=df.humidity.max(),
                                          caption='humidity')
    for i in range(len(X)):
        folium.Circle(
            location=[Y[i], X[i]],
            radius=2,
            popup=df.humidity[i],
            color=colorbar(df.humidity[i]),
            fill=True,
            fill_opacity=0.5
        ).add_to(geocodemap)
    position = gp.tools.geocode('Larco Museum', 'Nominatim', user_agent='myuseragent')
    # buffer 3000 in a metric CRS (EPSG:7855) so the distance is in metres,
    # then re-project back to WGS84 for display
    buffer = position.to_crs(epsg=7855).buffer(3000).to_crs(epsg=4326)
    folium.GeoJson(buffer).add_to(geocodemap)
    folium.GeoJson(position, tooltip=folium.GeoJsonTooltip(['address'])).add_to(geocodemap)
    geocodemap.add_child(folium.LatLngPopup())
    geocodemap.add_child(colorbar)
    geocodemap.save("geocode_map.html")
    return geocodemap
def spatial_join():
    """Spatially join the measurement points with the shapefile polygons
    and render the joined features as a choropleth, saved to sjmap.html."""
    regions = gp.GeoDataFrame.from_file("selected.shp", encoding='utf-8')
    join_map = city_map()
    points = gp.GeoDataFrame(df, geometry=gp.points_from_xy(df.longitude, df.latitude))
    statistic = gp.sjoin(regions, points, how='inner', op='intersects')
    join_map.choropleth(
        geo_data=statistic,
        key_on='feature.properties.NAME_1',
        fill_color='Red',
        fill_opacity=0.05,
        line_opacity=0.2)
    join_map.save('sjmap.html')
    return


spatial_join()
| [
"psycopg2.connect",
"folium.PolyLine",
"geopandas.tools.geocode",
"geopandas.sjoin",
"folium.LatLngPopup",
"folium.GeoJson",
"folium.Marker",
"datetime.datetime.strptime",
"geopandas.GeoDataFrame.from_file",
"folium.GeoJsonTooltip",
"folium.Map",
"numpy.array",
"folium.Popup",
"folium.plug... | [((370, 471), 'psycopg2.connect', 'connect', ([], {'host': '"""localhost"""', 'port': '"""5432"""', 'dbname': '"""iUrbanDB"""', 'user': '"""postgres"""', 'password': '"""<PASSWORD>"""'}), "(host='localhost', port='5432', dbname='iUrbanDB', user='postgres',\n password='<PASSWORD>')\n", (377, 471), False, 'from psycopg2 import connect\n'), ((518, 548), 'pandas.read_sql', 'pd.read_sql', (['sql_command', 'conn'], {}), '(sql_command, conn)\n', (529, 548), True, 'import pandas as pd\n'), ((1041, 1112), 'folium.Map', 'folium.Map', ([], {'location': '[originY, originX]', 'zoom_start': '(10)', 'disable_3d': '(True)'}), '(location=[originY, originX], zoom_start=10, disable_3d=True)\n', (1051, 1112), False, 'import folium\n'), ((2934, 2952), 'numpy.array', 'np.array', (['linedata'], {}), '(linedata)\n', (2942, 2952), True, 'import numpy as np\n'), ((3596, 3655), 'geopandas.GeoDataFrame.from_file', 'gp.GeoDataFrame.from_file', (['"""selected.shp"""'], {'encoding': '"""utf-8"""'}), "('selected.shp', encoding='utf-8')\n", (3621, 3655), True, 'import geopandas as gp\n'), ((4817, 4888), 'geopandas.tools.geocode', 'gp.tools.geocode', (['"""Larco Museum"""', '"""Nominatim"""'], {'user_agent': '"""myuseragent"""'}), "('Larco Museum', 'Nominatim', user_agent='myuseragent')\n", (4833, 4888), True, 'import geopandas as gp\n'), ((5731, 5802), 'geopandas.tools.geocode', 'gp.tools.geocode', (['"""Larco Museum"""', '"""Nominatim"""'], {'user_agent': '"""myuseragent"""'}), "('Larco Museum', 'Nominatim', user_agent='myuseragent')\n", (5747, 5802), True, 'import geopandas as gp\n'), ((5819, 5890), 'geopandas.tools.geocode', 'gp.tools.geocode', (['"""Larco Museum"""', '"""Nominatim"""'], {'user_agent': '"""myuseragent"""'}), "('Larco Museum', 'Nominatim', user_agent='myuseragent')\n", (5835, 5890), True, 'import geopandas as gp\n'), ((6322, 6381), 'geopandas.GeoDataFrame.from_file', 'gp.GeoDataFrame.from_file', (['"""selected.shp"""'], {'encoding': '"""utf-8"""'}), 
"('selected.shp', encoding='utf-8')\n", (6347, 6381), True, 'import geopandas as gp\n'), ((6506, 6559), 'geopandas.sjoin', 'gp.sjoin', (['poly_map', 'gdf'], {'how': '"""inner"""', 'op': '"""intersects"""'}), "(poly_map, gdf, how='inner', op='intersects')\n", (6514, 6559), True, 'import geopandas as gp\n'), ((1138, 1158), 'folium.LatLngPopup', 'folium.LatLngPopup', ([], {}), '()\n', (1156, 1158), False, 'import folium\n'), ((1706, 1726), 'folium.LatLngPopup', 'folium.LatLngPopup', ([], {}), '()\n', (1724, 1726), False, 'import folium\n'), ((1916, 1936), 'folium.LatLngPopup', 'folium.LatLngPopup', ([], {}), '()\n', (1934, 1936), False, 'import folium\n'), ((2362, 2382), 'folium.LatLngPopup', 'folium.LatLngPopup', ([], {}), '()\n', (2380, 2382), False, 'import folium\n'), ((3480, 3500), 'folium.LatLngPopup', 'folium.LatLngPopup', ([], {}), '()\n', (3498, 3500), False, 'import folium\n'), ((4195, 4271), 'folium.GeoJson', 'folium.GeoJson', ([], {'data': 'geo_j', 'style_function': "(lambda x: {'fillColor': 'orange'})"}), "(data=geo_j, style_function=lambda x: {'fillColor': 'orange'})\n", (4209, 4271), False, 'import folium\n'), ((5116, 5136), 'folium.LatLngPopup', 'folium.LatLngPopup', ([], {}), '()\n', (5134, 5136), False, 'import folium\n'), ((6123, 6143), 'folium.LatLngPopup', 'folium.LatLngPopup', ([], {}), '()\n', (6141, 6143), False, 'import folium\n'), ((1864, 1877), 'folium.plugins.HeatMap', 'HeatMap', (['data'], {}), '(data)\n', (1871, 1877), False, 'from folium.plugins import HeatMap\n'), ((2065, 2080), 'folium.plugins.MarkerCluster', 'MarkerCluster', ([], {}), '()\n', (2078, 2080), False, 'from folium.plugins import MarkerCluster\n'), ((3198, 3254), 'folium.PolyLine', 'folium.PolyLine', (['loc'], {'weight': '(4)', 'color': '"""red"""', 'opacity': '(0.8)'}), "(loc, weight=4, color='red', opacity=0.8)\n", (3213, 3254), False, 'import folium\n'), ((3317, 3369), 'folium.Marker', 'folium.Marker', (['loc[0]'], {'popup': '"""<b>Starting Point</b>"""'}), "(loc[0], 
popup='<b>Starting Point</b>')\n", (3330, 3369), False, 'import folium\n'), ((3391, 3439), 'folium.Marker', 'folium.Marker', (['loc[-1]'], {'popup': '"""<b>End Point</b>"""'}), "(loc[-1], popup='<b>End Point</b>')\n", (3404, 3439), False, 'import folium\n'), ((4963, 4985), 'folium.GeoJson', 'folium.GeoJson', (['buffer'], {}), '(buffer)\n', (4977, 4985), False, 'import folium\n'), ((5966, 5988), 'folium.GeoJson', 'folium.GeoJson', (['buffer'], {}), '(buffer)\n', (5980, 5988), False, 'import folium\n'), ((6444, 6488), 'geopandas.points_from_xy', 'gp.points_from_xy', (['df.longitude', 'df.latitude'], {}), '(df.longitude, df.latitude)\n', (6461, 6488), True, 'import geopandas as gp\n'), ((2136, 2205), 'folium.Marker', 'folium.Marker', ([], {'location': '[Y[i], X[i]]', 'icon': 'None', 'popup': 'df.humidity[i]'}), '(location=[Y[i], X[i]], icon=None, popup=df.humidity[i])\n', (2149, 2205), False, 'import folium\n'), ((3037, 3081), 'datetime.datetime.strptime', 'datetime.strptime', (['t[2]', '"""%d/%m/%Y %H:%M:%S"""'], {}), "(t[2], '%d/%m/%Y %H:%M:%S')\n", (3054, 3081), False, 'from datetime import datetime\n'), ((4091, 4118), 'geopandas.GeoSeries', 'gp.GeoSeries', (["r['geometry']"], {}), "(r['geometry'])\n", (4103, 4118), True, 'import geopandas as gp\n'), ((4311, 4336), 'folium.Popup', 'folium.Popup', (["r['NAME_1']"], {}), "(r['NAME_1'])\n", (4323, 4336), False, 'import folium\n'), ((5040, 5074), 'folium.GeoJsonTooltip', 'folium.GeoJsonTooltip', (["['address']"], {}), "(['address'])\n", (5061, 5074), False, 'import folium\n'), ((6043, 6077), 'folium.GeoJsonTooltip', 'folium.GeoJsonTooltip', (["['address']"], {}), "(['address'])\n", (6064, 6077), False, 'import folium\n')] |
import pandas as pd
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
import os
# travel time.
# travel_training_df = pd.read_csv('dataSets/training/training_travel_time_dataset.csv', dtype=np.float32, index_col=0)
# travel_training_df = travel_training_df.dropna()
# travel_training_df.wind_direction[travel_training_df.wind_direction > 360.0] = 0.0
# travel_test_df = pd.read_csv('dataSets/testing-phase1/test1_travel_time_dataset.csv', dtype=np.float32, index_col=0)
# travel_test_df = travel_test_df.dropna()
# travel_test_df.wind_direction[travel_test_df.wind_direction > 360.0] = 0.0
# submission_df = pd.read_csv('dataSets/testing-phase1/submission_travel_time_dataset.csv', dtype=np.float32, index_col=0)
# print('before filtered dirty rows, submission size: ', submission_df.shape)
# submission_df = submission_df.dropna()
# submission_df.wind_direction[submission_df.wind_direction > 360.0] = 0.0
# print('after filtered dirty rows, submission size: ', submission_df.shape)
# submission_sample = 'dataSets/testing-phase1/submission_sample_travelTime.csv'
# ylimit = 1.0
# volume.
# Load the volume prediction datasets (train / test / submission).
travel_training_df = pd.read_csv('dataSets/training/training_volume_dataset.csv', dtype=np.float32, index_col=0)
travel_training_df = travel_training_df.dropna()
# Clamp out-of-range wind directions (> 360 deg) to 0.
# NOTE(review): chained indexing like this triggers pandas'
# SettingWithCopyWarning and may not write back on some pandas versions.
travel_training_df.wind_direction[travel_training_df.wind_direction > 360.0] = 0.0
travel_test_df = pd.read_csv('dataSets/testing-phase1/test1_volume_dataset.csv', dtype=np.float32, index_col=0)
travel_test_df = travel_test_df.dropna()
travel_test_df.wind_direction[travel_test_df.wind_direction > 360.0] = 0.0
submission_df = pd.read_csv('dataSets/testing-phase1/submission_volume_dataset.csv', dtype=np.float32, index_col=0)
print('before filtered dirty rows, submission size: ', submission_df.shape)
submission_df = submission_df.dropna()
submission_df.wind_direction[submission_df.wind_direction > 360.0] = 0.0
print('after filtered dirty rows, submission size: ', submission_df.shape)
# template file used later to produce the submission CSV
submission_sample = 'dataSets/testing-phase1/submission_sample_volume.csv'
# y-axis limit for the per-step metric plot below
ylimit = 10.0
def feature_normalize(dataset, mu=None, sigma=None):
    """Standardise *dataset* column-wise.

    When *mu*/*sigma* are omitted they are computed from *dataset* itself;
    0.1 is added to the computed std to guard against division by zero.
    Pass the training statistics to normalise test data consistently.

    Returns the normalised array together with the mu and sigma used.
    """
    mu = np.mean(dataset, axis=0) if mu is None else mu
    if sigma is None:
        sigma = 0.1 + np.std(dataset, axis=0)
    return (dataset - mu) / sigma, mu, sigma
# Feature matrices: every column except the last; the last column is the label.
# Test and submission data are normalised with the *training* mu/sigma.
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0 — this script
# requires an old pandas; .to_numpy() is the modern equivalent.
travel_training_dataset, mu, sigma = feature_normalize(travel_training_df.as_matrix(
    columns=travel_training_df.columns[: -1]))
travel_training_labels = travel_training_df.as_matrix(
    columns=[travel_training_df.columns[-1]])
travel_test_dataset, _, _ = feature_normalize(travel_test_df.as_matrix(
    columns=travel_test_df.columns[:-1]), mu=mu, sigma=sigma)
travel_test_labels = travel_test_df.as_matrix(
    columns=[travel_test_df.columns[-1]])
submission_dataset, _, _ = feature_normalize(submission_df.as_matrix(
    columns=submission_df.columns[:-1]), mu=mu, sigma=sigma)
# output filename derived from the sample name, e.g. 'dataSets/testing-phase1/submission_volume.csv'
submission_rst = submission_sample.split('_')[0] + '_' + submission_sample.split('_')[2]
train_dataset = travel_training_dataset
print('training dataset size: ', train_dataset.shape)
# labels reshaped to column vectors for the TF placeholders below
train_labels = np.reshape(travel_training_labels, newshape=[-1, 1])
test_dataset = travel_test_dataset
print('test dataset size: ', test_dataset.shape)
test_labels = np.reshape(travel_test_labels, newshape=[-1, 1])
n_dim = train_dataset.shape[1]  # number of input features
num_epochs = 10
batch_size = travel_training_dataset.shape[0] // 1000
# testing
# num_steps = 10
num_steps = travel_training_dataset.shape[0] // batch_size
report_interval = 5
cost_history = []
cost_epoch_history = []
loss_history = []
# Build the TF1 computation graph: a single linear layer (x @ W + b) trained
# with MSE loss and evaluated with a mean-absolute-percentage-style metric.
graph = tf.Graph()
with graph.as_default():
    tf_train_dataset = tf.placeholder(dtype=tf.float32, shape=[None, n_dim])
    tf_train_labels = tf.placeholder(dtype=tf.float32, shape=[None, 1])
    # NOTE(review): validation and test tensors are both fed the *test* set —
    # there is no separate validation split here; confirm this is intended.
    tf_valid_dataset = tf.constant(test_dataset)
    tf_valid_labels = tf.constant(test_labels)
    tf_test_dataset = tf.constant(test_dataset)
    tf_test_labels = tf.constant(test_labels)
    global_step = tf.Variable(0)  # count the number of steps taken.
    initial_learning_rate = 0.05
    final_learning_rate = 0.01
    decay_rate = 0.96
    # decay_steps is chosen so the rate reaches final_learning_rate after
    # roughly num_steps steps: steps / log_base(decay_rate, final/initial)
    learning_rate = tf.train.exponential_decay(initial_learning_rate, global_step, decay_rate=decay_rate,
                                               decay_steps=num_steps / (
                                                   np.log(final_learning_rate / initial_learning_rate) / np.log(
                                                       decay_rate)))
    weights = tf.get_variable('weights', [n_dim, 1], initializer=tf.contrib.layers.xavier_initializer())
    bias = tf.Variable(tf.ones(shape=[1]))
    # MSE loss
    # loss = tf.reduce_mean(tf.square(tf.divide(tf.nn.xw_plus_b(tf_train_dataset, weights, bias) - tf_train_labels,
    #                                            tf_train_labels)))
    predicts = tf.nn.xw_plus_b(tf_train_dataset, weights, bias)
    loss = tf.reduce_mean(tf.square(predicts - tf_train_labels))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step)
    # MSPE metric
    training_metric = tf.reduce_mean(tf.abs(tf.divide(tf.nn.xw_plus_b(tf_train_dataset, weights, bias) - tf_train_labels,
                                                 tf_train_labels)))
    validation_metric = tf.reduce_mean(tf.abs(tf.divide(tf.nn.xw_plus_b(tf_valid_dataset, weights, bias) - tf_valid_labels,
                                                   tf_valid_labels)))
    # test predictions are clipped at 0 (volumes cannot be negative)
    test_metric = tf.reduce_mean(tf.abs(tf.divide(tf.maximum(tf.nn.xw_plus_b(tf_test_dataset, weights, bias), 0.0) - tf_test_labels,
                                             tf_test_labels)))
# Train the model, plot the loss/metric history, then predict the submission
# set and write the submission CSV.
with tf.Session(graph=graph) as sess:
    tf.global_variables_initializer().run()
    for epoch in range(num_epochs):
        # reshuffle the training data every epoch
        shuffle = np.random.permutation(train_dataset.shape[0])
        train_dataset = train_dataset[shuffle]
        train_labels = train_labels[shuffle]
        for step in range(num_steps):
            offset = batch_size * step % (train_labels.shape[0] - batch_size)
            batch_data = train_dataset[offset:(offset + batch_size), :]
            batch_labels = train_labels[offset:(offset + batch_size), :]
            feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
            _, l, tm = sess.run(fetches=[optimizer, loss, training_metric], feed_dict=feed_dict)
            # NOTE(review): `step % report_interval` is truthy on steps NOT
            # divisible by the interval — this prints on 4 of every 5 steps;
            # `step % report_interval == 0` was probably intended.
            if step % report_interval:
                print('Minibatch loss at step %d: %.4f' % (step, l))
                print('Minibatch metric: %.4f' % tm)
                print('Validation metric: %.4f\n' % validation_metric.eval())
            cost_history.append(tm)
            loss_history.append(l)
        print('Test metric: %.4f' % test_metric.eval())
        cost_epoch_history.append(test_metric.eval())
    # three stacked plots: raw loss, per-step metric, per-epoch test metric
    fig, (ax1, ax2, ax3) = plt.subplots(ncols=1, nrows=3)
    ax1.plot(range(len(loss_history)), loss_history)
    ax1.set_xlim([0, len(loss_history)])
    ax1.set_ylim([0, np.max(loss_history)])
    ax2.plot(range(len(cost_history)), cost_history)
    ax2.set_xlim([0, len(cost_history)])
    ax2.set_ylim([0, ylimit])
    ax3.scatter(range(len(cost_epoch_history)), cost_epoch_history)
    ax3.set_xlim([0, len(cost_epoch_history)])
    ax3.set_ylim([0, 1.0])
    plt.show()
    # predict
    preds, = sess.run([predicts], feed_dict={tf_train_dataset: submission_dataset})
    # generate submission: copy each sample row, replacing the last column
    # with the model's prediction formatted to two decimals
    with open(submission_sample, 'r') as f_in:
        with open(submission_rst, 'w') as f_out:
            idx = 0
            for line in f_in:
                if idx == 0:
                    f_out.write(line.rstrip() + os.linesep)
                else:
                    line = line.rstrip().rsplit(',', maxsplit=1)[0]
                    pre = str.format('%.2f' % preds[idx-1, 0])
                    f_out.write(line + ',' + pre + os.linesep)
                idx += 1
print('finished prediction.')
| [
"pandas.read_csv",
"numpy.log",
"tensorflow.Graph",
"numpy.mean",
"numpy.reshape",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.max",
"tensorflow.square",
"numpy.random.permutation",
"tensorflow.Variable",
"tensorflow.nn.xw_plus_b",
"tensorflow.train.GradientDescentOptimizer",
"n... | [((1144, 1240), 'pandas.read_csv', 'pd.read_csv', (['"""dataSets/training/training_volume_dataset.csv"""'], {'dtype': 'np.float32', 'index_col': '(0)'}), "('dataSets/training/training_volume_dataset.csv', dtype=np.\n float32, index_col=0)\n", (1155, 1240), True, 'import pandas as pd\n'), ((1385, 1484), 'pandas.read_csv', 'pd.read_csv', (['"""dataSets/testing-phase1/test1_volume_dataset.csv"""'], {'dtype': 'np.float32', 'index_col': '(0)'}), "('dataSets/testing-phase1/test1_volume_dataset.csv', dtype=np.\n float32, index_col=0)\n", (1396, 1484), True, 'import pandas as pd\n'), ((1612, 1716), 'pandas.read_csv', 'pd.read_csv', (['"""dataSets/testing-phase1/submission_volume_dataset.csv"""'], {'dtype': 'np.float32', 'index_col': '(0)'}), "('dataSets/testing-phase1/submission_volume_dataset.csv', dtype=\n np.float32, index_col=0)\n", (1623, 1716), True, 'import pandas as pd\n'), ((3104, 3156), 'numpy.reshape', 'np.reshape', (['travel_training_labels'], {'newshape': '[-1, 1]'}), '(travel_training_labels, newshape=[-1, 1])\n', (3114, 3156), True, 'import numpy as np\n'), ((3255, 3303), 'numpy.reshape', 'np.reshape', (['travel_test_labels'], {'newshape': '[-1, 1]'}), '(travel_test_labels, newshape=[-1, 1])\n', (3265, 3303), True, 'import numpy as np\n'), ((3582, 3592), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (3590, 3592), True, 'import tensorflow as tf\n'), ((3641, 3694), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, n_dim]'}), '(dtype=tf.float32, shape=[None, n_dim])\n', (3655, 3694), True, 'import tensorflow as tf\n'), ((3717, 3766), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, 1]'}), '(dtype=tf.float32, shape=[None, 1])\n', (3731, 3766), True, 'import tensorflow as tf\n'), ((3790, 3815), 'tensorflow.constant', 'tf.constant', (['test_dataset'], {}), '(test_dataset)\n', (3801, 3815), True, 'import tensorflow as tf\n'), ((3838, 3862), 'tensorflow.constant', 
'tf.constant', (['test_labels'], {}), '(test_labels)\n', (3849, 3862), True, 'import tensorflow as tf\n'), ((3885, 3910), 'tensorflow.constant', 'tf.constant', (['test_dataset'], {}), '(test_dataset)\n', (3896, 3910), True, 'import tensorflow as tf\n'), ((3932, 3956), 'tensorflow.constant', 'tf.constant', (['test_labels'], {}), '(test_labels)\n', (3943, 3956), True, 'import tensorflow as tf\n'), ((3976, 3990), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {}), '(0)\n', (3987, 3990), True, 'import tensorflow as tf\n'), ((4829, 4877), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['tf_train_dataset', 'weights', 'bias'], {}), '(tf_train_dataset, weights, bias)\n', (4844, 4877), True, 'import tensorflow as tf\n'), ((2151, 2175), 'numpy.mean', 'np.mean', (['dataset'], {'axis': '(0)'}), '(dataset, axis=0)\n', (2158, 2175), True, 'import numpy as np\n'), ((4595, 4613), 'tensorflow.ones', 'tf.ones', ([], {'shape': '[1]'}), '(shape=[1])\n', (4602, 4613), True, 'import tensorflow as tf\n'), ((4904, 4941), 'tensorflow.square', 'tf.square', (['(predicts - tf_train_labels)'], {}), '(predicts - tf_train_labels)\n', (4913, 4941), True, 'import tensorflow as tf\n'), ((5661, 5684), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (5671, 5684), True, 'import tensorflow as tf\n'), ((6902, 6932), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'nrows': '(3)'}), '(ncols=1, nrows=3)\n', (6914, 6932), True, 'from matplotlib import pyplot as plt\n'), ((7381, 7391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7389, 7391), True, 'from matplotlib import pyplot as plt\n'), ((2214, 2237), 'numpy.std', 'np.std', (['dataset'], {'axis': '(0)'}), '(dataset, axis=0)\n', (2220, 2237), True, 'import numpy as np\n'), ((4532, 4570), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (4568, 4570), True, 'import tensorflow as tf\n'), ((4960, 5008), 
'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (4993, 5008), True, 'import tensorflow as tf\n'), ((5804, 5849), 'numpy.random.permutation', 'np.random.permutation', (['train_dataset.shape[0]'], {}), '(train_dataset.shape[0])\n', (5825, 5849), True, 'import numpy as np\n'), ((5702, 5735), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5733, 5735), True, 'import tensorflow as tf\n'), ((7060, 7080), 'numpy.max', 'np.max', (['loss_history'], {}), '(loss_history)\n', (7066, 7080), True, 'import numpy as np\n'), ((4339, 4390), 'numpy.log', 'np.log', (['(final_learning_rate / initial_learning_rate)'], {}), '(final_learning_rate / initial_learning_rate)\n', (4345, 4390), True, 'import numpy as np\n'), ((4393, 4411), 'numpy.log', 'np.log', (['decay_rate'], {}), '(decay_rate)\n', (4399, 4411), True, 'import numpy as np\n'), ((5110, 5158), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['tf_train_dataset', 'weights', 'bias'], {}), '(tf_train_dataset, weights, bias)\n', (5125, 5158), True, 'import tensorflow as tf\n'), ((5307, 5355), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['tf_valid_dataset', 'weights', 'bias'], {}), '(tf_valid_dataset, weights, bias)\n', (5322, 5355), True, 'import tensorflow as tf\n'), ((5511, 5558), 'tensorflow.nn.xw_plus_b', 'tf.nn.xw_plus_b', (['tf_test_dataset', 'weights', 'bias'], {}), '(tf_test_dataset, weights, bias)\n', (5526, 5558), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""Slit devices in AMOR"""
from numpy import arctan, degrees, radians, tan
from nicos import session
from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, \
Moveable, Override, Param, Readable, dictwith, oneof, status
from nicos.core.utils import multiStatus
from nicos.devices.generic.slit import Slit, SlitAxis as DefaultSlitAxis
from nicos_sinq.amor.devices.logical_motor import AmorLogicalMotor, \
InterfaceLogicalMotorHandler
class SlitOpening(HasPrecision, DefaultSlitAxis):
    """Device to control the slit opening/height.

    Motor dXt moves the slit's top slab, in turn changing the slit
    opening. Motor dXb changes the position of the whole slit, moving it
    up or down (X is the slit number).

    This device reads the current opening using the motor dXt and changes
    the opening using a combination of the motors dXt and dXb such that
    the centre remains aligned.
    """
    parameter_overrides = {
        'unit': Override(mandatory=False, default='mm'),
        'fmtstr': Override(userparam=False),
        'maxage': Override(userparam=False),
        'pollinterval': Override(userparam=False),
        'warnlimits': Override(userparam=False),
        'precision': Override(userparam=False, default=0.01),
        'target': Override(volatile=True)
    }
    # map of status codes to user-facing messages; '%s' is replaced with the
    # names of the devices that are in that state
    status_to_msg = {
        status.ERROR: 'Error in %s',
        status.BUSY: 'Moving: %s ...',
        status.WARN: 'Warning in %s',
        status.NOTREACHED: '%s did not reach target!',
        status.UNKNOWN: 'Unknown status in %s!',
        status.OK: 'Ready.'
    }
    def doReadTarget(self):
        # Do not allow None as target
        target = self._getFromCache('target', self.doRead)
        return target if target is not None else self.doRead(0)
    def _convertRead(self, positions):
        # the opening is the fourth element of the slit position tuple
        return positions[3]
    def _convertStart(self, target, current):
        # shift the bottom by half of the opening change so that the slit
        # centre stays where it is while only the opening changes
        current_opening = current[3]
        current_bottom = current[2]
        new_bottom = current_bottom + 0.5 * (current_opening - target)
        return current[0], current[1], new_bottom, target
    def doStatus(self, maxage=0):
        # Check for error and warning in the dependent devices
        st_devs = multiStatus(self._adevs, maxage)
        # collect the names of all attached devices sharing the worst status
        devs = [dname for dname, d in self._adevs.items()
                if d.status()[0] == st_devs[0]]
        if st_devs[0] in self.status_to_msg:
            msg = self.status_to_msg[st_devs[0]]
            if '%' in msg:
                msg = msg % ', '.join(devs)
            return st_devs[0], msg
        return st_devs
def read_divergence(distance, slit):
    """Convert the blade positions of diaphragm 1 into beam divergences.

    *distance* is the diaphragm-to-reference distance, *slit* the
    (left, right, bottom, top) blade positions.  Returns the total
    vertical divergence ``div``, the vertical displacement angle ``did``
    and the horizontal divergence ``dih``, all in degrees.
    """
    left, right, bottom, top = slit
    top_angle = arctan(top / distance)
    bottom_angle = arctan(bottom / distance)
    horizontal_angle = 2 * arctan((left + right) / distance)
    return {
        'div': degrees(top_angle + bottom_angle),
        'did': degrees((top_angle - bottom_angle) / 2),
        'dih': degrees(horizontal_angle),
    }
def read_beam_shaping(slit, diaphragm_index):
    """Translate the (left, right, bottom, top) blade positions of
    diaphragm *diaphragm_index* into the opening height ``dNv``,
    vertical offset ``dNd`` and opening width ``dNh``."""
    left, right, bottom, top = slit
    prefix = f'd{diaphragm_index}'
    return {
        prefix + 'v': top + bottom,
        prefix + 'd': (top - bottom) / 2,
        prefix + 'h': left + right,
    }
class AmorSlitHandler(InterfaceLogicalMotorHandler):
    """Logical motor controller for the three AMOR diaphragms.

    Translates logical targets (divergences for diaphragm 1, opening
    height/offset/width for diaphragms 2 and 3) into positions of the
    underlying slit blade and lift motors.
    """

    attached_devices = {
        'xs': Attach('Sample x position', Readable, missingok=True,
                     optional=True),
        'mu': Attach('Sample omega', Readable, missingok=True,
                     optional=True),
        'nu': Attach('Sample omega', Readable, missingok=True,
                     optional=True),
        'ltz': Attach('Sample x position', Readable, missingok=True,
                      optional=True),
        'xd2': Attach('Sample x position', Readable, missingok=True,
                      optional=True),
        'xl': Attach('Deflector x position', Readable, missingok=True,
                     optional=True),
        'mu_offset': Attach('Sample x position', Readable, missingok=True,
                            optional=True),
        'kappa': Attach('Inclination of the beam after the Selene guide',
                        Readable, missingok=True, optional=True),
        'soz_ideal': Attach('Ideal sample omega', Readable, missingok=True,
                            optional=True),
        'xd3': Attach('', Readable, missingok=True, optional=True),
        'slit1': Attach('slit 1', Slit, missingok=True, optional=True),
        'slit2': Attach('slit 2', Slit, missingok=True, optional=True),
        'slit2z': Attach('Z motor for slit 2', Readable, missingok=True,
                         optional=True),
        'slit3': Attach('slit 3', Slit, missingok=True, optional=True),
        'slit3z': Attach('Z motor for slit 3', Readable, missingok=True,
                         optional=True),
    }

    def doPreinit(self, mode):
        # slit devices whose states feed the combined handler status
        self._status_devs = ['slit1', 'slit2', 'slit2z', 'slit3', 'slit3z']
        InterfaceLogicalMotorHandler.doPreinit(self, mode)
        self.valuetype = dictwith(div=float, did=float, dih=float, d2v=float,
                                  d2d=float, d2h=float, d3v=float, d3d=float,
                                  d3h=float)

    def doRead(self, maxage=0):
        """Return the logical motor values for all active diaphragms."""
        result = {}
        if self._is_active('diaphragm1'):
            result.update(read_divergence(
                self._read_dev('xs'),
                self._read_dev('slit1')
            ))
        if self._is_active('diaphragm2'):
            result.update(read_beam_shaping(self._read_dev('slit2'), 2))
        if self._is_active('diaphragm3'):
            result.update(read_beam_shaping(self._read_dev('slit3'), 3))
        return result

    def _get_move_list(self, targets):
        """Map logical *targets* onto a list of (device, position) moves."""
        positions = []
        if self._is_active('diaphragm1'):
            distance = self._read_dev('xs')
            # fall back to the current logical values for unspecified targets
            div = targets.get('div') or session.getDevice('div').read()
            did = targets.get('did') or session.getDevice('did').read()
            dih = targets.get('dih') or session.getDevice('dih').read()
            top = distance * tan(radians(0.5 * div + did))
            bottom = distance * tan(radians(0.5 * div - did))
            horizontal = distance * tan(.5 * radians(dih))
            # NOTE(review): slit1 gets (left, right, bottom, top) while
            # slits 2/3 below get (top, bottom, left, right) — confirm the
            # blade ordering against the Slit coordinate configuration.
            positions.extend([(self._get_dev('slit1'),
                               (horizontal, horizontal, bottom, top))
                              ])
        if self._is_active('diaphragm2'):
            d2v = targets.get('d2v') or self._read_dev('d2v')
            d2d = targets.get('d2d') or self._read_dev('d2d')
            d2h = targets.get('d2h') or self._read_dev('d2h')
            top = 0.5 * d2v + d2d
            bottom = 0.5 * d2v - d2d
            horizontal = .5 * d2h
            distance = self._read_dev('xd2')
            kappa = self._read_dev('kappa')
            if self._is_active('deflector'):
                # follow the deflected beam from the deflector position
                ltz = self._read_dev('ltz')
                xl = self._read_dev('xl')
                mu_offset = self._read_dev('mu_offset')
                z = ltz - (distance - xl) * tan(radians(self._read_dev(
                    'mu') + mu_offset))
            else:
                # follow the undeflected beam leaving the Selene guide
                z = distance * tan(radians(kappa))
            positions.extend([(self._get_dev('slit2z'), z),
                              (self._get_dev('slit2'),
                               (top, bottom, horizontal, horizontal))
                              ])
        if self._is_active('diaphragm3'):
            # consistency fix: fall back to the current values like the
            # diaphragm2 branch does instead of propagating None into the
            # arithmetic below when a target is not given
            d3v = targets.get('d3v') or self._read_dev('d3v')
            d3d = targets.get('d3d') or self._read_dev('d3d')
            d3h = targets.get('d3h') or self._read_dev('d3h')
            top = 0.5 * d3v + d3d
            bottom = 0.5 * d3v - d3d
            horizontal = .5 * d3h
            soz_ideal = self._read_dev('soz_ideal')
            xd3 = self._read_dev('xd3')
            nu = self._read_dev('nu')
            xs = self._read_dev('xs')
            kappa = self._read_dev('kappa')
            z = soz_ideal + (xd3 - xs) * tan(radians(nu + kappa))
            # bug fix: this branch previously moved slit2z/slit2 (copy-paste
            # from the diaphragm2 branch) although it computes the
            # diaphragm 3 lift and blade positions
            positions.extend([(self._get_dev('slit3z'), z),
                              (self._get_dev('slit3'),
                               (top, bottom, horizontal, horizontal))
                              ])
        return positions
motortypes = ['div', 'dih', 'did', 'd2v', 'd2h', 'd2d', 'd3v', 'd3h', 'd3d']
class AmorSlitLogicalMotor(AmorLogicalMotor):
    """Logical slit motor for AMOR.

    One instance represents a single logical quantity (one of
    ``motortypes``); the attached :class:`AmorSlitHandler` performs the
    translation to the physical blade motors.
    """
    parameters = {
        'motortype': Param('Type of motor %s' % ','.join(motortypes),
                           type=oneof(*motortypes), mandatory=True),
    }
    parameter_overrides = {
        'unit': Override(mandatory=False, default='degree'),
        'target': Override(volatile=True),
        'abslimits': Override(mandatory=False),
        'userlimits': Override(mandatory=False)
    }
    attached_devices = {
        'controller': Attach('Controller for the logical motors',
                              AmorSlitHandler)
    }
class SlitAxis(DefaultSlitAxis):
    """
    The diaphragm consists of 4 blades, much like a standard slit. The use
    case is different though: one is not interested in the width and height
    but in the divergence.
    In addition to the slit, this slit axis attaches a controller that
    converts the position of the 4 blades to different quantities related to
    the beam divergence.
    """
    attached_devices = {
        # supplies the blade-to-sample distance needed for the conversion
        'controller': Attach('Controller, used to connect slit and distance '
                              'to the axis', Device),
    }
class DivergenceAperture(HasAutoDevices, Device):
    """
    Slit1 is fix mounted behind the instrument shutter and can not be moved as
    a whole. Its center is by definition the origin of the instrument
    coordinate system.
    The corresponding virtual devices are divergences and angles.
    """
    class VerticalDisplacement(SlitAxis):
        """
        Defines the vertical displacement (angular) of the beam incident on the
        sample
        """
        def _convertRead(self, positions):
            # half the difference of the top/bottom blade angles seen from
            # the attached distance (sample position)
            distance = self._attached_controller._attached_distance.read()
            s = arctan(positions[3] / distance)
            d = arctan(positions[2] / distance)
            return .5 * degrees(s - d)
        def _convertStart(self, target, current):
            # keep the current vertical divergence while shifting the
            # centre angle to *target*
            distance = self._attached_controller._attached_distance.read()
            vertical = self._attached_controller.vertical.read()
            top = distance * tan(radians(0.5 * vertical + target))
            bottom = distance * tan(radians(0.5 * vertical - target))
            return [current[0], current[1], bottom, top]
    class VerticalDivergence(SlitAxis):
        """
        Defines the vertical divergence of the beam incident on the sample
        """
        def _convertRead(self, positions):
            # total opening angle spanned by the top and bottom blades
            distance = self._attached_controller._attached_distance.read()
            s = arctan(positions[3] / distance)
            d = arctan(positions[2] / distance)
            return degrees(s + d)
        def _convertStart(self, target, current):
            # keep the current displacement while opening the blades to the
            # new divergence *target*
            distance = self._attached_controller._attached_distance.read()
            divergence = self._attached_controller.displacement.read()
            top = distance * tan(radians(0.5 * target + divergence))
            bottom = distance * tan(radians(0.5 * target - divergence))
            return [current[0], current[1], bottom, top]
    class HorizontalDivergence(SlitAxis):
        """
        Defines the horiziontal divergence of the beam incident on the sample
        """
        def _convertRead(self, positions):
            distance = self._attached_controller._attached_distance.read()
            return degrees(2 * arctan(positions[0] / distance))
        def _convertStart(self, target, current):
            # symmetric horizontal opening: both blades get the same value
            distance = self._attached_controller._attached_distance.read()
            tgt = list(current)
            tgt[:2] = [distance * tan(.5 * radians(target))] * 2
            return tgt
    attached_devices = {
        'slit': Attach('Corresponding slit', Slit),
        'distance': Attach('Sample x position', Moveable),
    }
    parameter_overrides = {
        'unit': Override(mandatory=False, default='degree'),
    }
    def doInit(self, mode):
        # create the three virtual axes as auto devices on this controller
        for name, cls in [
            ('displacement', DivergenceAperture.VerticalDisplacement),
            ('vertical', DivergenceAperture.VerticalDivergence),
            ('horizontal', DivergenceAperture.HorizontalDivergence),
        ]:
            self.add_autodevice(name, cls,
                                slit=self._attached_slit,
                                controller=self,
                                visibility=self.autodevice_visibility,
                                unit='deg')
| [
"numpy.radians",
"nicos.core.Override",
"nicos.session.getDevice",
"nicos.core.dictwith",
"nicos.core.utils.multiStatus",
"nicos_sinq.amor.devices.logical_motor.InterfaceLogicalMotorHandler.doPreinit",
"nicos.core.Attach",
"numpy.degrees",
"nicos.core.oneof",
"numpy.arctan"
] | [((3738, 3760), 'numpy.arctan', 'arctan', (['(top / distance)'], {}), '(top / distance)\n', (3744, 3760), False, 'from numpy import arctan, degrees, radians, tan\n'), ((3769, 3794), 'numpy.arctan', 'arctan', (['(bottom / distance)'], {}), '(bottom / distance)\n', (3775, 3794), False, 'from numpy import arctan, degrees, radians, tan\n'), ((2052, 2091), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '"""mm"""'}), "(mandatory=False, default='mm')\n", (2060, 2091), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((2111, 2136), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2119, 2136), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((2156, 2181), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2164, 2181), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((2207, 2232), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2215, 2232), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((2256, 2281), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)'}), '(userparam=False)\n', (2264, 2281), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((2304, 2343), 'nicos.core.Override', 'Override', ([], {'userparam': '(False)', 'default': '(0.01)'}), '(userparam=False, default=0.01)\n', (2312, 2343), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((2363, 
2386), 'nicos.core.Override', 'Override', ([], {'volatile': '(True)'}), '(volatile=True)\n', (2371, 2386), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((3291, 3323), 'nicos.core.utils.multiStatus', 'multiStatus', (['self._adevs', 'maxage'], {}), '(self._adevs, maxage)\n', (3302, 3323), False, 'from nicos.core.utils import multiStatus\n'), ((3807, 3840), 'numpy.arctan', 'arctan', (['((left + right) / distance)'], {}), '((left + right) / distance)\n', (3813, 3840), False, 'from numpy import arctan, degrees, radians, tan\n'), ((3866, 3880), 'numpy.degrees', 'degrees', (['(s + d)'], {}), '(s + d)\n', (3873, 3880), False, 'from numpy import arctan, degrees, radians, tan\n'), ((3895, 3915), 'numpy.degrees', 'degrees', (['((s - d) / 2)'], {}), '((s - d) / 2)\n', (3902, 3915), False, 'from numpy import arctan, degrees, radians, tan\n'), ((3928, 3938), 'numpy.degrees', 'degrees', (['h'], {}), '(h)\n', (3935, 3938), False, 'from numpy import arctan, degrees, radians, tan\n'), ((4279, 4347), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4285, 4347), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((4385, 4448), 'nicos.core.Attach', 'Attach', (['"""Sample omega"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample omega', Readable, missingok=True, optional=True)\n", (4391, 4448), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((4485, 4548), 'nicos.core.Attach', 'Attach', (['"""Sample omega"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample omega', Readable, missingok=True, optional=True)\n", (4491, 4548), 
False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((4586, 4654), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4592, 4654), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((4693, 4761), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4699, 4761), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((4799, 4870), 'nicos.core.Attach', 'Attach', (['"""Deflector x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Deflector x position', Readable, missingok=True, optional=True)\n", (4805, 4870), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((4914, 4982), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Sample x position', Readable, missingok=True, optional=True)\n", (4920, 4982), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((5029, 5130), 'nicos.core.Attach', 'Attach', (['"""Inclination of the beam after the Selene guide"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Inclination of the beam after the Selene guide', Readable,\n missingok=True, optional=True)\n", (5035, 5130), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, 
oneof, status\n'), ((5173, 5242), 'nicos.core.Attach', 'Attach', (['"""Ideal sample omega"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Ideal sample omega', Readable, missingok=True, optional=True)\n", (5179, 5242), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((5287, 5338), 'nicos.core.Attach', 'Attach', (['""""""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('', Readable, missingok=True, optional=True)\n", (5293, 5338), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((5357, 5410), 'nicos.core.Attach', 'Attach', (['"""slit 1"""', 'Slit'], {'missingok': '(True)', 'optional': '(True)'}), "('slit 1', Slit, missingok=True, optional=True)\n", (5363, 5410), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((5429, 5482), 'nicos.core.Attach', 'Attach', (['"""slit 2"""', 'Slit'], {'missingok': '(True)', 'optional': '(True)'}), "('slit 2', Slit, missingok=True, optional=True)\n", (5435, 5482), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((5502, 5571), 'nicos.core.Attach', 'Attach', (['"""Z motor for slit 2"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Z motor for slit 2', Readable, missingok=True, optional=True)\n", (5508, 5571), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((5615, 5668), 'nicos.core.Attach', 'Attach', (['"""slit 3"""', 'Slit'], {'missingok': '(True)', 'optional': '(True)'}), "('slit 3', Slit, missingok=True, optional=True)\n", (5621, 5668), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, 
Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((5688, 5757), 'nicos.core.Attach', 'Attach', (['"""Z motor for slit 3"""', 'Readable'], {'missingok': '(True)', 'optional': '(True)'}), "('Z motor for slit 3', Readable, missingok=True, optional=True)\n", (5694, 5757), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((5906, 5956), 'nicos_sinq.amor.devices.logical_motor.InterfaceLogicalMotorHandler.doPreinit', 'InterfaceLogicalMotorHandler.doPreinit', (['self', 'mode'], {}), '(self, mode)\n', (5944, 5956), False, 'from nicos_sinq.amor.devices.logical_motor import AmorLogicalMotor, InterfaceLogicalMotorHandler\n'), ((5982, 6093), 'nicos.core.dictwith', 'dictwith', ([], {'div': 'float', 'did': 'float', 'dih': 'float', 'd2v': 'float', 'd2d': 'float', 'd2h': 'float', 'd3v': 'float', 'd3d': 'float', 'd3h': 'float'}), '(div=float, did=float, dih=float, d2v=float, d2d=float, d2h=float,\n d3v=float, d3d=float, d3h=float)\n', (5990, 6093), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((9543, 9586), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '"""degree"""'}), "(mandatory=False, default='degree')\n", (9551, 9586), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((9606, 9629), 'nicos.core.Override', 'Override', ([], {'volatile': '(True)'}), '(volatile=True)\n', (9614, 9629), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((9652, 9677), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)'}), '(mandatory=False)\n', (9660, 9677), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, 
dictwith, oneof, status\n'), ((9701, 9726), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)'}), '(mandatory=False)\n', (9709, 9726), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((9781, 9841), 'nicos.core.Attach', 'Attach', (['"""Controller for the logical motors"""', 'AmorSlitHandler'], {}), "('Controller for the logical motors', AmorSlitHandler)\n", (9787, 9841), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((10325, 10400), 'nicos.core.Attach', 'Attach', (['"""Controller, used to connect slit and distance to the axis"""', 'Device'], {}), "('Controller, used to connect slit and distance to the axis', Device)\n", (10331, 10400), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((12947, 12981), 'nicos.core.Attach', 'Attach', (['"""Corresponding slit"""', 'Slit'], {}), "('Corresponding slit', Slit)\n", (12953, 12981), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((13003, 13040), 'nicos.core.Attach', 'Attach', (['"""Sample x position"""', 'Moveable'], {}), "('Sample x position', Moveable)\n", (13009, 13040), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((13093, 13136), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)', 'default': '"""degree"""'}), "(mandatory=False, default='degree')\n", (13101, 13136), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((11043, 11074), 'numpy.arctan', 'arctan', (['(positions[3] / distance)'], {}), '(positions[3] / distance)\n', (11049, 
11074), False, 'from numpy import arctan, degrees, radians, tan\n'), ((11091, 11122), 'numpy.arctan', 'arctan', (['(positions[2] / distance)'], {}), '(positions[2] / distance)\n', (11097, 11122), False, 'from numpy import arctan, degrees, radians, tan\n'), ((11822, 11853), 'numpy.arctan', 'arctan', (['(positions[3] / distance)'], {}), '(positions[3] / distance)\n', (11828, 11853), False, 'from numpy import arctan, degrees, radians, tan\n'), ((11870, 11901), 'numpy.arctan', 'arctan', (['(positions[2] / distance)'], {}), '(positions[2] / distance)\n', (11876, 11901), False, 'from numpy import arctan, degrees, radians, tan\n'), ((11921, 11935), 'numpy.degrees', 'degrees', (['(s + d)'], {}), '(s + d)\n', (11928, 11935), False, 'from numpy import arctan, degrees, radians, tan\n'), ((9455, 9473), 'nicos.core.oneof', 'oneof', (['*motortypes'], {}), '(*motortypes)\n', (9460, 9473), False, 'from nicos.core import Attach, Device, HasAutoDevices, HasPrecision, Moveable, Override, Param, Readable, dictwith, oneof, status\n'), ((11147, 11161), 'numpy.degrees', 'degrees', (['(s - d)'], {}), '(s - d)\n', (11154, 11161), False, 'from numpy import arctan, degrees, radians, tan\n'), ((7042, 7066), 'numpy.radians', 'radians', (['(0.5 * div + did)'], {}), '(0.5 * div + did)\n', (7049, 7066), False, 'from numpy import arctan, degrees, radians, tan\n'), ((7104, 7128), 'numpy.radians', 'radians', (['(0.5 * div - did)'], {}), '(0.5 * div - did)\n', (7111, 7128), False, 'from numpy import arctan, degrees, radians, tan\n'), ((11386, 11418), 'numpy.radians', 'radians', (['(0.5 * vertical + target)'], {}), '(0.5 * vertical + target)\n', (11393, 11418), False, 'from numpy import arctan, degrees, radians, tan\n'), ((11456, 11488), 'numpy.radians', 'radians', (['(0.5 * vertical - target)'], {}), '(0.5 * vertical - target)\n', (11463, 11488), False, 'from numpy import arctan, degrees, radians, tan\n'), ((12166, 12200), 'numpy.radians', 'radians', (['(0.5 * target + divergence)'], {}), '(0.5 * 
target + divergence)\n', (12173, 12200), False, 'from numpy import arctan, degrees, radians, tan\n'), ((12238, 12272), 'numpy.radians', 'radians', (['(0.5 * target - divergence)'], {}), '(0.5 * target - divergence)\n', (12245, 12272), False, 'from numpy import arctan, degrees, radians, tan\n'), ((12626, 12657), 'numpy.arctan', 'arctan', (['(positions[0] / distance)'], {}), '(positions[0] / distance)\n', (12632, 12657), False, 'from numpy import arctan, degrees, radians, tan\n'), ((6832, 6856), 'nicos.session.getDevice', 'session.getDevice', (['"""div"""'], {}), "('div')\n", (6849, 6856), False, 'from nicos import session\n'), ((6904, 6928), 'nicos.session.getDevice', 'session.getDevice', (['"""did"""'], {}), "('did')\n", (6921, 6928), False, 'from nicos import session\n'), ((6976, 7000), 'nicos.session.getDevice', 'session.getDevice', (['"""dih"""'], {}), "('dih')\n", (6993, 7000), False, 'from nicos import session\n'), ((7175, 7187), 'numpy.radians', 'radians', (['dih'], {}), '(dih)\n', (7182, 7187), False, 'from numpy import arctan, degrees, radians, tan\n'), ((8122, 8136), 'numpy.radians', 'radians', (['kappa'], {}), '(kappa)\n', (8129, 8136), False, 'from numpy import arctan, degrees, radians, tan\n'), ((8873, 8892), 'numpy.radians', 'radians', (['(nu + kappa)'], {}), '(nu + kappa)\n', (8880, 8892), False, 'from numpy import arctan, degrees, radians, tan\n'), ((12860, 12875), 'numpy.radians', 'radians', (['target'], {}), '(target)\n', (12867, 12875), False, 'from numpy import arctan, degrees, radians, tan\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2019-05-13 at 11:28
@author: cook
"""
import numpy as np
import warnings
import os
from scipy.ndimage import filters
from apero import core
from apero.core import constants
from apero import lang
from apero.core import math as mp
from apero.core.core import drs_log
from apero.core.core import drs_file
from apero.core.core import drs_database
from apero.io import drs_fits
from apero.io import drs_data
# =============================================================================
# Define variables
# =============================================================================
# module identity used in error/log messages
__NAME__ = 'science.calib.badpix.py'
__INSTRUMENT__ = 'None'
# Get constants (instrument-independent defaults here)
Constants = constants.load(__INSTRUMENT__)
# Get version and author
__version__ = Constants['DRS_VERSION']
__author__ = Constants['AUTHORS']
__date__ = Constants['DRS_DATE']
__release__ = Constants['DRS_RELEASE']
# get param dict
ParamDict = constants.ParamDict
DrsFitsFile = drs_file.DrsFitsFile
# Get Logging function
WLOG = drs_log.wlog
# Get the text types (language-database backed messages)
TextEntry = lang.drs_text.TextEntry
TextDict = lang.drs_text.TextDict
# alias pcheck (parameter/kwarg resolution helper)
pcheck = core.pcheck
# =============================================================================
# Define functions
# =============================================================================
def normalise_median_flat(params, image, method='new', **kwargs):
    """
    Applies a median filter and normalises. Median filter is applied with width
    "wmed" or p["BADPIX_FLAT_MED_WID"] if wmed is None) and then normalising by
    the 90th percentile

    :param params: parameter dictionary, ParamDict containing constants
        Must contain at least:
            BADPIX_FLAT_MED_WID: float, the median image in the x
                                 dimension over a boxcar of this width
            BADPIX_NORM_PERCENTILE: float, the percentile to normalise
                                    to when normalising and median
                                    filtering image
    :param image: numpy array (2D), the image to median filter and normalise
    :param method: string, "new" or "old" if "new" uses np.nanpercentile else
                   sorts the flattened image and takes the "percentile" (i.e.
                   90th) pixel value to normalise
    :param kwargs: keyword arguments
        :keyword wmed: float or None, median filter width, overrides
                       params["BADPIX_FLAT_MED_WID"]
        :keyword percentile: float or None, normalisation percentile,
                             overrides params["BADPIX_NORM_PERCENTILE"]

    :return norm_med_image: numpy array (2D), the median filtered and
                            normalised image
    :return norm_image: numpy array (2D), the normalised image
    """
    func_name = __NAME__ + '.normalise_median_flat()'
    # log that we are normalising the flat
    WLOG(params, '', TextEntry('40-012-00001'))
    # get used percentile
    percentile = pcheck(params, 'BADPIX_NORM_PERCENTILE', 'percentile', kwargs,
                        func_name)
    # wmed: We construct a "simili" flat by taking the running median of the
    #       flag in the x dimension over a boxcar width of wmed (suggested
    #       value of ~7 pix). This assumes that the flux level varies only by
    #       a small amount over wmed pixels and that the badpixels are
    #       isolated enough that the median along that box will be
    #       representative of the flux they should have if they were not bad
    wmed = pcheck(params, 'BADPIX_FLAT_MED_WID', 'wmed', kwargs, func_name)
    # create storage for median-filtered flat image
    image_med = np.zeros_like(image)
    # loop around rows, applying the x-spatial median filter to each one
    # (bug fix: the original iterated range(image.shape[1]) while indexing
    #  rows, which only worked for square images)
    for row in range(image.shape[0]):
        image_med[row, :] = filters.median_filter(image[row, :], wmed)
    if method == 'new':
        # get the requested percentile of the finite median-image values
        norm = np.nanpercentile(image_med[np.isfinite(image_med)], percentile)
    else:
        # old method: sort the flattened image and pick the pixel at the
        # requested percentile rank (np.product is deprecated -> use .size)
        flat_sorted = np.sort(image_med, axis=None)
        # clamp the rank so percentile=100 does not index past the end
        rank = min(int(image.size * percentile / 100.0), image.size - 1)
        norm = flat_sorted[rank]
    # apply to flat_med and flat_ref
    return image_med / norm, image / norm
def locate_bad_pixels(params, fimage, fmed, dimage, **kwargs):
    """
    Locate the bad pixels in the flat image and the dark image

    :param params: parameter dictionary, ParamDict containing constants
        Must contain at least:
            BADPIX_FLAT_MED_WID: float, the median image in the x
                                 dimension over a boxcar of this width
            BADPIX_FLAT_CUT_RATIO: float, the maximum differential pixel
                                   cut ratio
            BADPIX_ILLUM_CUT: float, the illumination cut parameter
            BADPIX_MAX_HOTPIX: float, the maximum flux in ADU/s to be
                               considered too hot to be used
    :param fimage: numpy array (2D), the flat normalised image
    :param fmed: numpy array (2D), the flat median normalised image
    :param dimage: numpy array (2D), the dark image (not modified; a copy
                   is taken internally)
    :param kwargs: keyword arguments, may override the params above via
                   "wmed", "cut_ratio", "illum_cut", "max_hotpix"

    :return bad_pix_mask: numpy array (2D), the bad pixel mask image
    :return badpix_stats: list of floats, the statistics array:
                            Fraction of hot pixels from dark [%]
                            Fraction of bad pixels from flat [%]
                            Fraction of NaN pixels in dark [%]
                            Fraction of NaN pixels in flat [%]
                            Fraction of bad pixels with all criteria [%]
    """
    func_name = __NAME__ + '.locate_bad_pixels()'
    # log that we are looking for bad pixels
    WLOG(params, '', TextEntry('40-012-00005'))
    # -------------------------------------------------------------------------
    # wmed: We construct a "simili" flat by taking the running median of the
    #       flag in the x dimension over a boxcar width of wmed (suggested
    #       value of ~7 pix). This assumes that the flux level varies only by
    #       a small amount over wmed pixels and that the badpixels are
    #       isolated enough that the median along that box will be
    #       representative of the flux they should have if they were not bad
    wmed = pcheck(params, 'BADPIX_FLAT_MED_WID', 'wmed', kwargs, func_name)
    # maxi differential pixel response relative to the expected value
    cut_ratio = pcheck(params, 'BADPIX_FLAT_CUT_RATIO', 'cut_ratio', kwargs,
                       func_name)
    # illumination cut parameter: pixels below this flat-field level are
    # considered unilluminated and only judged on their dark current
    illum_cut = pcheck(params, 'BADPIX_ILLUM_CUT', 'illum_cut', kwargs,
                       func_name)
    # hotpix. Max flux in ADU/s to be considered too hot to be used
    max_hotpix = pcheck(params, 'BADPIX_MAX_HOTPIX', 'max_hotpix', kwargs,
                        func_name)
    # -------------------------------------------------------------------------
    # bug fix: work on a copy of the dark -- the original code subtracted the
    # running median from the caller's array in place
    dimage = np.array(dimage)
    # create storage for ratio of flat_ref to flat_med
    fratio = np.zeros_like(fimage)
    # create storage for bad dark pixels
    badpix_dark = np.zeros_like(dimage, dtype=bool)
    # -------------------------------------------------------------------------
    # complain if the flat image and dark image do not have the same dimensions
    if dimage.shape != fimage.shape:
        eargs = [fimage.shape, dimage.shape, func_name]
        WLOG(params, 'error', TextEntry('09-012-00002', args=eargs))
    # -------------------------------------------------------------------------
    # as there may be a small level of scattered light and thermal
    # background in the dark we subtract the running median to look
    # only for isolate hot pixels
    # (bug fix: iterate over rows via shape[0]; the original used shape[1]
    #  which only worked for square images)
    for row in range(fimage.shape[0]):
        dimage[row, :] -= filters.median_filter(dimage[row, :], wmed)
    # work out how much do flat pixels deviate compared to expected value
    zmask = fmed != 0
    fratio[zmask] = fimage[zmask] / fmed[zmask]
    # catch the warnings (NaN comparisons raise RuntimeWarning)
    with warnings.catch_warnings(record=True) as _:
        # if illumination is low, then consider pixel valid for this criterion
        fratio[fmed < illum_cut] = 1
    # catch the warnings
    with warnings.catch_warnings(record=True) as _:
        # where do pixels deviate too much
        badpix_flat = np.abs(fratio - 1) > cut_ratio
    # -------------------------------------------------------------------------
    # get finite flat pixels
    valid_flat = np.isfinite(fimage)
    # -------------------------------------------------------------------------
    # get finite dark pixels
    valid_dark = np.isfinite(dimage)
    # -------------------------------------------------------------------------
    # select pixels that are hot
    badpix_dark[valid_dark] = dimage[valid_dark] > max_hotpix
    # -------------------------------------------------------------------------
    # construct the bad pixel mask (bad in either image, or non-finite)
    badpix_map = badpix_flat | badpix_dark | ~valid_flat | ~valid_dark
    # -------------------------------------------------------------------------
    # log results (fractions expressed in percent; the arrays are already
    # numpy arrays so .size can be used directly)
    badpix_stats = [(np.sum(badpix_dark) / badpix_dark.size) * 100,
                    (np.sum(badpix_flat) / badpix_flat.size) * 100,
                    (np.sum(~valid_dark) / valid_dark.size) * 100,
                    (np.sum(~valid_flat) / valid_flat.size) * 100,
                    (np.sum(badpix_map) / badpix_map.size) * 100]
    WLOG(params, '', TextEntry('40-012-00006', args=badpix_stats))
    # -------------------------------------------------------------------------
    # return bad pixel map
    return badpix_map, badpix_stats
def locate_bad_pixels_full(params, image, **kwargs):
    """
    Locate the bad pixels identified from the full engineering flat image.

    :param params: parameter dictionary, ParamDict containing constants
        Must contain at least:
            BADPIX_FULL_THRESHOLD: float, the threshold on the engineering
                                   flat above which the data is good
            RAW_TO_PP_ROTATION: int, rotation code applied to the full flat
    :param image: numpy array (2D), the image to correct (for size only)
    :param kwargs: keyword arguments, may override "threshold" and "rotnum"

    :return newimage: numpy array (2D), the mask of the bad pixels
    :return stats: float, the fraction of un-illuminated pixels (percentage)
    """
    func_name = __NAME__ + '.locate_bad_pixels_full()'
    # log that we are looking for bad pixels
    WLOG(params, '', TextEntry('40-012-00002'))
    # resolve parameters (kwargs take precedence over params)
    threshold = pcheck(params, 'BADPIX_FULL_THRESHOLD', 'threshold', kwargs,
                       func_name)
    rotnum = pcheck(params, 'RAW_TO_PP_ROTATION', 'rotnum', kwargs, func_name)
    # load the full engineering flat
    mdata = drs_data.load_full_flat_badpix(params, **kwargs)
    # the full flat must match the science image dimensions
    if image.shape != mdata.shape:
        eargs = [mdata.shape, image.shape, func_name]
        WLOG(params, 'error', TextEntry('09-012-00001', args=eargs))
    # rotate the full flat into the pre-processed orientation and flag
    # pixels that deviate from unity by more than the threshold
    rotated = mp.rot8(mdata, rotnum)
    mask = np.abs(rotated - 1) > threshold
    # -------------------------------------------------------------------------
    # log the fraction of flagged pixels (in percent)
    badpix_stats = (np.sum(mask) / mask.size) * 100
    WLOG(params, '', TextEntry('40-012-00004', args=[badpix_stats]))
    # return mask
    return mask, badpix_stats
def correction(params, image=None, header=None, return_map=False, **kwargs):
    """
    Corrects "image" for "BADPIX" using calibDB file (header is used to find
    the matching calibration entry) - sets all bad pixels to NaN

    :param params: parameter dictionary, ParamDict containing constants
        Must contain at least:
            INSTRUMENT: string, the instrument name
            DRS_CALIB_DB: string, the directory that the calibration
                          files should be saved to/read from
    :param image: numpy array (2D), the image (may be None when
                  return_map is True)
    :param header: dictionary, the header dictionary created by
                   spirouFITS.ReadImage (must not be None)
    :param return_map: bool, if True returns bad pixel map else returns
                       corrected image
    :param kwargs: keyword arguments
        :keyword filename: string or None, explicit badpix file to use
                           (bypasses the calibration database lookup)

    :returns: tuple of (badpix filename, numpy array (2D)) where the array
              is the corrected image with bad pixels set to NaN, or the
              bad pixel map (if return_map = True)
    """
    func_name = __NAME__ + '.correct_for_baxpix()'
    # check for filename in kwargs
    filename = kwargs.get('filename', None)
    # deal with no header (WLOG 'error' raises, so execution stops here)
    if header is None:
        WLOG(params, 'error', TextEntry('00-012-00002', args=[func_name]))
    # deal with no image (when return map is False)
    if (not return_map) and (image is None):
        WLOG(params, 'error', TextEntry('00-012-00003', args=[func_name]))
    # get loco file instance
    badinst = core.get_file_definition('BADPIX', params['INSTRUMENT'],
                                       kind='red')
    # get calibration key
    badkey = badinst.get_dbkey(func=func_name)
    # -------------------------------------------------------------------------
    # get filename (explicit file takes precedence over the database lookup)
    if filename is not None:
        badpixfile = filename
    else:
        # get calibDB
        cdb = drs_database.get_full_database(params, 'calibration')
        # get filename col
        filecol = cdb.file_col
        # get the badpix entries (n_ent=1: only the best/closest match)
        badpixentries = drs_database.get_key_from_db(params, badkey, cdb,
                                                     header, n_ent=1)
        # get badpix filename
        badpixfilename = badpixentries[filecol][0]
        badpixfile = os.path.join(params['DRS_CALIB_DB'], badpixfilename)
    # -------------------------------------------------------------------------
    # get bad pixel file
    badpiximage = drs_fits.readfits(params, badpixfile)
    # create mask from badpixmask (non-zero pixels are bad)
    mask = np.array(badpiximage, dtype=bool)
    # -------------------------------------------------------------------------
    # if return map just return the bad pixel map
    if return_map:
        return badpixfile, mask
    # else put NaNs into the image
    else:
        # log that we are setting background pixels to NaN
        WLOG(params, '', TextEntry('40-012-00008', args=[badpixfile]))
        # correct image (copy first so the caller's array is untouched)
        corrected_image = np.array(image)
        corrected_image[mask] = np.nan
        # finally return corrected_image
        return badpixfile, corrected_image
# =============================================================================
# write files and qc functions
# =============================================================================
def quality_control(params):
    """
    Build the quality-control parameters for the bad pixel recipe.

    Currently no real checks are implemented: a single placeholder entry
    is recorded and always passes.

    :param params: parameter dictionary, ParamDict containing constants

    :return qc_params: list of [names, values, logic, pass] lists
    :return passed: int, 1 if all checks passed, 0 otherwise
    """
    # storage for failure messages and the four QC columns
    fail_msg = []
    qc_values, qc_names = [], []
    qc_logic, qc_pass = [], []
    # ------------------------------------------------------------------
    # TODO: Needs quality control doing
    # record a single placeholder entry that always passes
    qc_values.append('None')
    qc_names.append('None')
    qc_logic.append('None')
    qc_pass.append(1)
    # ------------------------------------------------------------------
    # passed only when every entry in qc_pass is 1
    all_passed = np.sum(qc_pass) == len(qc_pass)
    if all_passed:
        WLOG(params, 'info', TextEntry('40-005-10001'))
        passed = 1
    else:
        for failure in fail_msg:
            WLOG(params, 'warning', TextEntry('40-005-10002') + failure)
        passed = 0
    # bundle the QC columns in the conventional order
    qc_params = [qc_names, qc_values, qc_logic, qc_pass]
    return qc_params, passed
def write_files(params, recipe, flatfile, darkfile, backmap, combine,
                rawflatfiles, rawdarkfiles, bstats_a, bstats_b, btotal,
                bad_pixel_map1, qc_params):
    """Write the BADPIX and BACKMAP output products.

    Builds the bad-pixel output file from the recipe's 'BADPIX' output
    definition, copies the flat-file header and extends it (version, dates,
    PID, input files, QC parameters, bad-pixel statistics), writes the
    integer bad-pixel map, then writes the background map ('BACKMAP') with a
    header copied from the bad-pixel file.

    :param params: ParamDict of constants
    :param recipe: recipe instance providing the output file definitions
    :param flatfile: input flat file instance (source of the copied header)
    :param darkfile: input dark file instance
    :param backmap: background map array (cast to int before writing)
    :param combine: bool, True when inputs were combined from many raw files
    :param rawflatfiles: raw flat filenames (used in header when combine=True)
    :param rawdarkfiles: raw dark filenames (used in header when combine=True)
    :param bstats_a: five statistics written to KW_BHOT, KW_BBFLAT,
                     KW_BNDARK, KW_BNFLAT, KW_BBAD (in that order)
    :param bstats_b: statistic written to KW_BNILUM
    :param btotal: statistic written to KW_BTOT
    :param bad_pixel_map1: bad-pixel map array (cast to int before writing)
    :param qc_params: quality-control parameters from quality_control()
    :return: tuple (badpixfile, backmapfile) of the written output instances
    """
    badpixfile = recipe.outputs['BADPIX'].newcopy(recipe=recipe)
    # construct the filename from file instance
    badpixfile.construct_filename(params, infile=flatfile)
    # ------------------------------------------------------------------
    # define header keys for output file
    # copy keys from input file
    badpixfile.copy_original_keys(flatfile)
    # add version
    badpixfile.add_hkey('KW_VERSION', value=params['DRS_VERSION'])
    # add dates
    badpixfile.add_hkey('KW_DRS_DATE', value=params['DRS_DATE'])
    badpixfile.add_hkey('KW_DRS_DATE_NOW', value=params['DATE_NOW'])
    # add process id
    badpixfile.add_hkey('KW_PID', value=params['PID'])
    # add output tag
    badpixfile.add_hkey('KW_OUTPUT', value=badpixfile.name)
    # add input files (raw lists when inputs were combined, else basenames)
    if combine:
        hfiles1, hfiles2 = rawflatfiles, rawdarkfiles
    else:
        hfiles1, hfiles2 = [flatfile.basename], [darkfile.basename]
    badpixfile.add_hkey_1d('KW_INFILE1', values=hfiles1,
                           dim1name='flatfile')
    badpixfile.add_hkey_1d('KW_INFILE2', values=hfiles2,
                           dim1name='darkfile')
    # add qc parameters
    badpixfile.add_qckeys(qc_params)
    # add background statistics
    badpixfile.add_hkey('KW_BHOT', value=bstats_a[0])
    badpixfile.add_hkey('KW_BBFLAT', value=bstats_a[1])
    badpixfile.add_hkey('KW_BNDARK', value=bstats_a[2])
    badpixfile.add_hkey('KW_BNFLAT', value=bstats_a[3])
    badpixfile.add_hkey('KW_BBAD', value=bstats_a[4])
    badpixfile.add_hkey('KW_BNILUM', value=bstats_b)
    badpixfile.add_hkey('KW_BTOT', value=btotal)
    # write to file (bad-pixel map is stored as an integer mask)
    bad_pixel_map1 = np.array(bad_pixel_map1, dtype=int)
    # copy data
    badpixfile.data = bad_pixel_map1
    # ------------------------------------------------------------------
    # log that we are saving rotated image
    WLOG(params, '', TextEntry('40-012-00013', args=[badpixfile.filename]))
    # write image to file
    badpixfile.write_file()
    # add to output files (for indexing)
    recipe.add_output_file(badpixfile)
    # ----------------------------------------------------------------------
    # Save background map file
    # ----------------------------------------------------------------------
    backmapfile = recipe.outputs['BACKMAP'].newcopy(recipe=recipe)
    # construct the filename from file instance
    backmapfile.construct_filename(params, infile=flatfile)
    # ------------------------------------------------------------------
    # define header keys for output file (copy of badpixfile)
    backmapfile.copy_hdict(badpixfile)
    # add output tag
    backmapfile.add_hkey('KW_OUTPUT', value=backmapfile.name)
    # write to file (background map is stored as an integer mask)
    backmap = np.array(backmap, dtype=int)
    # copy data
    backmapfile.data = backmap
    # ------------------------------------------------------------------
    # log that we are saving rotated image
    WLOG(params, '', TextEntry('40-012-00014', args=[backmapfile.filename]))
    # write image to file
    backmapfile.write_file()
    # add to output files (for indexing)
    recipe.add_output_file(backmapfile)
    # return output files
    return badpixfile, backmapfile
def summary(recipe, it, params, bstats_a, bstats_b, btotal):
    """Add the bad-pixel statistics to the plot summary and build the
    summary document for iteration *it*.

    :param recipe: recipe instance providing recipe.plot
    :param it: iteration number passed to summary_document
    :param params: ParamDict (DRS_VERSION / DRS_DATE are reported)
    :param bstats_a: five statistics for KW_BHOT..KW_BBAD (in order)
    :param bstats_b: statistic for KW_BNILUM
    :param btotal: statistic for KW_BTOT
    :return: None
    """
    # table of (header key, value) pairs to report, in display order
    stat_entries = [
        ('KW_VERSION', params['DRS_VERSION']),
        ('KW_DRS_DATE', params['DRS_DATE']),
        ('KW_BHOT', bstats_a[0]),
        ('KW_BBFLAT', bstats_a[1]),
        ('KW_BNDARK', bstats_a[2]),
        ('KW_BNFLAT', bstats_a[3]),
        ('KW_BBAD', bstats_a[4]),
        ('KW_BNILUM', bstats_b),
        ('KW_BTOT', btotal),
    ]
    for key, stat_value in stat_entries:
        recipe.plot.add_stat(key, value=stat_value)
    # construct summary
    recipe.plot.summary_document(it)
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
    # ----------------------------------------------------------------------
    # placeholder entry point: this module is meant to be imported for its
    # functions; running it directly only prints a greeting
    print("Hello World!")
# =============================================================================
# End of code
# =============================================================================
| [
"numpy.product",
"scipy.ndimage.filters.median_filter",
"numpy.abs",
"apero.core.math.rot8",
"apero.core.get_file_definition",
"apero.core.constants.load",
"numpy.sort",
"warnings.catch_warnings",
"apero.io.drs_data.load_full_flat_badpix",
"os.path.join",
"apero.core.core.drs_database.get_key_fr... | [((779, 809), 'apero.core.constants.load', 'constants.load', (['__INSTRUMENT__'], {}), '(__INSTRUMENT__)\n', (793, 809), False, 'from apero.core import constants\n'), ((3955, 3975), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (3968, 3975), True, 'import numpy as np\n'), ((8133, 8154), 'numpy.zeros_like', 'np.zeros_like', (['fimage'], {}), '(fimage)\n', (8146, 8154), True, 'import numpy as np\n'), ((8214, 8247), 'numpy.zeros_like', 'np.zeros_like', (['dimage'], {'dtype': 'bool'}), '(dimage, dtype=bool)\n', (8227, 8247), True, 'import numpy as np\n'), ((9570, 9589), 'numpy.isfinite', 'np.isfinite', (['fimage'], {}), '(fimage)\n', (9581, 9589), True, 'import numpy as np\n'), ((9716, 9735), 'numpy.isfinite', 'np.isfinite', (['dimage'], {}), '(dimage)\n', (9727, 9735), True, 'import numpy as np\n'), ((12101, 12149), 'apero.io.drs_data.load_full_flat_badpix', 'drs_data.load_full_flat_badpix', (['params'], {}), '(params, **kwargs)\n', (12131, 12149), False, 'from apero.io import drs_data\n'), ((14468, 14536), 'apero.core.get_file_definition', 'core.get_file_definition', (['"""BADPIX"""', "params['INSTRUMENT']"], {'kind': '"""red"""'}), "('BADPIX', params['INSTRUMENT'], kind='red')\n", (14492, 14536), False, 'from apero import core\n'), ((15420, 15457), 'apero.io.drs_fits.readfits', 'drs_fits.readfits', (['params', 'badpixfile'], {}), '(params, badpixfile)\n', (15437, 15457), False, 'from apero.io import drs_fits\n'), ((15503, 15536), 'numpy.array', 'np.array', (['badpiximage'], {'dtype': 'bool'}), '(badpiximage, dtype=bool)\n', (15511, 15536), True, 'import numpy as np\n'), ((19135, 19170), 'numpy.array', 'np.array', (['bad_pixel_map1'], {'dtype': 'int'}), '(bad_pixel_map1, dtype=int)\n', (19143, 19170), True, 'import numpy as np\n'), ((20201, 20229), 'numpy.array', 'np.array', (['backmap'], {'dtype': 'int'}), '(backmap, dtype=int)\n', (20209, 20229), True, 'import numpy as np\n'), ((4142, 4185), 
'scipy.ndimage.filters.median_filter', 'filters.median_filter', (['image[i_it, :]', 'wmed'], {}), '(image[i_it, :], wmed)\n', (4163, 4185), False, 'from scipy.ndimage import filters\n'), ((4417, 4427), 'numpy.sort', 'np.sort', (['v'], {}), '(v)\n', (4424, 4427), True, 'import numpy as np\n'), ((8887, 8931), 'scipy.ndimage.filters.median_filter', 'filters.median_filter', (['dimage[i_it, :]', 'wmed'], {}), '(dimage[i_it, :], wmed)\n', (8908, 8931), False, 'from scipy.ndimage import filters\n'), ((9110, 9146), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (9133, 9146), False, 'import warnings\n'), ((9303, 9339), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (9326, 9339), False, 'import warnings\n'), ((14853, 14906), 'apero.core.core.drs_database.get_full_database', 'drs_database.get_full_database', (['params', '"""calibration"""'], {}), "(params, 'calibration')\n", (14883, 14906), False, 'from apero.core.core import drs_database\n'), ((15022, 15088), 'apero.core.core.drs_database.get_key_from_db', 'drs_database.get_key_from_db', (['params', 'badkey', 'cdb', 'header'], {'n_ent': '(1)'}), '(params, badkey, cdb, header, n_ent=1)\n', (15050, 15088), False, 'from apero.core.core import drs_database\n'), ((15244, 15296), 'os.path.join', 'os.path.join', (["params['DRS_CALIB_DB']", 'badpixfilename'], {}), "(params['DRS_CALIB_DB'], badpixfilename)\n", (15256, 15296), False, 'import os\n'), ((15968, 15983), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (15976, 15983), True, 'import numpy as np\n'), ((16904, 16919), 'numpy.sum', 'np.sum', (['qc_pass'], {}), '(qc_pass)\n', (16910, 16919), True, 'import numpy as np\n'), ((4380, 4403), 'numpy.product', 'np.product', (['image.shape'], {}), '(image.shape)\n', (4390, 4403), True, 'import numpy as np\n'), ((9412, 9430), 'numpy.abs', 'np.abs', (['(fratio - 1)'], {}), '(fratio - 1)\n', (9418, 9430), True, 'import numpy 
as np\n'), ((12568, 12580), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (12574, 12580), True, 'import numpy as np\n'), ((4303, 4325), 'numpy.isfinite', 'np.isfinite', (['image_med'], {}), '(image_med)\n', (4314, 4325), True, 'import numpy as np\n'), ((10216, 10235), 'numpy.sum', 'np.sum', (['badpix_dark'], {}), '(badpix_dark)\n', (10222, 10235), True, 'import numpy as np\n'), ((10294, 10313), 'numpy.sum', 'np.sum', (['badpix_flat'], {}), '(badpix_flat)\n', (10300, 10313), True, 'import numpy as np\n'), ((10372, 10391), 'numpy.sum', 'np.sum', (['(~valid_dark)'], {}), '(~valid_dark)\n', (10378, 10391), True, 'import numpy as np\n'), ((10449, 10468), 'numpy.sum', 'np.sum', (['(~valid_flat)'], {}), '(~valid_flat)\n', (10455, 10468), True, 'import numpy as np\n'), ((10526, 10544), 'numpy.sum', 'np.sum', (['badpix_map'], {}), '(badpix_map)\n', (10532, 10544), True, 'import numpy as np\n'), ((12410, 12432), 'apero.core.math.rot8', 'mp.rot8', (['mdata', 'rotnum'], {}), '(mdata, rotnum)\n', (12417, 12432), True, 'from apero.core import math as mp\n'), ((12583, 12597), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (12591, 12597), True, 'import numpy as np\n'), ((10238, 10259), 'numpy.array', 'np.array', (['badpix_dark'], {}), '(badpix_dark)\n', (10246, 10259), True, 'import numpy as np\n'), ((10316, 10337), 'numpy.array', 'np.array', (['badpix_flat'], {}), '(badpix_flat)\n', (10324, 10337), True, 'import numpy as np\n'), ((10394, 10414), 'numpy.array', 'np.array', (['valid_dark'], {}), '(valid_dark)\n', (10402, 10414), True, 'import numpy as np\n'), ((10471, 10491), 'numpy.array', 'np.array', (['valid_flat'], {}), '(valid_flat)\n', (10479, 10491), True, 'import numpy as np\n'), ((10547, 10567), 'numpy.array', 'np.array', (['badpix_map'], {}), '(badpix_map)\n', (10555, 10567), True, 'import numpy as np\n'), ((4449, 4472), 'numpy.product', 'np.product', (['image.shape'], {}), '(image.shape)\n', (4459, 4472), True, 'import numpy as np\n')] |
'''
Created on 20 jun. 2018
@author: fmplaza
'''
import os
from keras.datasets.imdb import get_word_index
from model.glove_word_embedings import GloveWordEmbednigs
import pandas as pd
from nltk.tokenize.casual import TweetTokenizer
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding, Bidirectional, Conv1D, GlobalAveragePooling1D, MaxPooling1D, Dropout, Activation, Flatten, GlobalMaxPooling1D, ActivityRegularization
from mpl_toolkits.axes_grid1.axes_size import Padded
from keras.utils import np_utils
from sklearn import metrics
from nltk.tokenize.casual import TweetTokenizer
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
from keras.initializers import glorot_normal, glorot_uniform
from keras import regularizers
import random
from tensorflow import set_random_seed
from scipy import stats
import csv
# fix all random seeds so runs are reproducible
RANDOM_SEED = 666
np.random.seed(RANDOM_SEED)
set_random_seed(RANDOM_SEED)
# silence TensorFlow info/warning log spam
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
# shared tweet tokenizer: lower-cases, collapses elongated words, keeps handles
TWEET_TOKENIZER = TweetTokenizer(preserve_case=False, reduce_len=True, strip_handles=False)
# label names discovered while reading the training corpus (list index = numeric label)
CLASSES = []
# 214 = 200-dim GloVe vector + 8 EmoLex features + 6 emoji features (concatenated below)
EMBEDDING_DIM = 214
#load GloVe model (200-dim Twitter embeddings)
glove = GloveWordEmbednigs()
glove_file = "./embeddings/glove.twitter.27B/glove.twitter.27B.200d.txt"
glove.path_file = glove_file
#Load the Glove vectors file into memory, 3 indexes reserved (0: padding, 1: word not present in embedding, 2: magic word)
number_features = 500000
begin_ofset = 3
glove.load(number_features, begin_ofset)
#Load the WASSA corpus
def read_corpus():
    """Load the WASSA train/trial corpora and build numeric label vectors.

    Training labels are one-hot encoded; CLASSES is filled (in order of
    first appearance in the training set) so that the list index of an
    emotion name is its numeric label.

    :return: (train tweets, one-hot train labels, dev tweets,
              dev label indexes as a plain list)
    """
    train_df = pd.read_csv('./corpus/train-v2.csv', sep="\t", header=0)
    train_labels = train_df['emotion']
    dev_df = pd.read_csv('./corpus/trial-v2.csv', sep="\t", header=0)
    dev_label_df = pd.read_csv('./corpus/trial-v2.labels', sep="\t", header=0)
    dev_labels = dev_label_df['emotion']
    # convert categorical labels into numeric labels, growing CLASSES as
    # new emotion names are encountered in the training data
    train_numeric = []
    for label in train_labels.tolist():
        if label not in CLASSES:
            CLASSES.append(label)
        train_numeric.append(CLASSES.index(label))
    # dev labels are assumed to be a subset of the training labels
    dev_numeric = [CLASSES.index(label) for label in dev_labels.tolist()]
    # one-hot encode the training labels for the softmax output layer
    train_numeric = np_utils.to_categorical(train_numeric)
    return train_df.tweet, train_numeric, dev_df.tweet, dev_numeric
def read_lexicon(emolex_path='lexicon/emolex_english.csv',
                 emoji_path='lexicon/emojis.txt'):
    """Load the EmoLex word-emotion lexicon and the emoji-emotion lexicon.

    The paths default to the original hard-coded locations, so existing
    callers (``read_lexicon()``) are unaffected; parameterizing them makes
    the function testable and reusable.

    :param emolex_path: tab-separated EmoLex file with a header row and
                        columns term, anger, disgust, fear, joy, sadness,
                        surprise, trust, anticipation
    :param emoji_path: space-separated file with a header row and columns
                       emoji, emotion
    :return: tuple (emolex_lexicon, emoji_lexicon) where emolex_lexicon
             maps term -> 8 emotion counts [anger, disgust, fear, joy,
             sadness, surprise, trust, anticipation] (duplicate terms are
             summed) and emoji_lexicon maps emoji -> 6-dim one-hot vector
             [anger, disgust, fear, joy, sad, surprise]
    """
    # ---- EmoLex: per-term emotion counts, summed over duplicate rows ----
    # (the original also built a list containing the term itself that was
    # immediately overwritten — dead code, removed here)
    emolex_lexicon = {}
    with open(emolex_path) as csvfile:
        reader = csv.reader(csvfile, delimiter='\t')
        next(reader)  # skip header row
        for row in reader:
            term = row[0]
            scores = [int(value) for value in row[1:9]]
            if term not in emolex_lexicon:
                emolex_lexicon[term] = scores
            else:
                # accumulate counts for a term that appears more than once
                stored = emolex_lexicon[term]
                for i, score in enumerate(scores):
                    stored[i] += score
    # ---- emojis: one-hot emotion vector per emoji ----
    # one-hot slot order: anger, disgust, fear, joy, sad, surprise;
    # rows with any other emotion label are skipped (as before)
    one_hot = {
        'anger':    [1, 0, 0, 0, 0, 0],
        'disgust':  [0, 1, 0, 0, 0, 0],
        'fear':     [0, 0, 1, 0, 0, 0],
        'joy':      [0, 0, 0, 1, 0, 0],
        'sad':      [0, 0, 0, 0, 1, 0],
        'surprise': [0, 0, 0, 0, 0, 1],
    }
    emoji_lexicon = {}
    with open(emoji_path) as csvfile:
        reader = csv.reader(csvfile, delimiter=' ')
        next(reader)  # skip header row
        for row in reader:
            if row[1] in one_hot:
                # copy so lexicon entries never alias the template lists
                emoji_lexicon[row[0]] = list(one_hot[row[1]])
    return emolex_lexicon, emoji_lexicon
def tokenize(text):
    """Split *text* into tokens using the module-level tweet tokenizer."""
    return TWEET_TOKENIZER.tokenize(text)
def fit_transform_vocabulary(corpus):
    """Map every tweet in *corpus* to a list of embedding-row indices.

    Reserved indices: 0 = padding (never emitted here), 1 = word not
    present in the embedding, 2 = the magic trigger-word token.  Simple
    length statistics of the tokenised tweets (max / mean / mode) are
    printed as a side effect.

    :param corpus: iterable of tweet strings
    :return: list of lists of embedding indices, one inner list per tweet
    """
    corpus_indexes = []
    corpus_lengths = []
    for doc in corpus:
        tokens = tokenize(doc)
        corpus_lengths.append(len(tokens))
        tweet_indexes = []
        for token in tokens:
            if token == "#<PASSWORD>":
                # magic trigger-word token gets the reserved index 2
                tweet_indexes.append(2)
            else:
                lowered = token.lower()
                if glove.is_word(lowered):
                    tweet_indexes.append(glove.get_word_index(lowered))
                else:
                    # out-of-vocabulary words share the reserved index 1
                    tweet_indexes.append(1)
        corpus_indexes.append(tweet_indexes)
    # report tweet-length statistics (used to choose the padding length)
    print(np.max(corpus_lengths))
    print(np.mean(corpus_lengths))
    print(stats.mode(corpus_lengths, axis=0))
    return corpus_indexes
def classification_embedings_rnn(tweets_train, tweets_train_labels_numeric, tweets_dev, emolex_lexicon, emoji_lexicon):
    """Train a BiLSTM classifier on pre-trained GloVe + lexicon features and
    predict labels for the dev set.

    The embedding matrix concatenates, per word, its 200-dim GloVe vector
    with 8 EmoLex counts and a 6-dim emoji one-hot (214 = EMBEDDING_DIM);
    the embedding layer itself is frozen during training.

    :param tweets_train: training tweet texts
    :param tweets_train_labels_numeric: one-hot training labels
    :param tweets_dev: dev tweet texts
    :param emolex_lexicon: dict word -> 8 EmoLex emotion counts
    :param emoji_lexicon: dict emoji -> 6-dim one-hot emotion vector
    :return: predicted class indices for the dev set
    """
    #Classification with RNN and embedings (pre-trained)
    #calculate vocabulary
    corpus_train_index = fit_transform_vocabulary(tweets_train)
    corpus_dev_index = fit_transform_vocabulary(tweets_dev)
    # pad/truncate every tweet to a fixed length of 27 tokens
    max_len_input = 27
    train_features_pad = sequence.pad_sequences(corpus_train_index, maxlen=max_len_input, padding="post", truncating="post", value = 0)
    padded_docs_dev = sequence.pad_sequences(corpus_dev_index, maxlen=max_len_input, padding="post", truncating="post", value = 0)
    # define RNN model
    model = Sequential()
    #assign special index
    # NOTE(review): 2 * 0.1 * rand(d) - 1 yields values in [-1.0, -0.8];
    # possibly 2 * 0.1 * (rand(d) - 0.5) was intended — confirm
    trigger_word_vector = 2 * 0.1 * np.random.rand(EMBEDDING_DIM) - 1
    glove.set_embedding_vector(1, trigger_word_vector)
    vector_word_not_present = 2 * 0.1 * np.random.rand(EMBEDDING_DIM) - 1
    glove.set_embedding_vector(2, vector_word_not_present)
    #number of features in embeddings model (+3 for the reserved indexes)
    feature_size = number_features + 3
    embedding_matrix = np.zeros((feature_size, EMBEDDING_DIM))
    for word, idx in glove.word_indexes.items():
        # default lexicon features: all-zero when the word is unknown
        emolex_vector = [0,0,0,0,0,0,0,0]
        emoji_vector = [0,0,0,0,0,0]
        if(word in emolex_lexicon.keys()):
            emolex_vector = emolex_lexicon[word]
        if(word in emoji_lexicon.keys()):
            emoji_vector = emoji_lexicon[word]
        list_features_lexicons = emolex_vector + emoji_vector
        embedding_vec = glove.get_word_embedding(word)
        # 200-dim GloVe + 14 lexicon features -> 214-dim row
        embedding_vec = np.concatenate((embedding_vec, list_features_lexicons), axis=0)
        if embedding_vec is not None and embedding_vec.shape[0]==EMBEDDING_DIM:
            embedding_matrix[idx] = np.asarray(embedding_vec)
    #input_length: Length of input sequences, when it is constant
    e = Embedding(feature_size, EMBEDDING_DIM, input_length=max_len_input, weights=[embedding_matrix], trainable=False)
    model.add(e)
    #number of features:_32 each vector of 200 dim is converted to a vector of 32 dim
    #model.add(LSTM(128, return_sequences=True))
    model.add(Bidirectional(LSTM(128, return_sequences=True)))
    model.add(Dense(128, activation='relu', kernel_initializer=glorot_uniform(seed=RANDOM_SEED)))
    model.add(Dropout(0.5))
    model.add(MaxPooling1D(pool_size=2, strides=1, padding="same"))
    model.add(Flatten())
    model.add(Dense(64, activation='relu', kernel_initializer=glorot_uniform(seed=RANDOM_SEED)))
    model.add(Dropout(0.5))
    model.add(Dense(len(CLASSES), activation='softmax'))
    model.add(ActivityRegularization(l1=0.0,l2=0.0001))
    # summarize the model
    print(model.summary())
    print("Compiling the model...")
    # compile the model
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
    print("Training the model...")
    # stop training once the loss has not improved for 3 epochs
    earlyStopping = EarlyStopping('loss', patience=3, mode='min')
    #model.fit(train_features_pad, tweets_train_labels_numeric, batch_size=64, epochs=1, verbose=1, validation_data=(train_features_pad,tweets_train_labels_numeric), callbacks=[earlyStopping])
    model.fit(train_features_pad, tweets_train_labels_numeric, batch_size=64, epochs=30, verbose=1, callbacks=[earlyStopping])
    loss, accuracy = model.evaluate(train_features_pad, tweets_train_labels_numeric, batch_size=64, verbose=1)
    print('Accuracy trainning: %f' % (accuracy*100))
    #prediction
    tweets_dev_classified_labels = model.predict_classes(padded_docs_dev, batch_size=64, verbose=1)
    return tweets_dev_classified_labels
def calculate_quality_performamnce(y_labels, y_classified_labels, model_name):
    """Print macro-averaged precision/recall/F1 and accuracy to stdout.

    :param y_labels: true numeric labels
    :param y_classified_labels: predicted numeric labels
    :param model_name: name shown in the results banner
    """
    # numeric ids of the known classes, in CLASSES order
    label_ids = [CLASSES.index(label) for label in CLASSES]
    # metric name -> value, in the order they should be reported
    scores = {
        'Macro-Precision': metrics.precision_score(
            y_labels, y_classified_labels, labels=label_ids, average="macro"),
        'Macro-Recall': metrics.recall_score(
            y_labels, y_classified_labels, labels=label_ids, average="macro"),
        'Macro-F1': metrics.f1_score(
            y_labels, y_classified_labels, labels=label_ids, average="macro"),
        'Accuracy': metrics.accuracy_score(y_labels, y_classified_labels),
    }
    print("\n*** Results " + model_name + " ***")
    for metric_name, metric_value in scores.items():
        print(metric_name + ": " + str(metric_value))
def main ():
    """End-to-end pipeline: load data, train the RNN, report metrics."""
    (tweets_train, tweets_train_labels_numeric,
     tweets_dev, tweets_dev_labels) = read_corpus()
    emolex_lexicon, emoji_lexicon = read_lexicon()
    predictions = classification_embedings_rnn(
        tweets_train, tweets_train_labels_numeric, tweets_dev,
        emolex_lexicon, emoji_lexicon)
    calculate_quality_performamnce(tweets_dev_labels, predictions, "RNN_LSTM")


if __name__ == '__main__':
    main()
| [
"numpy.random.rand",
"pandas.read_csv",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"model.glove_word_embedings.GloveWordEmbednigs",
"nltk.tokenize.casual.TweetTokenizer",
"keras.preprocessing.sequence.pad_sequences",
"tensorflow.set_random_seed",
"numpy.mean",
"keras.layers... | [((973, 1000), 'numpy.random.seed', 'np.random.seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (987, 1000), True, 'import numpy as np\n'), ((1001, 1029), 'tensorflow.set_random_seed', 'set_random_seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (1016, 1029), False, 'from tensorflow import set_random_seed\n'), ((1088, 1161), 'nltk.tokenize.casual.TweetTokenizer', 'TweetTokenizer', ([], {'preserve_case': '(False)', 'reduce_len': '(True)', 'strip_handles': '(False)'}), '(preserve_case=False, reduce_len=True, strip_handles=False)\n', (1102, 1161), False, 'from nltk.tokenize.casual import TweetTokenizer\n'), ((1222, 1242), 'model.glove_word_embedings.GloveWordEmbednigs', 'GloveWordEmbednigs', ([], {}), '()\n', (1240, 1242), False, 'from model.glove_word_embedings import GloveWordEmbednigs\n'), ((1730, 1786), 'pandas.read_csv', 'pd.read_csv', (['"""./corpus/train-v2.csv"""'], {'sep': '"""\t"""', 'header': '(0)'}), "('./corpus/train-v2.csv', sep='\\t', header=0)\n", (1741, 1786), True, 'import pandas as pd\n'), ((1854, 1910), 'pandas.read_csv', 'pd.read_csv', (['"""./corpus/trial-v2.csv"""'], {'sep': '"""\t"""', 'header': '(0)'}), "('./corpus/trial-v2.csv', sep='\\t', header=0)\n", (1865, 1910), True, 'import pandas as pd\n'), ((1935, 1994), 'pandas.read_csv', 'pd.read_csv', (['"""./corpus/trial-v2.labels"""'], {'sep': '"""\t"""', 'header': '(0)'}), "('./corpus/trial-v2.labels', sep='\\t', header=0)\n", (1946, 1994), True, 'import pandas as pd\n'), ((2473, 2525), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['tweets_train_labels_numeric'], {}), '(tweets_train_labels_numeric)\n', (2496, 2525), False, 'from keras.utils import np_utils\n'), ((6425, 6538), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['corpus_train_index'], {'maxlen': 'max_len_input', 'padding': '"""post"""', 'truncating': '"""post"""', 'value': '(0)'}), "(corpus_train_index, maxlen=max_len_input, padding=\n 'post', truncating='post', 
value=0)\n", (6447, 6538), False, 'from keras.preprocessing import sequence\n'), ((6558, 6669), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['corpus_dev_index'], {'maxlen': 'max_len_input', 'padding': '"""post"""', 'truncating': '"""post"""', 'value': '(0)'}), "(corpus_dev_index, maxlen=max_len_input, padding=\n 'post', truncating='post', value=0)\n", (6580, 6669), False, 'from keras.preprocessing import sequence\n'), ((6703, 6715), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6713, 6715), False, 'from keras.models import Sequential\n'), ((7135, 7174), 'numpy.zeros', 'np.zeros', (['(feature_size, EMBEDDING_DIM)'], {}), '((feature_size, EMBEDDING_DIM))\n', (7143, 7174), True, 'import numpy as np\n'), ((7917, 8033), 'keras.layers.Embedding', 'Embedding', (['feature_size', 'EMBEDDING_DIM'], {'input_length': 'max_len_input', 'weights': '[embedding_matrix]', 'trainable': '(False)'}), '(feature_size, EMBEDDING_DIM, input_length=max_len_input, weights=\n [embedding_matrix], trainable=False)\n', (7926, 8033), False, 'from keras.layers import Dense, LSTM, Embedding, Bidirectional, Conv1D, GlobalAveragePooling1D, MaxPooling1D, Dropout, Activation, Flatten, GlobalMaxPooling1D, ActivityRegularization\n'), ((8971, 9016), 'keras.callbacks.EarlyStopping', 'EarlyStopping', (['"""loss"""'], {'patience': '(3)', 'mode': '"""min"""'}), "('loss', patience=3, mode='min')\n", (8984, 9016), False, 'from keras.callbacks import EarlyStopping\n'), ((9827, 9880), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_labels', 'y_classified_labels'], {}), '(y_labels, y_classified_labels)\n', (9849, 9880), False, 'from sklearn import metrics\n'), ((9903, 10000), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['y_labels', 'y_classified_labels'], {'labels': 'classes_index', 'average': '"""macro"""'}), "(y_labels, y_classified_labels, labels=classes_index,\n average='macro')\n", (9926, 10000), False, 'from sklearn import 
metrics\n'), ((10016, 10110), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['y_labels', 'y_classified_labels'], {'labels': 'classes_index', 'average': '"""macro"""'}), "(y_labels, y_classified_labels, labels=classes_index,\n average='macro')\n", (10036, 10110), False, 'from sklearn import metrics\n'), ((10122, 10212), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_labels', 'y_classified_labels'], {'labels': 'classes_index', 'average': '"""macro"""'}), "(y_labels, y_classified_labels, labels=classes_index,\n average='macro')\n", (10138, 10212), False, 'from sklearn import metrics\n'), ((2791, 2826), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""'}), "(csvfile, delimiter='\\t')\n", (2801, 2826), False, 'import csv\n'), ((3946, 3980), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""" """'}), "(csvfile, delimiter=' ')\n", (3956, 3980), False, 'import csv\n'), ((5886, 5908), 'numpy.max', 'np.max', (['corpus_lengths'], {}), '(corpus_lengths)\n', (5892, 5908), True, 'import numpy as np\n'), ((5920, 5943), 'numpy.mean', 'np.mean', (['corpus_lengths'], {}), '(corpus_lengths)\n', (5927, 5943), True, 'import numpy as np\n'), ((5955, 5989), 'scipy.stats.mode', 'stats.mode', (['corpus_lengths'], {'axis': '(0)'}), '(corpus_lengths, axis=0)\n', (5965, 5989), False, 'from scipy import stats\n'), ((7630, 7693), 'numpy.concatenate', 'np.concatenate', (['(embedding_vec, list_features_lexicons)'], {'axis': '(0)'}), '((embedding_vec, list_features_lexicons), axis=0)\n', (7644, 7693), True, 'import numpy as np\n'), ((8361, 8373), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8368, 8373), False, 'from keras.layers import Dense, LSTM, Embedding, Bidirectional, Conv1D, GlobalAveragePooling1D, MaxPooling1D, Dropout, Activation, Flatten, GlobalMaxPooling1D, ActivityRegularization\n'), ((8389, 8441), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)', 'strides': '(1)', 'padding': '"""same"""'}), 
"(pool_size=2, strides=1, padding='same')\n", (8401, 8441), False, 'from keras.layers import Dense, LSTM, Embedding, Bidirectional, Conv1D, GlobalAveragePooling1D, MaxPooling1D, Dropout, Activation, Flatten, GlobalMaxPooling1D, ActivityRegularization\n'), ((8457, 8466), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8464, 8466), False, 'from keras.layers import Dense, LSTM, Embedding, Bidirectional, Conv1D, GlobalAveragePooling1D, MaxPooling1D, Dropout, Activation, Flatten, GlobalMaxPooling1D, ActivityRegularization\n'), ((8579, 8591), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8586, 8591), False, 'from keras.layers import Dense, LSTM, Embedding, Bidirectional, Conv1D, GlobalAveragePooling1D, MaxPooling1D, Dropout, Activation, Flatten, GlobalMaxPooling1D, ActivityRegularization\n'), ((8664, 8705), 'keras.layers.ActivityRegularization', 'ActivityRegularization', ([], {'l1': '(0.0)', 'l2': '(0.0001)'}), '(l1=0.0, l2=0.0001)\n', (8686, 8705), False, 'from keras.layers import Dense, LSTM, Embedding, Bidirectional, Conv1D, GlobalAveragePooling1D, MaxPooling1D, Dropout, Activation, Flatten, GlobalMaxPooling1D, ActivityRegularization\n'), ((6783, 6812), 'numpy.random.rand', 'np.random.rand', (['EMBEDDING_DIM'], {}), '(EMBEDDING_DIM)\n', (6797, 6812), True, 'import numpy as np\n'), ((6917, 6946), 'numpy.random.rand', 'np.random.rand', (['EMBEDDING_DIM'], {}), '(EMBEDDING_DIM)\n', (6931, 6946), True, 'import numpy as np\n'), ((7811, 7836), 'numpy.asarray', 'np.asarray', (['embedding_vec'], {}), '(embedding_vec)\n', (7821, 7836), True, 'import numpy as np\n'), ((8214, 8246), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'return_sequences': '(True)'}), '(128, return_sequences=True)\n', (8218, 8246), False, 'from keras.layers import Dense, LSTM, Embedding, Bidirectional, Conv1D, GlobalAveragePooling1D, MaxPooling1D, Dropout, Activation, Flatten, GlobalMaxPooling1D, ActivityRegularization\n'), ((8312, 8344), 'keras.initializers.glorot_uniform', 
'glorot_uniform', ([], {'seed': 'RANDOM_SEED'}), '(seed=RANDOM_SEED)\n', (8326, 8344), False, 'from keras.initializers import glorot_normal, glorot_uniform\n'), ((8530, 8562), 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': 'RANDOM_SEED'}), '(seed=RANDOM_SEED)\n', (8544, 8562), False, 'from keras.initializers import glorot_normal, glorot_uniform\n')] |
"""
MIT License
Copyright (c) [2016] [<NAME>]
Software = Python Scripts in the [Imundbo Quant v1.9] series
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
IMUNDBO QUANT v1.9 (Gridsearch script)
"""
from sklearn.ensemble import RandomForestClassifier
import os
import numpy as np
import pandas as pd
import time
import random
import traceback
from config.IQConfig import IQConfig
from gui.console import Console
from metrics.Timer import Timer
c = Console(
""" ____ _ _ _ _ _
/ ___|_ __ ___ ___ ___ __ ____ _| (_) __| | __ _| |_(_) ___ _ __
| | | '__/ _ \/ __/ __| \ \ / / _` | | |/ _` |/ _` | __| |/ _ \| '_ \
| |___| | | (_) \__ \__ \ \ V / (_| | | | (_| | (_| | |_| | (_) | | | |
\____|_| \___/|___/___/ \_/ \__,_|_|_|\__,_|\__,_|\__|_|\___/|_| |_|
""")
config = IQConfig()
# locate the training spreadsheet and report its size before the (slow) read
TrainLocation = config.crossValidation.getTrainingFilePath()
fileSize = os.path.getsize(TrainLocation) / 1024. / 1024.  # bytes -> megabytes
print ('Reading {0:.2f}MB of training data from {1}...'.format(fileSize, TrainLocation))
# Read the full training set (can take a while for large files)
trainData = pd.read_excel(TrainLocation, parse_dates=['_DateStamp'])
print("Dropping rows containing, Inf, -Inf, and NaN...")
# BUGFIX: pandas replace()/dropna() return NEW DataFrames, so the results
# must be assigned back — the original discarded them, making the cleanup a
# silent no-op.  It also called the non-existent method ``dropNA`` (pandas
# spells it ``dropna``), which would raise AttributeError at runtime.
trainData = trainData.replace([np.inf, -np.inf], np.nan)
trainData = trainData.dropna()
# ============ REMOVE ALL BUT 1000 ROWS TO SPEED UP TESTING
#newNumRows = 10000
#print("REMOVING ALL BUT {} ROWS (from {}) FOR TRAINING!".format(newNumRows, len(trainData)))
#trainData = trainData.head(newNumRows)
# ==== END REMOVE =====
#print(trainData)
c.timer.print_elapsed("Reading of training data complete")
#print ("\nData types of columns (should normally show floats, ints and one date column only):")
#print(trainData.dtypes)
# feature to sort the training data by for this gridsearch run; uncomment
# one of the alternatives below to sweep a different feature
_featureToCheck = "Slump"
#_featureToCheck = "_Date"
#_featureToCheck = "_Diff_CtoL"
#_featureToCheck = "_Diff_CtoH"
#_featureToCheck = "_Low34_L"
#_featureToCheck = "_PastSCH05to34"
#_featureToCheck = "_DiffU34_L3"
#_featureToCheck = "_SMA3_C"
#_featureToCheck = "_SMA89vs144"
#_featureToCheck = "_DiffD34_C"
#_featureToCheck = "_Diff_CtoO"
#_featureToCheck = "_Diff_CtoC3"
#_featureToCheck = "_PastSCH13to34"
#_featureToCheck = "_SMA13_C"
#_featureToCheck = "_Low55_L"
#_featureToCheck = "_Low10_L"
#_featureToCheck = "_sign5Stoch144"
#_featureToCheck = "_PastSCH05to13"
#_featureToCheck = "_diffStochSign144"
#_featureToCheck = "_SMA55vs89"
#_featureToCheck = "_DiffD3_C"
#_featureToCheck = "Diff_RL3_RL13"
_CV = 3 #USE 60 to split into separate 3M periods or 180 to 1M periods
_fileNameOfResults = os.path.join(config.crossValidation.getTrainingFolder(), 'IQ19p_' + str(_CV) + _featureToCheck + '.txt') # put in path and filename for results
print("Sorting training data by {0}...".format(_featureToCheck))
trainData = trainData.sort_values(by=[_featureToCheck], ascending=False)
#print(trainData)
c.timer.print_elapsed("Sorting complete")
numIterations = config.crossValidation.numIterations
for countX in range(1, numIterations+1):
iterationTimer = Timer()
print("\n\nStarting iteration {0} / {1}".format(countX, numIterations))
#units = 16
#max_feat_Rand = 12
time.sleep(1)
####---------------------------------------------------------------------------
#### This part optimized 2016-10-10
try:
units = random.randrange(17,25,1)
#unitsHalf = units/2
min_samples_leaf_Rand = 50 #random.randrange(20, 200, 1)# [100],
#_delare2 = 0.5
_delare1 = random.randrange(618, 850, 1)
_delare2 = round(_delare1/1000.0001,8)
max_feat_Rand = int(round(units * _delare2,0))
max_leaf_nodes_Rand = 10000 #random.randrange(10000, 12000, 1)# [10000 rätt],
min_samples_split_Rand = 150 #random.randrange(30, 300, 1)# [150 rätt],
max_depth_Rand = 50 #random.randrange(30, 300, 1)#[150 rätt]
n_estimators_Rand = 50 #random.randrange(100, 250, 1)# [150 rätt]
FEATURES = (random.sample([
'_STD377_C',
'_Diff_CtoL',
'_SMA89vs144',
'_sign5Stoch144',
'_SMA13vs144',
'_Diff_CtoC6',
'_Low233_L',
'_Diff_CtoC1',
'_Diff_CtoH',
'_PastSCH05to13',
'_BBU13',
'_DiffU34_L3',
'_diffStochSign144',
'_stoch55Level',
'_SMA21vs144',
'_Diff_CtoL9',
'_STD13sign',
'_DiffU233_L3',
'_PastSCH08to21',
'_Perc100_L20',
'_BBD300',
'Diff_RL13_RL100',
'_DiffD34_C',
'_SMA13_C',
'_EvNo60',
'_stoch377',
'_Perc34_L20',
'_stoch233Level',
'_Perc3_H80',
'_PastSCH13to21',
'Diff_C_RL5',
'_DiffU3_C',
'Diff_RL89_RL200',
'_Perc3_H',
'Diff_RL89_RL100',
'_Diff_CtoH6',
'_PastSCH05to34',
'_Low9_L',
'_DiffD377_H3',
'Diff_C_RL3',
'_Perc8_H',
'_Perc34_L',
'_Perc13_H',
'_High9_H',
'_Perc233_M50',
'Diff_RL8_RL55',
'_stoch21Level',
'_SMA34vs144',
'Diff_C_RL89',
'_Low55_L',
'_stoch377Level',
'Diff_C_RL13',
'Diff_RL3_RL13',
'_BBU55',
'_STD8_C',
'_High6_H',
'_Perc100_H80',
'_diffStochSign55',
'_High17_H',
'Diff_RL200_RL233',
'_STD100sign',
'_Perc21_L',
'_Diff_CtoO',
'RL144',
'_PastSCH08to34',
'_Diff_CtoH24',
'_SMA5vs8',
'_SMA3_C',
'_stoch233',
'_SMA21_C',
'_EvNo900',
'_STD3sign',
'_DiffU21_L3',
'_Low8_L',
'_Low89_L',
'_Diff_CtoO7',
'_Perc200_M50',
'_SMA233vs377',
'_Perc377_L20',
'_BBD34',
'_DiffU200_C',
'_DiffD3_C',
'_STD144sign',
'_Diff_CtoH20',
'_DiffD21_H3',
'_Low10_L',
'_BBU5',
'Diff_RL55_RL200',
'_High7_H',
'_Perc89_M50',
'_BBU34',
'Diff_RL89_RL144',
'_STD5sign',
'_PastSCH13to34',
'_Diff_CtoH1',
'_Perc377_L',
'_Diff_CtoL11',
'Diff_RL3_RL5',
'Diff_C_RL8',
'_SMA55vs89',
'Diff_RL200_RL377',
'_SMA3vs5',
'_Low13_L',
'_High12_H',
'_BBU300',
'_DiffU3_L3',
'_Low34_L',
'_High21_H',
'_diffStochSign300',
'_stoch8',
'_stoch200Level',
'_DiffU300_C',
'_DiffD89_H3',
'_Low15_L',
'_EvNo3000',
'_BBD3',
'RL233',
'_DiffD5_C',
'_SMA55vs233',
'_diffStochSign200',
'RL5',
'Diff_RL5_RL34',
'_STD200sign',
'_SMA5vs21',
'_Perc8_M50',
'_Diff_CtoL5',
'_Diff_CtoC3',
'Diff_RL55_RL144',
'_diffStochSign8',
'_Perc100_H',
'Diff_RL100_RL200',
'Diff_RL5_RL21',
'_STD55vsSign',
'_Perc3_L',
'_DiffD89_C',
'_STD5vsSign',
'_STD300_C',
'Diff_C_RL233',
'_DiffD144_H3',
'_STD21vsSign',
'_SMA3vs8',
'_Perc55_M50',
'RL13',
'_Perc55_H'
], units))
####---------------------------------------------------------------------------
_Horizont = ''.join(random.sample([
'Tgt_SCH05to08',
'Tgt_SCH05to13',
'Tgt_SCH05to21',
'Tgt_SCH05to34',
'Tgt_SCH08to13',
'Tgt_SCH08to21',
'Tgt_SCH08to34',
'Tgt_SCH13to21',
'Tgt_SCH13to34',
'Tgt_SCH21to34'
],1))
_Horizontxxx = ''.join(random.sample([
'Tgt_SCH05to08'
],1))
X = np.array(trainData[FEATURES].values) # making a np array from the pd dataset
y = trainData[_Horizont].values # put in relevant target class
logreg = RandomForestClassifier(n_estimators = n_estimators_Rand,
max_depth = max_depth_Rand,
warm_start='False',
max_features=max_feat_Rand,
min_samples_leaf=min_samples_leaf_Rand,
bootstrap='True',
max_leaf_nodes=max_leaf_nodes_Rand,
min_samples_split=min_samples_split_Rand,
random_state=42,
n_jobs=-1)
except Exception as e:
print("Error setting up initial RandomForestClassifier")
traceback.print_exc()
#pass
try:
from sklearn.model_selection import cross_val_score
scores = cross_val_score(logreg, X, y, cv=_CV)
#print(scores)
_minScore = round(np.amin(scores),6)
_maxScore = round(np.amax(scores),6)
_meanScore = round(np.mean(scores),6)
_stdScore = round(np.std(scores),6)
_SharpMin = round(_minScore/_stdScore,6)
_SharpMean = round(_meanScore/_stdScore,6)
c.timer.print_elapsed("Min score {0}".format(_minScore))
appendFile = open(_fileNameOfResults, 'a') # put in path and filename for results
appendFile.write('\n' + str(_minScore)+
str(',Time:,') +
str(iterationTimer.elapsed()) +
str(',CV:,') +
str(_CV) +
str(',Sort:,') +
str(_featureToCheck) +
str(',_maxScore:,') +
str(_maxScore) +
str(',_meanScore:,') +
str(_meanScore) +
str(',_stdScore:,') +
str(_stdScore) +
str(',_SharpMin:,') +
str(_SharpMin) +
str(',_SharpMean:,') +
str(_SharpMean) +
str(',_Target:,') +
str(_Horizont) +
str(',No Features:,') +
str(units) +
str(',Min Leaf:,') +
str(min_samples_leaf_Rand) +
str(',Max Feat:,') +
str(max_feat_Rand ) +
str(',Leafs Nodes:,') +
str(max_leaf_nodes_Rand) +
str(',Sample Split:,') +
str(min_samples_split_Rand) +
str(',Depth of the tree:,') +
str(max_depth_Rand) +
str(',No of trees:,') +
str(n_estimators_Rand) +
str(',Features: ,') +
str(FEATURES))
print("Appended row to {0}".format(_fileNameOfResults))
appendFile.close()
FEATURES = []
except Exception as e:
print("Error during iteration {0}".format(countX))
traceback.print_exc()
iterationTimer.print_elapsed("Completed iteration {0}".format(countX), False)
c.timer.print_elapsed("Total elapsed")
print("\n\n =================================")
c.timer.print_elapsed("\n\nCompleted processing after {0} iterations".format(numIterations))
print("Min score: {0:.4f}".format(_minScore))
print("CV: {0:.4f}".format(_CV))
print("Sort: {0}".format(_featureToCheck))
print("Max score: {0:.4f}".format(_meanScore))
print("Std score: {0:.4f}".format(_stdScore))
print("Sharp min: {0:.4f}".format(_SharpMin))
print("Sharp mean: {0:.4f}".format(_SharpMean))
print("Num feats: {0}".format(units))
print("Min leaf: {0}".format(min_samples_leaf_Rand))
print("Max feat: {0}".format(max_feat_Rand))
print("Leaf nodes: {0}".format(max_leaf_nodes_Rand))
print("Depth of tree: {0}".format(max_depth_Rand))
print("Num trees: {0}".format(n_estimators_Rand))
| [
"numpy.mean",
"os.path.getsize",
"random.sample",
"numpy.amin",
"random.randrange",
"numpy.std",
"sklearn.ensemble.RandomForestClassifier",
"time.sleep",
"metrics.Timer.Timer",
"config.IQConfig.IQConfig",
"numpy.array",
"pandas.read_excel",
"gui.console.Console",
"traceback.print_exc",
"... | [((1427, 1849), 'gui.console.Console', 'Console', (['""" ____ _ _ _ _ _ \n / ___|_ __ ___ ___ ___ __ ____ _| (_) __| | __ _| |_(_) ___ _ __ \n | | | \'__/ _ \\\\/ __/ __| \\\\ \\\\ / / _` | | |/ _` |/ _` | __| |/ _ \\\\| \'_ \\\\ \n | |___| | | (_) \\\\__ \\\\__ \\\\ \\\\ V / (_| | | | (_| | (_| | |_| | (_) | | | | \n \\\\____|_| \\\\___/|___/___/ \\\\_/ \\\\__,_|_|_|\\\\__,_|\\\\__,_|\\\\__|_|\\\\___/|_| |_| \n"""'], {}), '(\n """ ____ _ _ _ _ _ \n / ___|_ __ ___ ___ ___ __ ____ _| (_) __| | __ _| |_(_) ___ _ __ \n | | | \'__/ _ \\\\/ __/ __| \\\\ \\\\ / / _` | | |/ _` |/ _` | __| |/ _ \\\\| \'_ \\\\ \n | |___| | | (_) \\\\__ \\\\__ \\\\ \\\\ V / (_| | | | (_| | (_| | |_| | (_) | | | | \n \\\\____|_| \\\\___/|___/___/ \\\\_/ \\\\__,_|_|_|\\\\__,_|\\\\__,_|\\\\__|_|\\\\___/|_| |_| \n"""\n )\n', (1434, 1849), False, 'from gui.console import Console\n'), ((1835, 1845), 'config.IQConfig.IQConfig', 'IQConfig', ([], {}), '()\n', (1843, 1845), False, 'from config.IQConfig import IQConfig\n'), ((2074, 2130), 'pandas.read_excel', 'pd.read_excel', (['TrainLocation'], {'parse_dates': "['_DateStamp']"}), "(TrainLocation, parse_dates=['_DateStamp'])\n", (2087, 2130), True, 'import pandas as pd\n'), ((3970, 3977), 'metrics.Timer.Timer', 'Timer', ([], {}), '()\n', (3975, 3977), False, 'from metrics.Timer import Timer\n'), ((4101, 4114), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (4111, 4114), False, 'import time\n'), ((1923, 1953), 'os.path.getsize', 'os.path.getsize', (['TrainLocation'], {}), '(TrainLocation)\n', (1938, 1953), False, 'import os\n'), ((4267, 4294), 'random.randrange', 'random.randrange', (['(17)', '(25)', '(1)'], {}), '(17, 25, 1)\n', (4283, 4294), False, 'import random\n'), ((4447, 4476), 'random.randrange', 'random.randrange', (['(618)', '(850)', '(1)'], {}), '(618, 850, 1)\n', (4463, 4476), False, 'import random\n'), ((4919, 7155), 'random.sample', 'random.sample', (["['_STD377_C', '_Diff_CtoL', '_SMA89vs144', '_sign5Stoch144', 
'_SMA13vs144',\n '_Diff_CtoC6', '_Low233_L', '_Diff_CtoC1', '_Diff_CtoH',\n '_PastSCH05to13', '_BBU13', '_DiffU34_L3', '_diffStochSign144',\n '_stoch55Level', '_SMA21vs144', '_Diff_CtoL9', '_STD13sign',\n '_DiffU233_L3', '_PastSCH08to21', '_Perc100_L20', '_BBD300',\n 'Diff_RL13_RL100', '_DiffD34_C', '_SMA13_C', '_EvNo60', '_stoch377',\n '_Perc34_L20', '_stoch233Level', '_Perc3_H80', '_PastSCH13to21',\n 'Diff_C_RL5', '_DiffU3_C', 'Diff_RL89_RL200', '_Perc3_H',\n 'Diff_RL89_RL100', '_Diff_CtoH6', '_PastSCH05to34', '_Low9_L',\n '_DiffD377_H3', 'Diff_C_RL3', '_Perc8_H', '_Perc34_L', '_Perc13_H',\n '_High9_H', '_Perc233_M50', 'Diff_RL8_RL55', '_stoch21Level',\n '_SMA34vs144', 'Diff_C_RL89', '_Low55_L', '_stoch377Level',\n 'Diff_C_RL13', 'Diff_RL3_RL13', '_BBU55', '_STD8_C', '_High6_H',\n '_Perc100_H80', '_diffStochSign55', '_High17_H', 'Diff_RL200_RL233',\n '_STD100sign', '_Perc21_L', '_Diff_CtoO', 'RL144', '_PastSCH08to34',\n '_Diff_CtoH24', '_SMA5vs8', '_SMA3_C', '_stoch233', '_SMA21_C',\n '_EvNo900', '_STD3sign', '_DiffU21_L3', '_Low8_L', '_Low89_L',\n '_Diff_CtoO7', '_Perc200_M50', '_SMA233vs377', '_Perc377_L20', '_BBD34',\n '_DiffU200_C', '_DiffD3_C', '_STD144sign', '_Diff_CtoH20',\n '_DiffD21_H3', '_Low10_L', '_BBU5', 'Diff_RL55_RL200', '_High7_H',\n '_Perc89_M50', '_BBU34', 'Diff_RL89_RL144', '_STD5sign',\n '_PastSCH13to34', '_Diff_CtoH1', '_Perc377_L', '_Diff_CtoL11',\n 'Diff_RL3_RL5', 'Diff_C_RL8', '_SMA55vs89', 'Diff_RL200_RL377',\n '_SMA3vs5', '_Low13_L', '_High12_H', '_BBU300', '_DiffU3_L3',\n '_Low34_L', '_High21_H', '_diffStochSign300', '_stoch8',\n '_stoch200Level', '_DiffU300_C', '_DiffD89_H3', '_Low15_L', '_EvNo3000',\n '_BBD3', 'RL233', '_DiffD5_C', '_SMA55vs233', '_diffStochSign200',\n 'RL5', 'Diff_RL5_RL34', '_STD200sign', '_SMA5vs21', '_Perc8_M50',\n '_Diff_CtoL5', '_Diff_CtoC3', 'Diff_RL55_RL144', '_diffStochSign8',\n '_Perc100_H', 'Diff_RL100_RL200', 'Diff_RL5_RL21', '_STD55vsSign',\n '_Perc3_L', '_DiffD89_C', '_STD5vsSign', '_STD300_C', 
'Diff_C_RL233',\n '_DiffD144_H3', '_STD21vsSign', '_SMA3vs8', '_Perc55_M50', 'RL13',\n '_Perc55_H']", 'units'], {}), "(['_STD377_C', '_Diff_CtoL', '_SMA89vs144', '_sign5Stoch144',\n '_SMA13vs144', '_Diff_CtoC6', '_Low233_L', '_Diff_CtoC1', '_Diff_CtoH',\n '_PastSCH05to13', '_BBU13', '_DiffU34_L3', '_diffStochSign144',\n '_stoch55Level', '_SMA21vs144', '_Diff_CtoL9', '_STD13sign',\n '_DiffU233_L3', '_PastSCH08to21', '_Perc100_L20', '_BBD300',\n 'Diff_RL13_RL100', '_DiffD34_C', '_SMA13_C', '_EvNo60', '_stoch377',\n '_Perc34_L20', '_stoch233Level', '_Perc3_H80', '_PastSCH13to21',\n 'Diff_C_RL5', '_DiffU3_C', 'Diff_RL89_RL200', '_Perc3_H',\n 'Diff_RL89_RL100', '_Diff_CtoH6', '_PastSCH05to34', '_Low9_L',\n '_DiffD377_H3', 'Diff_C_RL3', '_Perc8_H', '_Perc34_L', '_Perc13_H',\n '_High9_H', '_Perc233_M50', 'Diff_RL8_RL55', '_stoch21Level',\n '_SMA34vs144', 'Diff_C_RL89', '_Low55_L', '_stoch377Level',\n 'Diff_C_RL13', 'Diff_RL3_RL13', '_BBU55', '_STD8_C', '_High6_H',\n '_Perc100_H80', '_diffStochSign55', '_High17_H', 'Diff_RL200_RL233',\n '_STD100sign', '_Perc21_L', '_Diff_CtoO', 'RL144', '_PastSCH08to34',\n '_Diff_CtoH24', '_SMA5vs8', '_SMA3_C', '_stoch233', '_SMA21_C',\n '_EvNo900', '_STD3sign', '_DiffU21_L3', '_Low8_L', '_Low89_L',\n '_Diff_CtoO7', '_Perc200_M50', '_SMA233vs377', '_Perc377_L20', '_BBD34',\n '_DiffU200_C', '_DiffD3_C', '_STD144sign', '_Diff_CtoH20',\n '_DiffD21_H3', '_Low10_L', '_BBU5', 'Diff_RL55_RL200', '_High7_H',\n '_Perc89_M50', '_BBU34', 'Diff_RL89_RL144', '_STD5sign',\n '_PastSCH13to34', '_Diff_CtoH1', '_Perc377_L', '_Diff_CtoL11',\n 'Diff_RL3_RL5', 'Diff_C_RL8', '_SMA55vs89', 'Diff_RL200_RL377',\n '_SMA3vs5', '_Low13_L', '_High12_H', '_BBU300', '_DiffU3_L3',\n '_Low34_L', '_High21_H', '_diffStochSign300', '_stoch8',\n '_stoch200Level', '_DiffU300_C', '_DiffD89_H3', '_Low15_L', '_EvNo3000',\n '_BBD3', 'RL233', '_DiffD5_C', '_SMA55vs233', '_diffStochSign200',\n 'RL5', 'Diff_RL5_RL34', '_STD200sign', '_SMA5vs21', '_Perc8_M50',\n '_Diff_CtoL5', 
'_Diff_CtoC3', 'Diff_RL55_RL144', '_diffStochSign8',\n '_Perc100_H', 'Diff_RL100_RL200', 'Diff_RL5_RL21', '_STD55vsSign',\n '_Perc3_L', '_DiffD89_C', '_STD5vsSign', '_STD300_C', 'Diff_C_RL233',\n '_DiffD144_H3', '_STD21vsSign', '_SMA3vs8', '_Perc55_M50', 'RL13',\n '_Perc55_H'], units)\n", (4932, 7155), False, 'import random\n'), ((12677, 12713), 'numpy.array', 'np.array', (['trainData[FEATURES].values'], {}), '(trainData[FEATURES].values)\n', (12685, 12713), True, 'import numpy as np\n'), ((12865, 13175), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators_Rand', 'max_depth': 'max_depth_Rand', 'warm_start': '"""False"""', 'max_features': 'max_feat_Rand', 'min_samples_leaf': 'min_samples_leaf_Rand', 'bootstrap': '"""True"""', 'max_leaf_nodes': 'max_leaf_nodes_Rand', 'min_samples_split': 'min_samples_split_Rand', 'random_state': '(42)', 'n_jobs': '(-1)'}), "(n_estimators=n_estimators_Rand, max_depth=\n max_depth_Rand, warm_start='False', max_features=max_feat_Rand,\n min_samples_leaf=min_samples_leaf_Rand, bootstrap='True',\n max_leaf_nodes=max_leaf_nodes_Rand, min_samples_split=\n min_samples_split_Rand, random_state=42, n_jobs=-1)\n", (12887, 13175), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((13763, 13800), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['logreg', 'X', 'y'], {'cv': '_CV'}), '(logreg, X, y, cv=_CV)\n', (13778, 13800), False, 'from sklearn.model_selection import cross_val_score\n'), ((11822, 12018), 'random.sample', 'random.sample', (["['Tgt_SCH05to08', 'Tgt_SCH05to13', 'Tgt_SCH05to21', 'Tgt_SCH05to34',\n 'Tgt_SCH08to13', 'Tgt_SCH08to21', 'Tgt_SCH08to34', 'Tgt_SCH13to21',\n 'Tgt_SCH13to34', 'Tgt_SCH21to34']", '(1)'], {}), "(['Tgt_SCH05to08', 'Tgt_SCH05to13', 'Tgt_SCH05to21',\n 'Tgt_SCH05to34', 'Tgt_SCH08to13', 'Tgt_SCH08to21', 'Tgt_SCH08to34',\n 'Tgt_SCH13to21', 'Tgt_SCH13to34', 'Tgt_SCH21to34'], 1)\n", (11835, 12018), False, 'import random\n'), ((12487, 
12522), 'random.sample', 'random.sample', (["['Tgt_SCH05to08']", '(1)'], {}), "(['Tgt_SCH05to08'], 1)\n", (12500, 12522), False, 'import random\n'), ((13623, 13644), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (13642, 13644), False, 'import traceback\n'), ((13860, 13875), 'numpy.amin', 'np.amin', (['scores'], {}), '(scores)\n', (13867, 13875), True, 'import numpy as np\n'), ((13905, 13920), 'numpy.amax', 'np.amax', (['scores'], {}), '(scores)\n', (13912, 13920), True, 'import numpy as np\n'), ((13951, 13966), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (13958, 13966), True, 'import numpy as np\n'), ((13996, 14010), 'numpy.std', 'np.std', (['scores'], {}), '(scores)\n', (14002, 14010), True, 'import numpy as np\n'), ((16249, 16270), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (16268, 16270), False, 'import traceback\n')] |
import pandas as pd
import numpy as np
import nltk
import collections
import pickle
import pyhanlp
from collections import Iterable
from collections import Counter
from pandas import DataFrame
from sklearn.decomposition import PCA
from pyhanlp import *
"""
一、加载数据:
1.加载问答对
2.加载预训练的词向量模型
二、问题的向量化:
1.问题分词,得到语料库
2.将词转化为向量
3.将问题由词向量矩阵转化为问题向量
4.将问题向量组合得到问题向量矩阵
三、将结果保存
"""
#数据输入
"""
输入:csv文件的路径
输出:Dataframe, 列名称为Question Answer
"""
def load_data(data_path):
    """Load the question/answer pairs from a CSV file.

    Args:
        data_path: path of the CSV file to read.

    Returns:
        pandas.DataFrame with the raw question/answer data.
    """
    # The old Excel-based loading code was removed; the cleaned data
    # is now read directly from CSV.
    return pd.read_csv(data_path)
#清洗数据(分词, 删除停用词)
"""
输入:str原始问题,停用词,分词器
输出:str[],分好词的列表
"""
def clean(sentence, stop_words, segment):
    """Tokenize *sentence* with *segment* and drop stop words.

    Args:
        sentence: raw question text.
        stop_words: collection of tokens to discard.
        segment: tokenizer object exposing a ``segment(str)`` method
            (e.g. a HanLP segmenter).

    Returns:
        list[str]: the surviving tokens, in original order.
    """
    # Tokens come back as Java objects from HanLP, so normalize with str().
    return [str(word) for word in segment.segment(sentence)
            if str(word) not in stop_words]
#获得输入文本
"""
输入:导入的文件 Dataframe, 对应的column名称, stop_words停用词
输出:list, 文本集
"""
def get_corpus(source, column, stop_words):
    """Tokenize every entry of ``source[column]`` into a corpus.

    Args:
        source: DataFrame holding the raw questions.
        column: name of the column to tokenize (e.g. ``"Question"``).
        stop_words: tokens to drop during cleaning.

    Returns:
        list[list[str]]: one token list per row.
    """
    # Hide part-of-speech tags so tokens come back as plain words.
    HanLP.Config.ShowTermNature = False
    tokenizer = JClass("com.hankcs.hanlp.tokenizer.NLPTokenizer")
    return [clean(sentence, stop_words, tokenizer) for sentence in source[column]]
#raw sentence vector
"""
输入:词向量模型, 分好词的句子
输出:句子对应的向量
注意,由于有些词的词频过低, 对于keyerror,这里将以0向量代替
"""
def s2v(model, sentence, dim=300):
    """Map a tokenized sentence to a list of word vectors.

    Words missing from *model* (e.g. low-frequency words pruned during
    training) are replaced by a zero vector so the result stays
    rectangular.

    Args:
        model: mapping from word to vector (e.g. a word2vec model).
        sentence: iterable of tokens.
        dim: dimensionality of the zero fallback vector; defaults to
            300 to match the pretrained model used by this project.

    Returns:
        list of per-word vectors, one entry per token.
    """
    vec = []
    for word in sentence:
        try:
            vec.append(model[word])
        except KeyError:
            # Unknown word: substitute a zero vector of the right size.
            vec.append([0] * dim)
    return vec
#获得问题矩阵
def get_Question_matrix(model, corpus, pattern = 'SIF', SIF_weight = 0.0001):
    """Build one embedding vector per question from its word vectors.

    With ``pattern='AVG'`` each question vector is the plain mean of its
    word vectors.  With ``pattern='SIF'`` the Smooth Inverse Frequency
    weighting is used: each word is weighted by a / (a + p(w)) and the
    first principal component of the resulting matrix is removed.

    Args:
        model: mapping from word to vector (see ``s2v``).
        corpus: list of tokenized questions.
        pattern: 'SIF' (default) or 'AVG'.
        SIF_weight: the ``a`` parameter of the SIF weighting.

    Returns:
        tuple ``(Q_matrix, fdist, u)``: the question-by-dimension matrix,
        the corpus word-frequency distribution and the removed principal
        direction.  For ``pattern='AVG'`` the last two are placeholder 0s.
    """
    if pattern == 'AVG':
        Q = []
        for query in corpus:
            tmp_vec = np.array(s2v(model, query))
            Q_vec = tmp_vec.mean(axis = 0)
            Q.append(Q_vec)
        Q_matrix = np.array(Q)
        return (Q_matrix, 0, 0)
    elif pattern == 'SIF':
        Q = []
        raw = []
        merge = []
        weight = []
        # Pool all tokens to estimate corpus-wide word frequencies.
        for i in range(len(corpus)):
            merge.extend(corpus[i])
        fdist = nltk.probability.FreqDist(merge)
        count = 0
        for query in corpus:
            tmp_vec = np.array(s2v(model, query))
            # SIF weight a / (a + p(w)) for every word of this question.
            weight_matrix = np.array([[SIF_weight/(SIF_weight + fdist[word]/fdist.N()) for word in query]])
            tmp = tmp_vec * weight_matrix.T
            Q_vec = tmp.mean(axis = 0)
            Q.append(Q_vec)
            weight.append(weight_matrix)
            raw.append(tmp_vec)
        #print(weight[3455])
        #print(raw[3455])
        Q_matrix = np.array(Q)
        #print(Q_matrix[3455])
        # Remove the first principal component (common discourse direction).
        pca = PCA(n_components = 1)
        u = pca.fit_transform(Q_matrix.T)
        res = Q_matrix - np.dot(Q_matrix, np.dot(u, u.T))
        #print(res[3455])
        return (res, fdist, u)
class question_database():
    """Bundle of everything needed to embed new queries consistently:
    the question-embedding matrix (``Q_matrix``), the corpus
    word-frequency distribution used for SIF weights (``fdist``) and
    the principal direction removed from the embeddings (``singular_v``).
    """
    def __init__(self, Q_matrix, fdist, singular_v):
        self.Q_matrix, self.fdist, self.singular_v = (
            Q_matrix, fdist, singular_v)
def main():
    """Build the SIF question-embedding database and pickle it to disk.

    Pipeline: load the pretrained word vectors, read the cleaned
    question/answer CSV, tokenize the questions, compute SIF question
    embeddings and persist everything as a ``question_database`` pickle.
    """
    # Punctuation / filler tokens dropped during tokenization.
    stop_words = {'。', ',', '?', '年', '的', ''}
    path = "C:/Users/leo/Desktop/knowledge_quiz"
    # The pretrained vectors are stored as a pickled object in a .npy file.
    word_vectors = np.load('simplified_model.npy').item()
    print("load model successfully")
    qa_table = load_data(path + "/DATA/clean.csv")
    print("load data successfully")
    token_lists = get_corpus(qa_table, "Question", stop_words)
    print("generate corpus successfully")
    Q_matrix, fdist, singular_v = get_Question_matrix(word_vectors, token_lists, 'SIF')
    print("generate question matrix successfully")
    database = question_database(Q_matrix, fdist, singular_v)
    # Pickle protocol 0 keeps the dump ASCII-compatible.
    with open(path + "/DATA/QD.txt", 'wb') as out_file:
        pickle.dump(database, out_file, 0)
    print("question database saved successfully")
    return 0

if __name__ == '__main__':
    main()
| [
"pickle.dump",
"pandas.read_csv",
"sklearn.decomposition.PCA",
"nltk.probability.FreqDist",
"numpy.array",
"numpy.dot",
"numpy.load"
] | [((689, 711), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (700, 711), True, 'import pandas as pd\n'), ((2318, 2329), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (2326, 2329), True, 'import numpy as np\n'), ((4228, 4249), 'pickle.dump', 'pickle.dump', (['QD', 'f', '(0)'], {}), '(QD, f, 0)\n', (4239, 4249), False, 'import pickle\n'), ((2558, 2590), 'nltk.probability.FreqDist', 'nltk.probability.FreqDist', (['merge'], {}), '(merge)\n', (2583, 2590), False, 'import nltk\n'), ((3095, 3106), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (3103, 3106), True, 'import numpy as np\n'), ((3154, 3173), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (3157, 3173), False, 'from sklearn.decomposition import PCA\n'), ((3695, 3726), 'numpy.load', 'np.load', (['"""simplified_model.npy"""'], {}), "('simplified_model.npy')\n", (3702, 3726), True, 'import numpy as np\n'), ((3262, 3276), 'numpy.dot', 'np.dot', (['u', 'u.T'], {}), '(u, u.T)\n', (3268, 3276), True, 'import numpy as np\n')] |
import numpy as np
from math import sqrt, ceil
import matplotlib.pyplot as plt
# Measured field strengths E with uncertainties u_E, and two independent
# velocity estimates per point with their uncertainties.
# v_mid / v_peak: presumably derived from the pulse midpoint and peak —
# TODO confirm against the acquisition code.
E = [4000.0, 5000.0, 6000.0, 7000.0, 8000.0, 9000.0, 10000.0, 11000.0, 12000.0, 13000.0, 14000.0, 15000.0, 16000.0, 17000.0, 18000.0, 19000.0, 20000.0, 21000.0, 22000.0, 23000.0, 24000.0, 25000.0, 26000.0, 27000.0, 28000.0, 29000.0, 30000.0]
u_E = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
v_mid = [1967.507563683817, 2754.1121967433296, 2860.155389711077, 3303.8327070484092, 3770.8463319490797, 4211.488224983781, 4670.49747090135, 5122.67095943773, 5590.896451460232, 6054.076862913025, 6513.368518488584, 6974.155656164995, 7447.4068462055475, 7919.285573840541, 8406.96122868986, 8923.404248465697, 9438.595897897289, 9899.36337995089, 10446.310238334934, 10974.36184263928, 11484.136638546455, 12028.900499092439, 12584.90256820592, 13155.586546920835, 13945.725185965473, 14282.550900481981, 14925.958726932711]
v_peak = [1940.10627499059, 2435.6421950840186, 2840.7086934476856, 3291.573741914031, 3741.337438154025, 4205.362961940324, 4663.185498585765, 5124.352363172149, 5599.6226798745665, 6069.668276677691, 6534.792714111471, 7005.335045153709, 7482.18486270738, 7984.82894052917, 8484.957715727543, 9051.852681598622, 9573.311424213358, 10052.137704173512, 10602.084829781665, 11138.238435537047, 11676.199148538035, 12235.946687745394, 12813.84091501093, 13442.96834271769, 14281.806753263145, 14706.691029357778, 15539.092296024906]
u_v_mid = [16.626768053362852, 920.7144631940888, 24.156348988963423, 21.265222564629177, 78.51924533820657, 26.868581862752542, 27.415787319620566, 32.082206839590235, 35.62009821301411, 32.12488402586501, 32.7911750074625, 30.601459803293313, 45.507132069806666, 23.761379291261804, 43.78278801721636, 63.81869271582654, 66.53453935004718, 71.62495792443546, 72.39941902059736, 86.3181138990365, 89.47952122648749, 86.24839853473648, 109.52306113885368, 120.47428718277249, 1163.8559826028068, 128.4508633961017, 148.62757370194214]
u_v_peak = [16.74191802902827, 314.24045664483367, 24.477749149691544, 25.821599580885223, 20.932770689482854, 27.243558019380284, 29.15092608354166, 33.42128818792276, 33.15286419640866, 33.7291401492823, 28.08068728247306, 34.37447556018603, 41.146221031108965, 32.821068312441525, 55.20175326714631, 78.268327762063, 75.59082707197807, 80.33348051801744, 63.69121628762421, 79.41375293138934, 83.91940615657937, 82.34918473609656, 97.15785721929063, 110.62032928448185, 1207.3014024489328, 231.54108489966268, 236.54416251005458]
# Combine the two estimates into an inverse-variance weighted mean.
v_avg = []
u2_int = []  # internal variance of the weighted mean
u2_ext = []  # external (scatter-based) variance estimate
for i in range(len(v_peak)):
    # Internal variance: u = 1 / sum(1/sigma_i^2).
    u = 1.0 / (1.0 / u_v_mid[i] ** 2.0 + 1.0 / u_v_peak[i] ** 2.0)
    u2_int.append(u)
    # Inverse-variance weighted average of the two estimates.
    v = (v_mid[i] / u_v_mid[i]**2 + v_peak[i] / u_v_peak[i]**2) * u
    v_avg.append(v)
    # External variance from the scatter about v.
    # NOTE(review): residuals are divided by u_v_*[i], not u_v_*[i]**2 as
    # the standard external-variance formula would use — confirm intent.
    u_e = ((v_mid[i] - v)**2.0 / u_v_mid[i] + (v_peak[i] - v)**2.0 / u_v_peak[i]) * u
    u2_ext.append(u_e)
# Adopt the more conservative (larger) of the two variance estimates.
u_v = []
for i in range(len(u2_int)):
    u_v.append(sqrt(max(u2_int[i], u2_ext[i])))
print('Noise removal...')
# Drop points whose uncertainty exceeds the value itself.  Reverse
# iteration keeps the indices valid while deleting; note index 0 is
# never tested (range stops at 1).
for i in range(len(u_E) - 1, 0, -1):
    if u_v[i] > v_avg[i]:
        del u_v[i]
        del v_avg[i]
        del E[i]
        del u_E[i]
print('Rounding values...')
# Round uncertainties to integers and velocities up to the next 100.
for i in range(len(u_v)):
    u_v[i] = round(u_v[i])
    v_avg[i] = ceil(v_avg[i] / 100) * 100
print('Counting trend line coeffs...')
# Quadratic least-squares fit v(E) = a*E^2 + b*E + c with covariance.
trend, cov = np.polyfit(x=E, y=v_avg, deg=2, cov=True)
a = float(trend[0])
b = float(trend[1])
c = float(trend[2])
# Sample the fitted parabola densely for plotting.
trend_y_vals = []
trend_x_vals = []
for x_val in range(3000, 31000):
    trend_y_vals.append(a * (x_val * x_val) + b * x_val + c)
    trend_x_vals.append(x_val)
print(trend)
# Standard errors of the fit coefficients (sqrt of covariance diagonal).
for el in np.diag(cov):
    print(sqrt(el))
print('Plotting...')
print('=======================================')
print(E)
print(v_avg)
print(u_v)
print(u_E)
print('=======================================')
# Build '&'-separated LaTeX table rows for the report, in units of 1000.
x_string = ''
y_string = ''
for i in range(len(v_avg)):
    x_string += str(E[i]) + '&'
    y_string += '$' + str(int(str(v_avg[i]).split('.')[0])/1000.0) + '\pm' + str(u_v[i] / 1000.0)[:-1] + '$&'
print(len(v_avg))
print(len(u_v))
print(x_string)
print(y_string)
# Reduced chi-square of the quadratic fit.
# NOTE(review): divides by N-1 although the fit has 3 parameters
# (N-3 degrees of freedom) — confirm this is intentional.
chi = 0
for i in range(len(E)):
    chi += ((v_avg[i] - (a * E[i]**2 + b * E[i] + c)) / u_v[i])**2
chi /= len(E) - 1
print(f'chi = {chi}')
plt.errorbar(E, v_avg, xerr=u_E, yerr=u_v, fmt='o')
plt.plot(trend_x_vals, trend_y_vals)
plt.ylabel('v (m / s)')
plt.xlabel('E (V / m)')
plt.show()
print('Finished.')
| [
"math.ceil",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"math.sqrt",
"numpy.diag",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.show"
] | [((3348, 3389), 'numpy.polyfit', 'np.polyfit', ([], {'x': 'E', 'y': 'v_avg', 'deg': '(2)', 'cov': '(True)'}), '(x=E, y=v_avg, deg=2, cov=True)\n', (3358, 3389), True, 'import numpy as np\n'), ((3639, 3651), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (3646, 3651), True, 'import numpy as np\n'), ((4249, 4300), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['E', 'v_avg'], {'xerr': 'u_E', 'yerr': 'u_v', 'fmt': '"""o"""'}), "(E, v_avg, xerr=u_E, yerr=u_v, fmt='o')\n", (4261, 4300), True, 'import matplotlib.pyplot as plt\n'), ((4301, 4337), 'matplotlib.pyplot.plot', 'plt.plot', (['trend_x_vals', 'trend_y_vals'], {}), '(trend_x_vals, trend_y_vals)\n', (4309, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4338, 4361), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""v (m / s)"""'], {}), "('v (m / s)')\n", (4348, 4361), True, 'import matplotlib.pyplot as plt\n'), ((4362, 4385), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""E (V / m)"""'], {}), "('E (V / m)')\n", (4372, 4385), True, 'import matplotlib.pyplot as plt\n'), ((4386, 4396), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4394, 4396), True, 'import matplotlib.pyplot as plt\n'), ((3267, 3287), 'math.ceil', 'ceil', (['(v_avg[i] / 100)'], {}), '(v_avg[i] / 100)\n', (3271, 3287), False, 'from math import sqrt, ceil\n'), ((3663, 3671), 'math.sqrt', 'sqrt', (['el'], {}), '(el)\n', (3667, 3671), False, 'from math import sqrt, ceil\n')] |
import os
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from flopy.utils import MfList
def export_pdf(filename, array, text,
               nodata=None, mfarray_type='array2d',
               float_fmt='{:.2f}', verbose=False):
    """Export a model array to a PDF, one page per 2-D slice.

    Parameters
    ----------
    filename : str
        Output PDF path.
    array : np.ndarray
        Array to plot, interpreted according to ``mfarray_type``.
    text : str
        Base title for each page.
    nodata : scalar, optional
        Value excluded from the min/max/mean title statistics.
    mfarray_type : str
        One of 'array2d', 'array3d', 'transient2d', 'transientlist'.
    float_fmt : str
        Format spec for the title statistics; overridden below for very
        small values ('{:.6e}') and integer arrays ('{:.0f}').
    verbose : bool
        Print elapsed time when True.
    """
    t0 = time.time()
    # Pick a numeric format for the title statistics from the data itself.
    if array.min() < 0.01:
        float_fmt = '{:.6e}'
    elif 'int' in array.dtype.name:
        float_fmt = '{:.0f}'
    # More than two dimensions -> one multipage PDF, one page per slice.
    if len(array.shape) > 2:
        multipage_pdf = PdfPages(filename)
    if mfarray_type == 'array2d':
        multipage_pdf = False
        # Wrap the 2-D array so the nested period/layer loops below work.
        array = [np.reshape(array, (1, array.shape[0],
                                   array.shape[1]))]
    elif mfarray_type == 'array3d':
        array = [array]
    elif mfarray_type == 'transient2d' or mfarray_type == 'transientlist':
        # Already iterable as (period, layer, row, col).
        # NOTE(review): if such an array is only 2-D, ``multipage_pdf`` is
        # never assigned above and the loop body raises NameError —
        # confirm callers always pass >2-D arrays for these types.
        pass
    for per, array3d in enumerate(array):
        for k, array2d in enumerate(array3d):
            fig, ax = plt.subplots()
            # Statistics are computed with nodata masked out as NaN...
            arr = array2d.astype(float)
            if nodata is not None:
                arr[arr == nodata] = np.nan
            mn = np.nanmin(arr)
            mx = np.nanmax(arr)
            mean = np.nanmean(arr)
            # ...but the image itself shows the raw values.
            im = ax.imshow(array2d)
            titletxt = '{0}'.format(text)
            if mfarray_type == 'array3d':
                titletxt += ', layer {}'.format(k)
            elif mfarray_type == 'transientlist':
                titletxt += ', period {}, layer {}'.format(per, k)
            # First .format() injects the numeric spec; the second fills values.
            titletxt += '\nmean: {0}, min: {0}, max: {0}'.format(float_fmt)
            ax.set_title(titletxt.format(mean, mn, mx))
            plt.colorbar(im, shrink=0.8)
            if multipage_pdf:
                multipage_pdf.savefig()
            else:
                plt.savefig(filename)
            plt.close()
    if multipage_pdf:
        multipage_pdf.close()
    if verbose:
        print("pdf export took {:.2f}s".format(time.time() - t0))
def export_pdf_bar_summary(filename, array, title=None, xlabel='Stress Period',
                           method='mean'):
    """Save a single-page PDF bar chart of a per-period statistic.

    The reduction named by *method* (e.g. 'mean', 'sum') is looked up on
    ``np.ma`` and applied to ``array.data`` over every axis except the
    first, yielding one bar per stress period.

    Parameters
    ----------
    filename : str
        Output PDF path.
    array : masked array-like
        Data with the period axis first; must expose ``.data`` and ``.ndim``.
    title : str, optional
        Chart title; omitted when None.
    xlabel : str, optional
        X-axis label; omitted when None.
    method : str
        Name of the ``np.ma`` reduction to apply.
    """
    reducer = getattr(np.ma, method)
    collapse_axes = tuple(range(1, array.ndim))
    per_period = reducer(array.data, axis=collapse_axes).data
    fig, ax = plt.subplots()
    periods = np.arange(len(per_period), dtype=int)
    ax.bar(periods, per_period)
    # Label roughly ten ticks regardless of how many periods there are.
    stride = max(1, int(np.round(len(per_period) / 10, 0)))
    ax.set_xticks(periods[::stride])
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    ax.set_ylabel(f'{method.capitalize()}, in model units')
    plt.savefig(filename)
    plt.close()
def sfr_baseflow_pdf(outfile, df, pointsize=0.5, verbose=False):
    """Scatter-plot simulated base flow on the model grid.

    Reaches with ``Qmean == 0`` are drawn as small grey dots; flowing
    reaches get a point size proportional to log10 of their mean flow.

    Parameters
    ----------
    outfile : str
        Output image path.
    df : DataFrame
        Results with columns ``i``, ``j`` (plotted as y/x) and ``Qmean``.
    pointsize : float
        Marker size for the zero-flow reaches.
    verbose : bool
        Print elapsed time when True.
    """
    t0 = time.time()
    fig, ax = plt.subplots()
    flowing = df.loc[df.Qmean != 0]
    no_flow = df.loc[df.Qmean == 0]
    ax.scatter(no_flow.j, no_flow.i, s=pointsize, color='0.5')
    # Size flowing reaches by log10(Q), clipped below at 0.1 so every
    # point remains visible.
    sizes = np.log10(flowing.Qmean)
    sizes[sizes < 0] = 0.1
    ax.scatter(flowing.j, flowing.i, s=sizes, alpha=0.5)
    ax.invert_yaxis()
    ax.set_title('Simulated base flow')
    plt.savefig(outfile)
    print('wrote {}'.format(outfile))
    plt.close()
    if verbose:
        print("pdf export took {:.2f}s".format(time.time() - t0))
def sfr_qaquifer_pdf(outfile, df, pointsize=0.5, verbose=False):
    """Scatter-plot simulated stream-aquifer exchange on the model grid.

    Losing reaches (``Qaquifer > 0``) are colored with a red colormap and
    gaining reaches (``Qaquifer < 0``) with a blue one; both color scales
    are capped at the 95th percentile of the gaining fluxes.  Zero-flow
    reaches are drawn as small grey dots.

    Parameters
    ----------
    outfile : str
        Output image path.
    df : DataFrame
        Results with columns ``i``, ``j`` (plotted as y/x), ``Qmean``
        and ``Qaquifer``.
    pointsize : float
        Marker size for all reaches.
    verbose : bool
        Print elapsed time when True.
    """
    t0 = time.time()
    fig, ax = plt.subplots()
    gaining = df.loc[df.Qaquifer < 0]
    losing = df.loc[df.Qaquifer > 0]
    dry = df.loc[df.Qmean == 0]
    ax.scatter(dry.j, dry.i, pointsize, color='0.5')
    if len(losing) > 0:
        Qpointcolors_l = np.abs(losing.Qaquifer)
        # Scale the losing colormap to the gaining fluxes (when present)
        # so the two panels share a comparable color range.
        vmax = None
        if len(gaining) > 0:
            vmax = np.percentile(np.abs(gaining.Qaquifer), 95)
        ax.scatter(losing.j, losing.i,
                   s=pointsize, c=Qpointcolors_l,
                   vmax=vmax,
                   cmap='Reds')
    if len(gaining) > 0:
        Qpointcolors_g = np.abs(gaining.Qaquifer)
        # Cap at the 95th percentile so outliers don't wash out the scale.
        vmax = np.percentile(Qpointcolors_g, 95)
        ax.scatter(gaining.j, gaining.i,
                   s=pointsize, c=Qpointcolors_g,
                   vmax=vmax,
                   cmap='Blues')
    ax.invert_yaxis()
    ax.set_title('Simulated stream-aquifer interactions')
    plt.savefig(outfile)
    print('wrote {}'.format(outfile))
    plt.close()
    if verbose:
        print("pdf export took {:.2f}s".format(time.time() - t0))
| [
"numpy.abs",
"numpy.log10",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.close",
"numpy.nanmean",
"numpy.nanmin",
"numpy.nanmax",
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.percentile",
"time.time",
"matplotlib.pyplot.subplots"
] | [((309, 320), 'time.time', 'time.time', ([], {}), '()\n', (318, 320), False, 'import time\n'), ((2159, 2173), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2171, 2173), True, 'import matplotlib.pyplot as plt\n'), ((2566, 2587), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (2577, 2587), True, 'import matplotlib.pyplot as plt\n'), ((2592, 2603), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2601, 2603), True, 'import matplotlib.pyplot as plt\n'), ((2768, 2779), 'time.time', 'time.time', ([], {}), '()\n', (2777, 2779), False, 'import time\n'), ((2794, 2808), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2806, 2808), True, 'import matplotlib.pyplot as plt\n'), ((2946, 2965), 'numpy.log10', 'np.log10', (['wet.Qmean'], {}), '(wet.Qmean)\n', (2954, 2965), True, 'import numpy as np\n'), ((3126, 3146), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (3137, 3146), True, 'import matplotlib.pyplot as plt\n'), ((3189, 3200), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3198, 3200), True, 'import matplotlib.pyplot as plt\n'), ((3476, 3487), 'time.time', 'time.time', ([], {}), '()\n', (3485, 3487), False, 'import time\n'), ((3502, 3516), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3514, 3516), True, 'import matplotlib.pyplot as plt\n'), ((4377, 4397), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (4388, 4397), True, 'import matplotlib.pyplot as plt\n'), ((4440, 4451), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4449, 4451), True, 'import matplotlib.pyplot as plt\n'), ((496, 514), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['filename'], {}), '(filename)\n', (504, 514), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((3727, 3750), 'numpy.abs', 'np.abs', (['losing.Qaquifer'], {}), '(losing.Qaquifer)\n', (3733, 3750), True, 'import numpy as np\n'), 
((4064, 4088), 'numpy.abs', 'np.abs', (['gaining.Qaquifer'], {}), '(gaining.Qaquifer)\n', (4070, 4088), True, 'import numpy as np\n'), ((4104, 4137), 'numpy.percentile', 'np.percentile', (['Qpointcolors_g', '(95)'], {}), '(Qpointcolors_g, 95)\n', (4117, 4137), True, 'import numpy as np\n'), ((597, 651), 'numpy.reshape', 'np.reshape', (['array', '(1, array.shape[0], array.shape[1])'], {}), '(array, (1, array.shape[0], array.shape[1]))\n', (607, 651), True, 'import numpy as np\n'), ((947, 961), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (959, 961), True, 'import matplotlib.pyplot as plt\n'), ((1100, 1114), 'numpy.nanmin', 'np.nanmin', (['arr'], {}), '(arr)\n', (1109, 1114), True, 'import numpy as np\n'), ((1132, 1146), 'numpy.nanmax', 'np.nanmax', (['arr'], {}), '(arr)\n', (1141, 1146), True, 'import numpy as np\n'), ((1166, 1181), 'numpy.nanmean', 'np.nanmean', (['arr'], {}), '(arr)\n', (1176, 1181), True, 'import numpy as np\n'), ((1615, 1643), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'shrink': '(0.8)'}), '(im, shrink=0.8)\n', (1627, 1643), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1793), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1791, 1793), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1769), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (1759, 1769), True, 'import matplotlib.pyplot as plt\n'), ((3833, 3857), 'numpy.abs', 'np.abs', (['gaining.Qaquifer'], {}), '(gaining.Qaquifer)\n', (3839, 3857), True, 'import numpy as np\n'), ((1909, 1920), 'time.time', 'time.time', ([], {}), '()\n', (1918, 1920), False, 'import time\n'), ((3264, 3275), 'time.time', 'time.time', ([], {}), '()\n', (3273, 3275), False, 'import time\n'), ((4515, 4526), 'time.time', 'time.time', ([], {}), '()\n', (4524, 4526), False, 'import time\n')] |
"""Credit to <NAME>:
https://github.com/MilesCranmer/easy_normalizing_flow/blob/master/flow.py
"""
import torch
from torch import nn, optim
from torch.functional import F
import numpy as np
####
# From Karpathy's MADE implementation
####
# Global switch: when True, the forward passes below print NaN diagnostics.
DEBUG = False
class MaskedLinear(nn.Linear):
    """Same as nn.Linear except it carries a configurable 0/1 mask on the weights.

    The mask is stored as a (non-trainable) buffer so it moves with the module
    across devices and is saved in the state dict.
    """
    def __init__(self, in_features, out_features, bias=True):
        super().__init__(in_features, out_features, bias)
        # Start fully connected (all-ones mask), same shape as the weight matrix.
        self.register_buffer('mask', torch.ones(out_features, in_features))
    def set_mask(self, mask):
        # `mask` is a numpy boolean/int array shaped (in, out); transpose to
        # match the (out, in) weight layout before copying into the buffer.
        self.mask.data.copy_(torch.from_numpy(mask.astype(np.uint8).T))
    def forward(self, input):
        if DEBUG:
            print("masked linear: ", torch.any(torch.isnan(input)), input.mean())
        # Element-wise masking zeroes out forbidden connections.
        return F.linear(input, self.mask * self.weight, self.bias)
class MADE(nn.Module):
    """Masked Autoencoder for Distribution Estimation (Germain et al., 2015)."""
    def __init__(self, nin, hidden_sizes,
                 nout, num_masks=1, natural_ordering=False):
        """
        nin: integer; number of inputs
        hidden sizes: a list of integers; number of units in hidden layers
        nout: integer; number of outputs, which usually collectively parameterize some kind of 1D distribution
              note: if nout is e.g. 2x larger than nin (perhaps the mean and std), then the first nin
              will be all the means and the second nin will be stds. i.e. output dimensions depend on the
              same input dimensions in "chunks" and should be carefully decoded downstream appropriately.
              the output of running the tests for this file makes this a bit more clear with examples.
        num_masks: can be used to train ensemble over orderings/connections
        natural_ordering: force natural ordering of dimensions, don't use random permutations
        """
        super().__init__()
        self.nin = nin
        self.nout = nout
        self.hidden_sizes = hidden_sizes
        assert self.nout % self.nin == 0, "nout must be integer multiple of nin"
        # define a simple MLP neural net: MaskedLinear + ReLU pairs
        self.net = []
        hs = [nin] + hidden_sizes + [nout]
        for h0, h1 in zip(hs, hs[1:]):
            self.net.extend([
                MaskedLinear(h0, h1),
                nn.ReLU(),
            ])
        self.net.pop() # pop the last ReLU for the output layer
        self.net = nn.Sequential(*self.net)
        # seeds for orders/connectivities of the model ensemble
        self.natural_ordering = natural_ordering
        self.num_masks = num_masks
        self.seed = 0 # for cycling through num_masks orderings
        self.m = {}
        self.update_masks() # builds the initial self.m connectivity
        # note, we could also precompute the masks and cache them, but this
        # could get memory expensive for large number of masks.
    def update_masks(self):
        """(Re)sample an input ordering and connectivity, and install the masks."""
        if self.m and self.num_masks == 1: return # only a single seed, skip for efficiency
        L = len(self.hidden_sizes)
        # fetch the next seed and construct a random stream
        rng = np.random.RandomState(self.seed)
        self.seed = (self.seed + 1) % self.num_masks
        # sample the order of the inputs and the connectivity of all neurons;
        # self.m[-1] is the input ordering, self.m[l] are hidden-unit degrees
        self.m[-1] = np.arange(self.nin) if self.natural_ordering else rng.permutation(self.nin)
        for l in range(L):
            self.m[l] = rng.randint(self.m[l-1].min(), self.nin-1, size=self.hidden_sizes[l])
        # construct the mask matrices: <= between hidden layers, strict < at the output
        masks = [self.m[l-1][:,None] <= self.m[l][None,:] for l in range(L)]
        masks.append(self.m[L-1][:,None] < self.m[-1][None,:])
        # handle the case where nout = nin * k, for integer k > 1
        if self.nout > self.nin:
            k = int(self.nout / self.nin)
            # replicate the mask across the other outputs
            masks[-1] = np.concatenate([masks[-1]]*k, axis=1)
        # set the masks in all MaskedLinear layers (order matches mask order)
        layers = [l for l in self.net.modules() if isinstance(l, MaskedLinear)]
        for l,m in zip(layers, masks):
            l.set_mask(m)
    def forward(self, x):
        return self.net(x)
####
# End Karpathy's code
####
class MAF(nn.Module):
    """Masked autoregressive flow layer: x0 only depends on x0, etc.

    A single MADE parameterizes per-dimension shift (mu) and log-scale (alpha)
    of an affine autoregressive transform, conditioned on a context vector.
    """
    def __init__(self, features, context, hidden=100, nlayers=1):
        super(self.__class__, self).__init__()
        # MADE sees [context, features] concatenated and emits 2x that many
        # outputs (mu and alpha for every input slot).
        self._fmualpha = MADE(features+context,
                              [hidden]*nlayers, 2*(features+context),
                              natural_ordering=True)
        self.context_map = nn.Linear(context, context)
        self.context = context
        self.features = features
    def fmualpha(self, x):
        # Only return the data parts: (conditioned on whole context vector).
        # mu/alpha for the context-aligned slots are discarded.
        out = self._fmualpha(x)
        mu = out[:, self.context:self.context+self.features]
        alpha = out[:, 2*self.context+self.features:]
        return mu, alpha
    def load_context(self, x, context):
        # Prepend the (linearly mapped) context to the data along dim 1.
        return torch.cat((self.context_map(context), x), dim=1)
    def invert(self, u, context):
        """Map latent u back to data space: x = u * exp(alpha) + mu."""
        _x = self.load_context(u, context)
        mu, alpha = self.fmualpha(_x)
        x = u * torch.exp(alpha) + mu
        return x
    def forward(self, x, context):
        """Map data x to latent u; also return the log |det Jacobian|."""
        # Invert the flow
        _x = self.load_context(x, context)
        if DEBUG:
            print("_x is nan:", torch.any(torch.isnan(_x)), _x.mean())
        mu, alpha = self.fmualpha(_x)
        if DEBUG:
            print("mu is nan:", torch.any(torch.isnan(mu)), mu.mean())
            print("alpha is nan:", torch.any(torch.isnan(alpha)), alpha.mean())
        u = (x - mu) * torch.exp(-alpha)
        log_det = - torch.sum(alpha, dim=1)
        return u, log_det
class Perm(nn.Module):
    """A fixed permutation of the feature dimension.

    If no permutation is supplied, a random one is drawn once at
    construction time and kept fixed afterwards. ``forward`` applies the
    permutation with a zero log-determinant (permutations preserve
    volume); ``invert`` undoes it using the precomputed inverse ordering.
    """
    def __init__(self, nvars, perm=None):
        super(self.__class__, self).__init__()
        if perm is None:
            # Draw a random ordering once; it stays fixed for the module's lifetime.
            perm = torch.randperm(nvars)
        self.perm = perm
        self.reverse_perm = torch.argsort(perm)
    def forward(self, x, context):
        forward_idx = self.perm.to(x.device)
        return x[:, forward_idx], 0
    def invert(self, x, context):
        backward_idx = self.reverse_perm.to(x.device)
        return x[:, backward_idx]
class Flow(nn.Module):
    """Composition of flow layers (e.g. MAF and Perm modules).

    forward() pushes data through all layers, summing log-determinants,
    and then undoes every permutation so the latent output keeps the same
    variable ordering as the input. invert() re-applies the permutations
    before running the layers' invert() methods in reverse.
    """
    def __init__(self, *layers):
        super(self.__class__, self).__init__()
        self.layers = nn.ModuleList(layers)
    def forward(self, x, context):
        log_det = None
        for layer in self.layers:
            x, _log_det = layer(x, context)
            log_det = (log_det if log_det is not None else 0) + _log_det
        # Same ordering as input: undo all permutations (detected by class
        # name in the repr), applied in reverse layer order.
        for layer in self.layers[::-1]:
            if 'Perm' not in str(layer):
                continue
            x = x[:, layer.reverse_perm]
        return x, log_det
    def invert(self, u, context):
        # Re-apply the permutations that forward() undid, then invert every
        # layer back-to-front.
        for layer in self.layers:
            if 'Perm' not in str(layer):
                continue
            u = u[:, layer.perm]
        for layer in self.layers[::-1]:
            u = layer.invert(u, context)
        return u
| [
"torch.nn.ReLU",
"torch.randperm",
"numpy.arange",
"torch.nn.Sequential",
"torch.nn.ModuleList",
"torch.exp",
"torch.functional.F.linear",
"torch.argsort",
"torch.sum",
"torch.nn.Linear",
"numpy.concatenate",
"torch.isnan",
"numpy.random.RandomState",
"torch.ones"
] | [((814, 865), 'torch.functional.F.linear', 'F.linear', (['input', '(self.mask * self.weight)', 'self.bias'], {}), '(input, self.mask * self.weight, self.bias)\n', (822, 865), False, 'from torch.functional import F\n'), ((2395, 2419), 'torch.nn.Sequential', 'nn.Sequential', (['*self.net'], {}), '(*self.net)\n', (2408, 2419), False, 'from torch import nn, optim\n'), ((3126, 3158), 'numpy.random.RandomState', 'np.random.RandomState', (['self.seed'], {}), '(self.seed)\n', (3147, 3158), True, 'import numpy as np\n'), ((4653, 4680), 'torch.nn.Linear', 'nn.Linear', (['context', 'context'], {}), '(context, context)\n', (4662, 4680), False, 'from torch import nn, optim\n'), ((6133, 6152), 'torch.argsort', 'torch.argsort', (['perm'], {}), '(perm)\n', (6146, 6152), False, 'import torch\n'), ((6494, 6515), 'torch.nn.ModuleList', 'nn.ModuleList', (['layers'], {}), '(layers)\n', (6507, 6515), False, 'from torch import nn, optim\n'), ((526, 563), 'torch.ones', 'torch.ones', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (536, 563), False, 'import torch\n'), ((3319, 3338), 'numpy.arange', 'np.arange', (['self.nin'], {}), '(self.nin)\n', (3328, 3338), True, 'import numpy as np\n'), ((3935, 3974), 'numpy.concatenate', 'np.concatenate', (['([masks[-1]] * k)'], {'axis': '(1)'}), '([masks[-1]] * k, axis=1)\n', (3949, 3974), True, 'import numpy as np\n'), ((5721, 5738), 'torch.exp', 'torch.exp', (['(-alpha)'], {}), '(-alpha)\n', (5730, 5738), False, 'import torch\n'), ((5759, 5782), 'torch.sum', 'torch.sum', (['alpha'], {'dim': '(1)'}), '(alpha, dim=1)\n', (5768, 5782), False, 'import torch\n'), ((6058, 6079), 'torch.randperm', 'torch.randperm', (['nvars'], {}), '(nvars)\n', (6072, 6079), False, 'import torch\n'), ((5258, 5274), 'torch.exp', 'torch.exp', (['alpha'], {}), '(alpha)\n', (5267, 5274), False, 'import torch\n'), ((764, 782), 'torch.isnan', 'torch.isnan', (['input'], {}), '(input)\n', (775, 782), False, 'import torch\n'), ((2282, 2291), 
'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2289, 2291), False, 'from torch import nn, optim\n'), ((5462, 5477), 'torch.isnan', 'torch.isnan', (['_x'], {}), '(_x)\n', (5473, 5477), False, 'import torch\n'), ((5589, 5604), 'torch.isnan', 'torch.isnan', (['mu'], {}), '(mu)\n', (5600, 5604), False, 'import torch\n'), ((5663, 5681), 'torch.isnan', 'torch.isnan', (['alpha'], {}), '(alpha)\n', (5674, 5681), False, 'import torch\n')] |
"""
Email: <EMAIL>
Date: 2018/9/28
"""
import datetime as dt
import numpy as np
import torch
import torch.nn as nn
from .loader import get_txt_data
class Timer():
    """Simple wall-clock stopwatch (original docstring: "timer class")."""
    def start(self):
        """Record the current wall-clock time as the starting point."""
        self.start_dt = dt.datetime.now()
    def stop(self):
        """Print and return the seconds elapsed since the last start()."""
        elapsed = (dt.datetime.now() - self.start_dt).total_seconds()
        print(f'Time taken: {elapsed:.2f}s')
        return elapsed
def set_device():
    """Pick the computation device (CPU or GPU).

    Returns:
        torch.device: ``cuda:0`` when CUDA is available, otherwise ``cpu``.
    """
    # The device string must not contain whitespace: torch.device() raises
    # "Invalid device string" for inputs such as 'cuda: 0'.
    return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def init_params(net):
    """Initialize layer parameters in-place.

    Conv2d weights get Kaiming-normal initialization (fan-out mode),
    BatchNorm2d is reset to weight=1 / bias=0, and Linear weights are drawn
    from N(0, 1e-3); all present biases are zeroed.

    Args:
        net (nn.Module): network whose sub-modules are (re-)initialized.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # Use the non-deprecated in-place initializers (trailing underscore).
            nn.init.kaiming_normal_(m.weight, mode='fan_out')
            # `if m.bias:` would raise "Boolean value of Tensor ... is
            # ambiguous" for a multi-element bias; test for presence instead.
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
def one_hot_encoding(labels, num_classes):
    """Embed integer class labels as one-hot vectors.

    Args:
        labels: (LongTensor) class labels, sized [N,].
        num_classes: (int) number of classes.

    Returns:
        (tensor) encoded labels, sized [N, num_classes].
    """
    # Row i of the identity matrix is the one-hot vector for class i, so
    # indexing the identity with the label tensor yields the encoding.
    identity = torch.eye(num_classes)
    return identity[labels]
def repackage_hidden(h):
    """Detach hidden state(s) from their autograd history.

    Accepts a single tensor or an arbitrarily nested iterable of tensors;
    nested containers come back as tuples of detached tensors.
    """
    if not isinstance(h, torch.Tensor):
        # Recurse into the container and rebuild it as a tuple.
        return tuple(repackage_hidden(item) for item in h)
    return h.detach()
def get_Granger_Causality(err_cond, err_all):
    """Compute the Granger Causality matrix.

    Both arguments must share the same array type (both numpy or both torch).

    Args:
        err_cond (np.ndarray or torch.Tensor): conditional errors,
            num_channel * n_point * num_channel
        err_all (np.ndarray or torch.Tensor): overall errors,
            n_point * num_channel

    Returns:
        np.ndarray: Granger Causality matrix, log of the clipped variance
        ratio, with the diagonal forced to zero.
    """
    both_numpy = isinstance(err_cond, np.ndarray) and isinstance(err_all, np.ndarray)
    both_torch = isinstance(err_cond, torch.Tensor) and isinstance(err_all, torch.Tensor)
    if both_numpy:
        ratio = np.double(err_cond).var(1) / np.double(err_all).var(0)
        gc_matrix = np.log(ratio.clip(min=1.))
    elif both_torch:
        ratio = err_cond.double().var(1) / err_all.double().var(0)
        gc_matrix = ratio.clamp(min=1.).log().cpu().numpy()
    else:
        raise ValueError('input variables should have the same type(numpy.ndarray or torch.tensor).')
    # Self-influence is not considered: zero the diagonal.
    np.fill_diagonal(gc_matrix, 0.)
    return gc_matrix
def get_Granger_Causality1(err_cond, err_all):
    """Compute an alternative Granger Causality matrix.

    Both arguments must share the same array type (both numpy or both torch).

    Args:
        err_cond (np.ndarray or torch.Tensor): conditional errors,
            num_channel * n_point * num_channel
        err_all (np.ndarray or torch.Tensor): overall errors,
            n_point * num_channel

    Returns:
        np.ndarray: 1 minus the clipped inverse variance ratio, with the
        diagonal forced to zero.
    """
    both_numpy = isinstance(err_cond, np.ndarray) and isinstance(err_all, np.ndarray)
    both_torch = isinstance(err_cond, torch.Tensor) and isinstance(err_all, torch.Tensor)
    if both_numpy:
        ratio = np.double(err_all).var(0) / np.double(err_cond).var(1)
        gc_matrix = 1 - ratio.clip(min=1.)
    elif both_torch:
        ratio = err_all.double().var(0) / err_cond.double().var(1)
        gc_matrix = (1 - ratio.clamp(min=1.)).cpu().numpy()
    else:
        raise ValueError('input variables should have the same type(numpy.ndarray or torch.tensor).')
    # Self-influence is not considered: zero the diagonal.
    np.fill_diagonal(gc_matrix, 0.)
    return gc_matrix
def get_gc_precent(gc_matrix):
    """Column-normalize a Granger Causality matrix into percentages.

    Entry (i, j) becomes the share of signal i's influence on signal j.
    Columns that sum to zero come out as all zeros.

    Args:
        gc_matrix (np.ndarray): Granger Causality matrix.
    """
    column_sums = gc_matrix.sum(axis=0)
    # Poison zero columns with NaN so the division cannot blow up, then
    # zero the resulting NaN entries out again afterwards.
    column_sums[column_sums == np.zeros(1)] = np.nan
    shares = gc_matrix / column_sums
    shares[np.isnan(shares)] = 0.
    return shares
def early_stopping(val_loss, patience: int = 5, min_val_loss: float = 0.5):
    """Decide whether training should stop early.

    Args:
        val_loss (array-like): the last ``patience`` validation losses.
        patience (int, optional): Defaults to 5. Number of epochs the
            validation loss is allowed to stagnate.
        min_val_loss (float, optional): Defaults to 0.5. Best validation
            loss observed so far.

    Returns:
        bool: True when none of the recent losses improved on
        ``min_val_loss`` (i.e. training should stop).

    Raises:
        ValueError: if ``val_loss`` does not contain exactly ``patience``
            entries.
    """
    losses = np.asarray(val_loss).reshape(-1, )
    if losses.shape[0] != patience:
        raise ValueError(f'val_loss.shape[1] or val_loss.shape[0] must be {patience}!')
    improved = losses - min_val_loss <= 0.
    return not np.any(improved)
def plot_save_gc_precent(txt_path: str, save_png_path: str, png_title: str, save_txt_path: str):
    """Plot and save the percentage matrix of a Granger Causality matrix.

    Loads a previously saved GC matrix, converts it to column-normalized
    percentages, renders it with matplotlib and writes both the figure and
    the percentage matrix to disk.

    Args:
        txt_path (str): path where the Granger Causality matrix was saved.
        save_png_path (str): path to save the figure.
        png_title (str): title displayed on the figure.
        save_txt_path (str): path to save the percentage matrix as text.
    """
    # Imported lazily so headless uses of this module avoid a matplotlib import.
    import matplotlib.pyplot as plt
    data = get_txt_data(txt_path, delimiter=' ')
    gc_precent = get_gc_precent(data)
    plt.matshow(gc_precent)
    plt.title(png_title)
    plt.savefig(save_png_path)
    np.savetxt(save_txt_path, gc_precent)
def matshow(data: np.ndarray, xlabel: str, ylabel: str, title: str, png_name: str):
    """Render a matrix as an annotated heatmap and save it to a file.

    Args:
        data (np.ndarray): the matrix to draw.
        xlabel (str): labels along the horizontal axis (one per column).
        ylabel (str): labels along the vertical axis (one per row).
        title (str): label written next to the colorbar.
        png_name (str): file name to save the figure under.
    """
    # Imported lazily so headless uses of this module avoid a matplotlib import.
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    img = ax.imshow(data, cmap="YlGn")
    # ax.matshow(data, cmap="YlGn")
    # We want to show all ticks
    ax.set_xticks(np.arange(len(xlabel)))
    ax.set_yticks(np.arange(len(ylabel)))
    # and label them with the respective list entries
    ax.set_xticklabels(xlabel)
    ax.set_yticklabels(ylabel)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # Create colorbar
    cbar = ax.figure.colorbar(img, ax=ax)
    cbar.ax.set_ylabel(title, rotation=-90, va="bottom")
    # Loop over data dimensions and create text annotations
    # (only for small matrices; near-zero cells are left blank).
    if data.shape[0] < 5:
        for i in range(len(xlabel)):
            for j in range(len(ylabel)):
                ax.text(j, i, round(data[i, j], 4) if not abs(data[i, j]) < 1e-8 else '', ha="center", va="center", color="k")
    fig.tight_layout()
    plt.savefig(png_name)
def download_file(url: str):
    """Download a file from the given URL into the working directory.

    Args:
        url (str): address of the file to download.
    """
    # Imported lazily so the wget dependency is only needed when used.
    import wget
    wget.download(url)
def save_3Darray(file_path, data):
    """Write a 3-D np.ndarray to a text file, one 2-D slice at a time.

    A shape header and per-slice separators are emitted as "#" comment
    lines, which numpy.loadtxt ignores when reading the file back.

    Args:
        file_path (str or Path): destination file path.
        data (np.ndarray): 3-D array to serialize.
    """
    with open(file_path, 'w') as handle:
        # Header is purely for human readability; loadtxt skips "#" lines.
        handle.write(f'# Array shape: {data.shape}\n')
        # Iterating a 3-D array yields its 2-D planes, i.e. data[i, :, :].
        for plane in data:
            np.savetxt(handle, plane, fmt='%.6f')
            # Marker line separating consecutive slices.
            handle.write('# New trial\n')
| [
"wget.download",
"numpy.array",
"torch.cuda.is_available",
"torch.eye",
"matplotlib.pyplot.savefig",
"numpy.fill_diagonal",
"numpy.any",
"numpy.isnan",
"numpy.savetxt",
"matplotlib.pyplot.title",
"matplotlib.pyplot.matshow",
"torch.nn.init.kaiming_normal",
"numpy.double",
"torch.nn.init.no... | [((1391, 1413), 'torch.eye', 'torch.eye', (['num_classes'], {}), '(num_classes)\n', (1400, 1413), False, 'import torch\n'), ((2629, 2661), 'numpy.fill_diagonal', 'np.fill_diagonal', (['gc_matrix', '(0.0)'], {}), '(gc_matrix, 0.0)\n', (2645, 2661), True, 'import numpy as np\n'), ((3639, 3671), 'numpy.fill_diagonal', 'np.fill_diagonal', (['gc_matrix', '(0.0)'], {}), '(gc_matrix, 0.0)\n', (3655, 3671), True, 'import numpy as np\n'), ((3899, 3924), 'numpy.sum', 'np.sum', (['gc_matrix'], {'axis': '(0)'}), '(gc_matrix, axis=0)\n', (3905, 3924), True, 'import numpy as np\n'), ((5251, 5274), 'matplotlib.pyplot.matshow', 'plt.matshow', (['gc_precent'], {}), '(gc_precent)\n', (5262, 5274), True, 'import matplotlib.pyplot as plt\n'), ((5279, 5299), 'matplotlib.pyplot.title', 'plt.title', (['png_title'], {}), '(png_title)\n', (5288, 5299), True, 'import matplotlib.pyplot as plt\n'), ((5304, 5330), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_png_path'], {}), '(save_png_path)\n', (5315, 5330), True, 'import matplotlib.pyplot as plt\n'), ((5335, 5372), 'numpy.savetxt', 'np.savetxt', (['save_txt_path', 'gc_precent'], {}), '(save_txt_path, gc_precent)\n', (5345, 5372), True, 'import numpy as np\n'), ((5692, 5706), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5704, 5706), True, 'import matplotlib.pyplot as plt\n'), ((6592, 6613), 'matplotlib.pyplot.savefig', 'plt.savefig', (['png_name'], {}), '(png_name)\n', (6603, 6613), True, 'import matplotlib.pyplot as plt\n'), ((6740, 6758), 'wget.download', 'wget.download', (['url'], {}), '(url)\n', (6753, 6758), False, 'import wget\n'), ((229, 246), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (244, 246), True, 'import datetime as dt\n'), ((285, 302), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (300, 302), True, 'import datetime as dt\n'), ((4013, 4033), 'numpy.isnan', 'np.isnan', (['gc_precent'], {}), '(gc_precent)\n', (4021, 4033), True, 'import numpy as 
np\n'), ((555, 580), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (578, 580), False, 'import torch\n'), ((727, 775), 'torch.nn.init.kaiming_normal', 'nn.init.kaiming_normal', (['m.weight'], {'mode': '"""fan_out"""'}), "(m.weight, mode='fan_out')\n", (749, 775), True, 'import torch.nn as nn\n'), ((3942, 3953), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3950, 3953), True, 'import numpy as np\n'), ((4482, 4500), 'numpy.array', 'np.array', (['val_loss'], {}), '(val_loss)\n', (4490, 4500), True, 'import numpy as np\n'), ((4572, 4610), 'numpy.any', 'np.any', (['(val_loss - min_val_loss <= 0.0)'], {}), '(val_loss - min_val_loss <= 0.0)\n', (4578, 4610), True, 'import numpy as np\n'), ((7430, 7473), 'numpy.savetxt', 'np.savetxt', (['outfile', 'data_slice'], {'fmt': '"""%.6f"""'}), "(outfile, data_slice, fmt='%.6f')\n", (7440, 7473), True, 'import numpy as np\n'), ((815, 842), 'torch.nn.init.constant', 'nn.init.constant', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (831, 842), True, 'import torch.nn as nn\n'), ((899, 928), 'torch.nn.init.constant', 'nn.init.constant', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (915, 928), True, 'import torch.nn as nn\n'), ((941, 968), 'torch.nn.init.constant', 'nn.init.constant', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (957, 968), True, 'import torch.nn as nn\n'), ((1020, 1055), 'torch.nn.init.normal', 'nn.init.normal', (['m.weight'], {'std': '(0.001)'}), '(m.weight, std=0.001)\n', (1034, 1055), True, 'import torch.nn as nn\n'), ((2188, 2207), 'numpy.double', 'np.double', (['err_cond'], {}), '(err_cond)\n', (2197, 2207), True, 'import numpy as np\n'), ((2217, 2235), 'numpy.double', 'np.double', (['err_all'], {}), '(err_all)\n', (2226, 2235), True, 'import numpy as np\n'), ((3202, 3220), 'numpy.double', 'np.double', (['err_all'], {}), '(err_all)\n', (3211, 3220), True, 'import numpy as np\n'), ((3230, 3249), 'numpy.double', 'np.double', (['err_cond'], {}), '(err_cond)\n', (3239, 3249), True, 'import 
numpy as np\n'), ((1094, 1121), 'torch.nn.init.constant', 'nn.init.constant', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (1110, 1121), True, 'import torch.nn as nn\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 25 22:03:24 2020
@author: Administrator
"""
import numpy as np
# Logistic sigmoid activation, 1 / (1 + e^-x); name kept as in the original.
sigmod = lambda x: 1/(1+np.exp(-x))
class DNN:
    """Fully-connected feed-forward network trained by batch gradient descent.

    Every hidden layer and the output layer use the sigmoid activation.
    sizes=[5, 4, 2, 1] means: 5 input units, two hidden layers with 4 and 2
    units respectively, and 1 output unit.
    (Docstring translated from the original Chinese.)
    """
    def initial(self,sizes):
        # (Re-)draw all weights and biases uniformly from [0, 1).
        self.B = [np.random.rand(b) for b in sizes[1:]]
        self.W = [np.random.rand(w2,w1) for w1,w2 in zip(sizes[:-1],sizes[1:])] ## W rows: outputs, cols: inputs
    def __init__(self,sizes):
        self.initial(sizes)
        self.sizes = sizes
    def predict(self,X):
        # Forward-propagate row by row; return arg-max class index per sample.
        for w,b in zip(self.W, self.B):
            X = np.apply_along_axis(lambda x: sigmod(w.dot(x)+b), 1, X)
        return X.argmax(1)
    def train(self,X,Y,testX,testY,batch=10,epoch=50,alpha=.1):
        '''Mini-batch gradient descent (batch-GD) training loop.'''
        self.info = []
        self.initial(self.sizes)
        for t in range(epoch):
            # Shuffle the sample indices and cut them into batches.
            batches = np.split(np.random.permutation(len(X)),
                               np.arange(len(X),step=batch)[1:])
            for ids in batches:
                x, y = X[ids].copy(), Y[ids].copy()
                ## forward pass: cache every layer's activations
                F = [x]
                for w,b in zip(self.W, self.B):
                    x = np.apply_along_axis(lambda row: sigmod(w.dot(row)+b),1,x)
                    F.append(x)
                ## backward pass: per-layer error terms (deltas)
                δ = [(x-y)*(x*(1-x))]
                for w,f in zip(self.W[1:][::-1],F[1:-1][::-1]):
                    delta = np.apply_along_axis(lambda row: w.T.dot(row),1,δ[-1])
                    delta *= f*(1-f)
                    δ.append(delta)
                ## gradient-descent parameter update, front to back
                δ.reverse()
                for w,b,d,f in zip(self.W, self.B, δ, F[:-1]):
                    grad_w = np.sum([i[:,None]*j for i,j in zip(d,f)],axis=0)
                    w -= alpha/batch * grad_w
                    b -= alpha/batch * d.sum(0)
            ## record test-set accuracy after each epoch
            Y_hat = self.predict(testX)
            self.info.append({'t':t,'right':(Y_hat==testY.argmax(1)).mean()})
        return 'train done!'
if __name__=='__main__':
    import pandas as pd
    from sklearn.datasets import load_iris
    from sklearn.datasets import fetch_openml
    from sklearn.model_selection import train_test_split
    from sklearn.preprocessing import StandardScaler
    # Demo 1: train on the iris dataset (3 classes, 4 features).
    iris = load_iris()
    iris.target = pd.get_dummies(iris.target).values
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=.3,random_state=42)
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    # train model and plot accuracy per epoch
    dnn = DNN(sizes=[4,5,4,3])
    dnn.train(X_train,y_train,X_test,y_test,batch=10,epoch=50,alpha=3)
    info = pd.DataFrame(dnn.info)
    info.plot(x='t',y='right',marker='o',ms=3)
    # Demo 2: train on MNIST (downloads/caches the dataset locally).
    mnist = fetch_openml('mnist_784', version=1, data_home='E:/Learn/algorithm_ljp')
    mnist.target = pd.get_dummies(mnist.target).values
    X_train, X_test, y_train, y_test = train_test_split(
        mnist.data, mnist.target, test_size=.3,random_state=42)
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    # train model (single hidden layer of 30 units)
    dnn = DNN(sizes=[784,30,10])
    dnn.train(X_train,y_train,X_test,y_test,batch=10,epoch=30,alpha=10)
    info = pd.DataFrame(dnn.info)
info.plot(x='t',y='right',marker='o',ms=3) | [
"sklearn.datasets.load_iris",
"sklearn.datasets.fetch_openml",
"numpy.random.rand",
"sklearn.model_selection.train_test_split",
"pandas.get_dummies",
"sklearn.preprocessing.StandardScaler",
"numpy.exp",
"pandas.DataFrame"
] | [((2469, 2480), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (2478, 2480), False, 'from sklearn.datasets import load_iris\n'), ((2577, 2649), 'sklearn.model_selection.train_test_split', 'train_test_split', (['iris.data', 'iris.target'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(iris.data, iris.target, test_size=0.3, random_state=42)\n', (2593, 2649), False, 'from sklearn.model_selection import train_test_split\n'), ((2674, 2690), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2688, 2690), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2916, 2938), 'pandas.DataFrame', 'pd.DataFrame', (['dnn.info'], {}), '(dnn.info)\n', (2928, 2938), True, 'import pandas as pd\n'), ((3019, 3091), 'sklearn.datasets.fetch_openml', 'fetch_openml', (['"""mnist_784"""'], {'version': '(1)', 'data_home': '"""E:/Learn/algorithm_ljp"""'}), "('mnist_784', version=1, data_home='E:/Learn/algorithm_ljp')\n", (3031, 3091), False, 'from sklearn.datasets import fetch_openml\n'), ((3188, 3262), 'sklearn.model_selection.train_test_split', 'train_test_split', (['mnist.data', 'mnist.target'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(mnist.data, mnist.target, test_size=0.3, random_state=42)\n', (3204, 3262), False, 'from sklearn.model_selection import train_test_split\n'), ((3287, 3303), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3301, 3303), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3532, 3554), 'pandas.DataFrame', 'pd.DataFrame', (['dnn.info'], {}), '(dnn.info)\n', (3544, 3554), True, 'import pandas as pd\n'), ((2500, 2527), 'pandas.get_dummies', 'pd.get_dummies', (['iris.target'], {}), '(iris.target)\n', (2514, 2527), True, 'import pandas as pd\n'), ((3112, 3140), 'pandas.get_dummies', 'pd.get_dummies', (['mnist.target'], {}), '(mnist.target)\n', (3126, 3140), True, 'import pandas as pd\n'), ((142, 152), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (148, 
152), True, 'import numpy as np\n'), ((304, 321), 'numpy.random.rand', 'np.random.rand', (['b'], {}), '(b)\n', (318, 321), True, 'import numpy as np\n'), ((361, 383), 'numpy.random.rand', 'np.random.rand', (['w2', 'w1'], {}), '(w2, w1)\n', (375, 383), True, 'import numpy as np\n')] |
import pytest
import torch
import numpy as np
from cplxmodule import Cplx
from cplxmodule.nn import init
def cplx_allclose_numpy(input, other):
    """Check that a complex tensor-like object (with torch ``.real``/``.imag``)
    matches a numpy complex array within ``torch.allclose`` tolerances."""
    reference = np.asarray(other)
    real_ok = torch.allclose(input.real, torch.from_numpy(reference.real))
    if not real_ok:
        return False
    return torch.allclose(input.imag, torch.from_numpy(reference.imag))
@pytest.mark.parametrize('initializer', [
    init.cplx_kaiming_normal_,
    init.cplx_xavier_normal_,
    init.cplx_kaiming_uniform_,
    init.cplx_xavier_uniform_,
    init.cplx_trabelsi_standard_,
    init.cplx_trabelsi_independent_,
    init.cplx_uniform_independent_,
])
def test_initializer(initializer):
    """Smoke test: every initializer runs on 2-d and 4-d complex weights."""
    initializer(Cplx.empty(500, 1250))
    initializer(Cplx.empty(1250, 500))
    initializer(Cplx.empty(32, 64, 3, 3))
    # with pytest.raises(ValueError, match="Fan in and fan out can not be computed"):
    #     initializer(Cplx.empty(32))
def test_cplx_trabelsi_independent_():
    """Trabelsi 'independent' init should yield (semi-)unitary weights:
    the Gram matrix along the smaller dimension must be real diagonal."""
    # weight from an embedding linear layer
    weight = Cplx.empty(500, 1250, dtype=torch.double)
    init.cplx_trabelsi_independent_(weight)
    mat = weight @ weight.conj.t()
    assert cplx_allclose_numpy(mat, np.diag(mat.real.diagonal()))
    # weight from a bottleneck linear layer
    weight = Cplx.empty(1250, 500, dtype=torch.double)
    init.cplx_trabelsi_independent_(weight)
    mat = weight.conj.t() @ weight
    assert cplx_allclose_numpy(mat, np.diag(mat.real.diagonal()))
    # weight from a generic 2d convolution (flattened to a matrix first)
    weight = Cplx.empty(32, 64, 3, 3, dtype=torch.double)
    init.cplx_trabelsi_independent_(weight)
    weight = weight.reshape(weight.shape[:2].numel(), -1)
    mat = weight.conj.t() @ weight
    assert cplx_allclose_numpy(mat, np.diag(mat.real.diagonal()))
    # weight from a really small 2d convolution
    weight = Cplx.empty(3, 7, 5, 5, dtype=torch.double)
    init.cplx_trabelsi_independent_(weight)
    weight = weight.reshape(weight.shape[:2].numel(), -1)
    mat = weight @ weight.conj.t()
    assert cplx_allclose_numpy(mat, np.diag(mat.real.diagonal()))
| [
"numpy.asarray",
"torch.from_numpy",
"pytest.mark.parametrize",
"cplxmodule.nn.init.cplx_trabelsi_independent_",
"cplxmodule.Cplx.empty"
] | [((334, 592), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""initializer"""', '[init.cplx_kaiming_normal_, init.cplx_xavier_normal_, init.\n cplx_kaiming_uniform_, init.cplx_xavier_uniform_, init.\n cplx_trabelsi_standard_, init.cplx_trabelsi_independent_, init.\n cplx_uniform_independent_]'], {}), "('initializer', [init.cplx_kaiming_normal_, init.\n cplx_xavier_normal_, init.cplx_kaiming_uniform_, init.\n cplx_xavier_uniform_, init.cplx_trabelsi_standard_, init.\n cplx_trabelsi_independent_, init.cplx_uniform_independent_])\n", (357, 592), False, 'import pytest\n'), ((160, 177), 'numpy.asarray', 'np.asarray', (['other'], {}), '(other)\n', (170, 177), True, 'import numpy as np\n'), ((986, 1027), 'cplxmodule.Cplx.empty', 'Cplx.empty', (['(500)', '(1250)'], {'dtype': 'torch.double'}), '(500, 1250, dtype=torch.double)\n', (996, 1027), False, 'from cplxmodule import Cplx\n'), ((1032, 1071), 'cplxmodule.nn.init.cplx_trabelsi_independent_', 'init.cplx_trabelsi_independent_', (['weight'], {}), '(weight)\n', (1063, 1071), False, 'from cplxmodule.nn import init\n'), ((1232, 1273), 'cplxmodule.Cplx.empty', 'Cplx.empty', (['(1250)', '(500)'], {'dtype': 'torch.double'}), '(1250, 500, dtype=torch.double)\n', (1242, 1273), False, 'from cplxmodule import Cplx\n'), ((1278, 1317), 'cplxmodule.nn.init.cplx_trabelsi_independent_', 'init.cplx_trabelsi_independent_', (['weight'], {}), '(weight)\n', (1309, 1317), False, 'from cplxmodule.nn import init\n'), ((1477, 1521), 'cplxmodule.Cplx.empty', 'Cplx.empty', (['(32)', '(64)', '(3)', '(3)'], {'dtype': 'torch.double'}), '(32, 64, 3, 3, dtype=torch.double)\n', (1487, 1521), False, 'from cplxmodule import Cplx\n'), ((1526, 1565), 'cplxmodule.nn.init.cplx_trabelsi_independent_', 'init.cplx_trabelsi_independent_', (['weight'], {}), '(weight)\n', (1557, 1565), False, 'from cplxmodule.nn import init\n'), ((1789, 1831), 'cplxmodule.Cplx.empty', 'Cplx.empty', (['(3)', '(7)', '(5)', '(5)'], {'dtype': 'torch.double'}), '(3, 7, 5, 
5, dtype=torch.double)\n', (1799, 1831), False, 'from cplxmodule import Cplx\n'), ((1836, 1875), 'cplxmodule.nn.init.cplx_trabelsi_independent_', 'init.cplx_trabelsi_independent_', (['weight'], {}), '(weight)\n', (1867, 1875), False, 'from cplxmodule.nn import init\n'), ((660, 681), 'cplxmodule.Cplx.empty', 'Cplx.empty', (['(500)', '(1250)'], {}), '(500, 1250)\n', (670, 681), False, 'from cplxmodule import Cplx\n'), ((699, 720), 'cplxmodule.Cplx.empty', 'Cplx.empty', (['(1250)', '(500)'], {}), '(1250, 500)\n', (709, 720), False, 'from cplxmodule import Cplx\n'), ((738, 762), 'cplxmodule.Cplx.empty', 'Cplx.empty', (['(32)', '(64)', '(3)', '(3)'], {}), '(32, 64, 3, 3)\n', (748, 762), False, 'from cplxmodule import Cplx\n'), ((226, 254), 'torch.from_numpy', 'torch.from_numpy', (['other.real'], {}), '(other.real)\n', (242, 254), False, 'import torch\n'), ((295, 323), 'torch.from_numpy', 'torch.from_numpy', (['other.imag'], {}), '(other.imag)\n', (311, 323), False, 'import torch\n')] |
#!/usr/bin/env python3
import argparse
import math
#
import os
import numpy as np
import torch
from flightgym import VisionEnv_v1
from ruamel.yaml import YAML, RoundTripDumper, dump
from stable_baselines3.common.utils import get_device
from stable_baselines3.ppo.policies import MlpPolicy
from rpg_baselines.torch.common.ppo import PPO
from rpg_baselines.torch.envs import vec_env_wrapper as wrapper
from rpg_baselines.torch.common.util import test_policy
def configure_random_seed(seed, env=None):
    """Make a run reproducible.

    Seeds the numpy and torch global RNGs with ``seed`` and, when an
    environment object is supplied, seeds it as well (first, so the
    environment's own seeding cannot disturb the RNG state set below).
    """
    if env is not None:
        env.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
def parser():
    """Build the command-line argument parser for training / evaluation runs."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--seed", type=int, default=0, help="Random seed")
    cli.add_argument("--train", type=int, default=1, help="Train the policy or evaluate the policy")
    cli.add_argument("--render", type=int, default=0, help="Render with Unity")
    cli.add_argument("--trial", type=int, default=1, help="PPO trial number")
    cli.add_argument("--iter", type=int, default=100, help="PPO iter number")
    return cli
def main():
    """Train a PPO policy in the Flightmare vision environment, or evaluate
    a previously saved policy, depending on the ``--train`` flag."""
    args = parser().parse_args()
    # load configurations from the Flightmare install pointed to by $FLIGHTMARE_PATH
    cfg = YAML().load(
        open(
            os.environ["FLIGHTMARE_PATH"] + "/flightpy/configs/vision/config.yaml", "r"
        )
    )
    if not args.train:
        cfg["simulation"]["num_envs"] = 1
    # create training environment (config is passed as a YAML dump string)
    train_env = VisionEnv_v1(dump(cfg, Dumper=RoundTripDumper), False)
    train_env = wrapper.FlightEnvVec(train_env)
    # set random seed
    configure_random_seed(args.seed, env=train_env)
    if args.render:
        cfg["unity"]["render"] = "yes"
    # create a single-env evaluation environment, then restore num_envs
    old_num_envs = cfg["simulation"]["num_envs"]
    cfg["simulation"]["num_envs"] = 1
    eval_env = wrapper.FlightEnvVec(
        VisionEnv_v1(dump(cfg, Dumper=RoundTripDumper), False)
    )
    cfg["simulation"]["num_envs"] = old_num_envs
    # save the configuration and other files next to this script
    rsg_root = os.path.dirname(os.path.abspath(__file__))
    log_dir = rsg_root + "/saved"
    os.makedirs(log_dir, exist_ok=True)
    # training branch
    if args.train:
        model = PPO(
            tensorboard_log=log_dir,
            policy="MlpPolicy",
            policy_kwargs=dict(
                activation_fn=torch.nn.ReLU,
                net_arch=[dict(pi=[256, 256], vf=[512, 512])],
                log_std_init=-0.5,
            ),
            env=train_env,
            eval_env=eval_env,
            use_tanh_act=True,
            gae_lambda=0.95,
            gamma=0.99,
            n_steps=250,
            ent_coef=0.0,
            vf_coef=0.5,
            max_grad_norm=0.5,
            batch_size=25000,
            clip_range=0.2,
            use_sde=False,  # don't use (gSDE), doesn't work
            env_cfg=cfg,
            verbose=1,
        )
        # run training
        model.learn(total_timesteps=int(5 * 1e7), log_interval=(10, 50))
    else:
        # evaluation branch: launch the Unity renderer in the background
        os.system(os.environ["FLIGHTMARE_PATH"] + "/flightrender/RPG_Flightmare.x86_64 &")
        # locate the saved policy weights and normalization stats for this trial/iter
        weight = rsg_root + "/saved/PPO_{0}/Policy/iter_{1:05d}.pth".format(args.trial, args.iter)
        env_rms = rsg_root +"/saved/PPO_{0}/RMS/iter_{1:05d}.npz".format(args.trial, args.iter)
        device = get_device("auto")
        saved_variables = torch.load(weight, map_location=device)
        # Create policy object
        policy = MlpPolicy(**saved_variables["data"])
        # squash actions with tanh, matching use_tanh_act at training time
        policy.action_net = torch.nn.Sequential(policy.action_net, torch.nn.Tanh())
        # Load weights
        policy.load_state_dict(saved_variables["state_dict"], strict=False)
        policy.to(device)
        # restore observation normalization, then roll out the policy
        eval_env.load_rms(env_rms)
        test_policy(eval_env, policy, render=args.render)
# Script entry point: train or evaluate depending on the --train flag.
if __name__ == "__main__":
    main()
| [
"torch.manual_seed",
"rpg_baselines.torch.envs.vec_env_wrapper.FlightEnvVec",
"stable_baselines3.common.utils.get_device",
"os.makedirs",
"argparse.ArgumentParser",
"torch.nn.Tanh",
"torch.load",
"stable_baselines3.ppo.policies.MlpPolicy",
"ruamel.yaml.YAML",
"numpy.random.seed",
"os.path.abspat... | [((554, 574), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (568, 574), True, 'import numpy as np\n'), ((579, 602), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (596, 602), False, 'import torch\n'), ((632, 657), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (655, 657), False, 'import argparse\n'), ((1504, 1535), 'rpg_baselines.torch.envs.vec_env_wrapper.FlightEnvVec', 'wrapper.FlightEnvVec', (['train_env'], {}), '(train_env)\n', (1524, 1535), True, 'from rpg_baselines.torch.envs import vec_env_wrapper as wrapper\n'), ((2096, 2131), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (2107, 2131), False, 'import os\n'), ((1446, 1479), 'ruamel.yaml.dump', 'dump', (['cfg'], {'Dumper': 'RoundTripDumper'}), '(cfg, Dumper=RoundTripDumper)\n', (1450, 1479), False, 'from ruamel.yaml import YAML, RoundTripDumper, dump\n'), ((2031, 2056), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2046, 2056), False, 'import os\n'), ((2966, 3052), 'os.system', 'os.system', (["(os.environ['FLIGHTMARE_PATH'] + '/flightrender/RPG_Flightmare.x86_64 &')"], {}), "(os.environ['FLIGHTMARE_PATH'] +\n '/flightrender/RPG_Flightmare.x86_64 &')\n", (2975, 3052), False, 'import os\n'), ((3272, 3290), 'stable_baselines3.common.utils.get_device', 'get_device', (['"""auto"""'], {}), "('auto')\n", (3282, 3290), False, 'from stable_baselines3.common.utils import get_device\n'), ((3317, 3356), 'torch.load', 'torch.load', (['weight'], {'map_location': 'device'}), '(weight, map_location=device)\n', (3327, 3356), False, 'import torch\n'), ((3405, 3441), 'stable_baselines3.ppo.policies.MlpPolicy', 'MlpPolicy', ([], {}), "(**saved_variables['data'])\n", (3414, 3441), False, 'from stable_baselines3.ppo.policies import MlpPolicy\n'), ((3715, 3764), 'rpg_baselines.torch.common.util.test_policy', 'test_policy', (['eval_env', 'policy'], {'render': 
'args.render'}), '(eval_env, policy, render=args.render)\n', (3726, 3764), False, 'from rpg_baselines.torch.common.util import test_policy\n'), ((1184, 1190), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (1188, 1190), False, 'from ruamel.yaml import YAML, RoundTripDumper, dump\n'), ((1857, 1890), 'ruamel.yaml.dump', 'dump', (['cfg'], {'Dumper': 'RoundTripDumper'}), '(cfg, Dumper=RoundTripDumper)\n', (1861, 1890), False, 'from ruamel.yaml import YAML, RoundTripDumper, dump\n'), ((3519, 3534), 'torch.nn.Tanh', 'torch.nn.Tanh', ([], {}), '()\n', (3532, 3534), False, 'import torch\n')] |
# Created byMartin.cz
# Copyright (c) <NAME>. All rights reserved.
import numpy
import pero
import perrot
# data to plot: one shared x grid, two normal samples with different spread
count = 50
x_data = numpy.linspace(-5, 5, count)
y_data_1 = numpy.random.normal(0, 1., count)
y_data_2 = numpy.random.normal(0, 5., count)

# scale mapping data values onto marker sizes
size_data = 10 + 30 * numpy.random.random(2 * count)
size_scale = pero.OrdinalScale(out_range=size_data, implicit=True)


def size_fn(d):
    """Return the marker size for a data point, keyed by its x-value."""
    return size_scale.scale(d[0])


# assemble the plot frame
plot = perrot.Plot(
    x_axis_title="x-value",
    y_axis_title="random",
    legend_position=perrot.NE,
    legend_orientation=perrot.VERTICAL)

# scatter series for the sigma=1 sample
series1 = plot.scatter(
    title="Normal 1",
    x=x_data,
    y=y_data_1,
    marker='o',
    marker_size=size_fn,
    marker_fill_alpha=150)

# scatter series for the sigma=5 sample
series2 = plot.scatter(
    title="Normal 5",
    x=x_data,
    y=y_data_2,
    marker='d',
    marker_size=size_fn,
    marker_fill_alpha=150)

# fit the view to the data and display it
plot.zoom()
plot.view("Scatter Series")
| [
"numpy.random.normal",
"numpy.random.random",
"pero.OrdinalScale",
"perrot.Plot",
"numpy.linspace"
] | [((145, 173), 'numpy.linspace', 'numpy.linspace', (['(-5)', '(5)', 'count'], {}), '(-5, 5, count)\n', (159, 173), False, 'import numpy\n'), ((185, 219), 'numpy.random.normal', 'numpy.random.normal', (['(0)', '(1.0)', 'count'], {}), '(0, 1.0, count)\n', (204, 219), False, 'import numpy\n'), ((230, 264), 'numpy.random.normal', 'numpy.random.normal', (['(0)', '(5.0)', 'count'], {}), '(0, 5.0, count)\n', (249, 264), False, 'import numpy\n'), ((349, 402), 'pero.OrdinalScale', 'pero.OrdinalScale', ([], {'out_range': 'size_data', 'implicit': '(True)'}), '(out_range=size_data, implicit=True)\n', (366, 402), False, 'import pero\n'), ((466, 592), 'perrot.Plot', 'perrot.Plot', ([], {'x_axis_title': '"""x-value"""', 'y_axis_title': '"""random"""', 'legend_position': 'perrot.NE', 'legend_orientation': 'perrot.VERTICAL'}), "(x_axis_title='x-value', y_axis_title='random', legend_position=\n perrot.NE, legend_orientation=perrot.VERTICAL)\n", (477, 592), False, 'import perrot\n'), ((305, 335), 'numpy.random.random', 'numpy.random.random', (['(2 * count)'], {}), '(2 * count)\n', (324, 335), False, 'import numpy\n')] |
#######
# Dashboard of low and high yearly average temperatures given the country
# Data collected from Global Historical Climatology Network-Monthly (https://www.ncdc.noaa.gov/ghcnm/v3.php)
#######
# Import libraries
import dash
# https://dash.plot.ly/dash-core-components
import dash_core_components as dcc
# https://github.com/plotly/dash-html-components/tree/master/src/components
import dash_html_components as html
from dash.dependencies import Input, Output, State
import pandas as pd
import numpy as np
import pickle
app = dash.Dash()
# ghcnm.tavg.v3.3.0.20170710.qca.dat ends up to be 75+ Mb file and can be created from ghcnm.ipynb at https://github.com/cliffwhitworth/environment_explorer
# Unable to upload to Github but has more data than the ghcnm_means.pkl file below
# with open('./ghcnm.tavg.v3.3.0.20170710.qca.dat.pkl', 'rb') as qca_file:
#     qca_temps = pickle.load(qca_file)
# Per-country yearly mean temperatures; seed the choropleth with 2014 data.
with open('./global_means.pkl', 'rb') as global_means_file:
    global_means = pickle.load(global_means_file)
df = global_means[global_means['YEAR'] == 2014]
# Able to upload to Github
# Monthly averages per country/year used for the lo/hi comparison chart.
with open('./ghcnm_means.pkl', 'rb') as qca_file:
    qca_temps = pickle.load(qca_file)
qca_temps.reset_index(inplace=True)
# Get country names from code
# countries = pd.read_csv('ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v3/country-codes', names=['codes'], header=None)
# countries[['CountryCode','Country']] = countries["codes"].str.split(" ", 1, expand=True)
# countries.drop('codes', axis = 'columns', inplace = True)
with open('./countrycodes.pkl', 'rb') as countrycodes:
    countries = pickle.load(countrycodes)
countries['CountryCode'] = countries['CountryCode'].astype('int64')
countries = countries.sort_values('Country', ascending = True)
# Silence SettingWithCopyWarning for the in-place edits on filtered frames below.
pd.options.mode.chained_assignment = None # default='warn'
# Default country shown on first load (101 = Algeria in the GHCNM code list).
cntrycode = 101
# mask2 = qca_temps['Country'].str.strip() == 'ALGERIA'
# Here are the country codes: ftp://ftp.ncdc.noaa.gov/pub/data/ghcn/v3/country-codes
df_on_cntrycode = qca_temps[qca_temps['CountryCode'] == cntrycode]
# Dropdown options: one {'label', 'value'} dict per country.
codes = []
for CountryCode, Country in countries.iterrows():
    # print (CountryCode, Country)
    codes.append({'label': Country.Country.strip(), 'value': Country.CountryCode})
sorted_years = df_on_cntrycode.sort_values("YEAR", ascending = True)
available_years = sorted_years.YEAR.unique()
# drop observations without all 12 months; read documentation regarding missing values
df_on_cntrycode.replace(-9999, np.NaN, inplace=True)
df_on_cntrycode.dropna(inplace=True)
# years to compare (initial chart state before any user interaction)
loYear = df_on_cntrycode['YEAR'] == 1856
hiYear = df_on_cntrycode['YEAR'] == 2016
temps_min = df_on_cntrycode[loYear]
temps_max = df_on_cntrycode[hiYear]
# Page layout: header/links, country + year selectors, the lo/hi comparison
# line chart, and a world choropleth of yearly means.
app.layout = html.Div([
    # Header with title and reference links.
    html.Div([
        html.H1('Global Historical Climatology Network-Monthly'),
        html.P('Compare low and high yearly average temperatures given the country'),
        html.A('Code on Github', href='https://github.com/cliffwhitworth/environment_explorer'),
        html.Br(),
        html.A('GHCNM site', href='https://www.ncdc.noaa.gov/ghcnm/v3.php'),
        html.Br(),
        html.A('Observing stations', href='https://www.wmo.int/cpdb/volume_a_observing_stations/list_stations')
    ]),
    html.Hr(),
    # Selector row: country, lo year, hi year, and the submit button.
    html.Div([
        html.Div([
            html.H3('Select Country:'),
            dcc.Dropdown(
                id='country_code',
                options=codes,
                value='101' # sets a default value
            )
        ], style={'width': '30%', 'display':'inline-block', 'padding-right': '13px', 'white-space': 'nowrap', 'text-overflow': 'ellipsis'}),
        # style={'display':'inline-block', 'padding-right': '13px', 'overflow': 'hidden', 'white-space': 'nowrap', 'text-overflow': 'ellipsis'}),
        html.Div([
            html.H3('Select Lo Year:'),
            dcc.Dropdown(
                id='lo_year',
                options=[{'label': i, 'value': i} for i in available_years],
                value='1856'
            )
        ], style={'display':'inline-block', 'padding-right': '13px'}),
        html.Div([
            html.H3('Select Hi Year:'),
            dcc.Dropdown(
                id='hi_year',
                options=[{'label': i, 'value': i} for i in available_years],
                value='2016'
            )
        ], style={'display':'inline-block'}),
        html.Div([
            html.Button(
                id='submit-button',
                n_clicks=0,
                children='Submit'
            ),
        ], style={'margin-top':'10px'}),
    ], style={'padding': '7px'}),
    html.Hr(),
    # Initial lo/hi comparison chart; refreshed by the submit-button callback.
    dcc.Graph(
        id='my_graph',
        figure={
            'data': [
                # the commented lines below are for the ghcnm.tavg.v3.3.0.20170710.qca.dat.pkl file
                # {'name': 'Lo Year Average', 'x': np.arange(0, 12, 1), 'y': temps_min.iloc[:,4:16].mean().tolist()},
                # {'name': 'Hi Year Average', 'x': np.arange(0, 12, 1), 'y': temps_max.iloc[:,4:16].mean().tolist()}
                {'name': 'Lo Year Average', 'x': np.arange(0, 12, 1), 'y': temps_min.iloc[:,2:14].values.flatten()},
                {'name': 'Hi Year Average', 'x': np.arange(0, 12, 1), 'y': temps_max.iloc[:,2:14].values.flatten()}
            ]
        }
    ),
    html.Hr(),
    html.Div([
        html.H1('Global Means')
    ]),
    html.Div([
        html.H3('Select Year:'),
        dcc.Dropdown(
            id='global_year',
            options=[{'label': i, 'value': i} for i in available_years],
            value='2016'
        )
    ], style={'display':'inline-block', 'padding-right': '13px'}),
    # World choropleth of per-country yearly means; refreshed by its callback.
    dcc.Graph(
        id='my_graph2',
        figure={
            'data': [
                dict(
                    type = 'choropleth',
                    colorscale = 'Rainbow',
                    reversescale = True,
                    locations = df['Country'],
                    locationmode = "country names",
                    z = df['YearAvg'],
                    text = df['Country'],
                    colorbar = {'title' : 'Global Means'},
                )
            ],
            'layout': {
                'title':'Global Means',
                'geo':dict(
                    showframe = False,
                    projection = {'type':'natural earth'}
                )
            }
        }
    )
], style={'padding': '0px', 'margin': '0px'})
def resetYears(code):
    """Build dropdown options for the years available for one country.

    ``code`` is the GHCNM country code (string or int); returns a list of
    ``{'label': year, 'value': year}`` dicts for the year dropdowns.
    """
    country_df = qca_temps[qca_temps['CountryCode'] == int(code)]
    # -9999 marks a missing monthly value; drop incomplete years entirely
    # (see GHCNM documentation on missing values).
    country_df.replace(-9999, np.NaN, inplace=True)
    country_df.dropna(inplace=True)
    years = country_df.sort_values("YEAR", ascending=True).YEAR.unique()
    return [{'label': y, 'value': y} for y in years]
@app.callback(
    Output('lo_year', 'options'),
    [Input('country_code', 'value')])
def callback_lo(cntrycode):
    # Refresh the "lo year" dropdown whenever the selected country changes.
    return resetYears(cntrycode)
@app.callback(
    Output('hi_year', 'options'),
    [Input('country_code', 'value')])
def callback_hi(cntrycode):
    # Refresh the "hi year" dropdown whenever the selected country changes.
    return resetYears(cntrycode)
@app.callback(
    Output('my_graph', 'figure'),
    [Input('submit-button', 'n_clicks')],
    [State('country_code', 'value'),
     State('lo_year', 'value'),
     State('hi_year', 'value')])
def update_graph(n_clicks, cntrycode, loYear, hiYear):
    """Rebuild the lo/hi year comparison chart when Submit is clicked.

    Dropdown values arrive as strings, so the country code and both years
    are coerced to int before filtering.
    """
    df_on_cntrycode = qca_temps[qca_temps['CountryCode'] == int(cntrycode)]
    # drop observations without all 12 months; read documentation regarding missing values
    df_on_cntrycode.replace(-9999, np.NaN, inplace=True)
    df_on_cntrycode.dropna(inplace=True)
    # years to compare
    a_loYear = df_on_cntrycode['YEAR'] == int(loYear)
    a_hiYear = df_on_cntrycode['YEAR'] == int(hiYear)
    temps_min = df_on_cntrycode[a_loYear]
    temps_max = df_on_cntrycode[a_hiYear]
    fig = {
        'data': [
            # Columns 2:14 hold the twelve monthly average temperatures.
            {'name': 'Lo Year Average', 'x': np.arange(0, 12, 1), 'y': temps_min.iloc[:,2:14].values.flatten()},
            {'name': 'Hi Year Average', 'x': np.arange(0, 12, 1), 'y': temps_max.iloc[:,2:14].values.flatten()}
        ],
        'layout': {
            'title':'Lo Hi Year Comparison',
            'xaxis': {'title': 'Month'},
            # Fixed typo in the user-facing axis label: "celcius" -> "celsius".
            'yaxis': {'title': 'Hundredths degree celsius'}
        }
    }
    return fig
@app.callback(
    Output('my_graph2', 'figure'),
    [Input('global_year', 'value')])
def update_global_graph(value):
    """Redraw the choropleth of per-country yearly mean temperatures.

    Renamed from ``update_graph``, which silently shadowed the lo/hi
    comparison callback of the same name at module level. Dash registers
    callbacks at decoration time, so the rename does not change behavior.
    """
    df = global_means[global_means['YEAR'] == value]
    fig = {
        'data': [
            dict(
                type = 'choropleth',
                colorscale = 'Rainbow',
                reversescale = True,
                locations = df['Country'],
                locationmode = "country names",
                z = df['YearAvg'],
                text = df['Country'],
                colorbar = {'title' : 'Global Means'},
            )
        ],
        'layout': {
            'title':'Global Means',
            'geo':dict(
                showframe = False,
                projection = {'type':'natural earth'}
            )
        }
    }
    return fig
if __name__ == '__main__':
    # 0.0.0.0 binds all interfaces so the dashboard is reachable externally.
    app.run_server(host='0.0.0.0')
| [
"numpy.arange",
"dash_html_components.P",
"dash_html_components.Button",
"dash.dependencies.Output",
"dash_html_components.H1",
"pickle.load",
"dash_html_components.Br",
"dash_html_components.H3",
"dash.dependencies.Input",
"dash_core_components.Dropdown",
"dash_html_components.Hr",
"dash.depe... | [((533, 544), 'dash.Dash', 'dash.Dash', ([], {}), '()\n', (542, 544), False, 'import dash\n'), ((981, 1011), 'pickle.load', 'pickle.load', (['global_means_file'], {}), '(global_means_file)\n', (992, 1011), False, 'import pickle\n'), ((1155, 1176), 'pickle.load', 'pickle.load', (['qca_file'], {}), '(qca_file)\n', (1166, 1176), False, 'import pickle\n'), ((1580, 1605), 'pickle.load', 'pickle.load', (['countrycodes'], {}), '(countrycodes)\n', (1591, 1605), False, 'import pickle\n'), ((6875, 6903), 'dash.dependencies.Output', 'Output', (['"""lo_year"""', '"""options"""'], {}), "('lo_year', 'options')\n", (6881, 6903), False, 'from dash.dependencies import Input, Output, State\n'), ((7024, 7052), 'dash.dependencies.Output', 'Output', (['"""hi_year"""', '"""options"""'], {}), "('hi_year', 'options')\n", (7030, 7052), False, 'from dash.dependencies import Input, Output, State\n'), ((7173, 7201), 'dash.dependencies.Output', 'Output', (['"""my_graph"""', '"""figure"""'], {}), "('my_graph', 'figure')\n", (7179, 7201), False, 'from dash.dependencies import Input, Output, State\n'), ((8693, 8722), 'dash.dependencies.Output', 'Output', (['"""my_graph2"""', '"""figure"""'], {}), "('my_graph2', 'figure')\n", (8699, 8722), False, 'from dash.dependencies import Input, Output, State\n'), ((3199, 3208), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (3206, 3208), True, 'import dash_html_components as html\n'), ((4551, 4560), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (4558, 4560), True, 'import dash_html_components as html\n'), ((5242, 5251), 'dash_html_components.Hr', 'html.Hr', ([], {}), '()\n', (5249, 5251), True, 'import dash_html_components as html\n'), ((6910, 6940), 'dash.dependencies.Input', 'Input', (['"""country_code"""', '"""value"""'], {}), "('country_code', 'value')\n", (6915, 6940), False, 'from dash.dependencies import Input, Output, State\n'), ((7059, 7089), 'dash.dependencies.Input', 'Input', (['"""country_code"""', 
'"""value"""'], {}), "('country_code', 'value')\n", (7064, 7089), False, 'from dash.dependencies import Input, Output, State\n'), ((7208, 7242), 'dash.dependencies.Input', 'Input', (['"""submit-button"""', '"""n_clicks"""'], {}), "('submit-button', 'n_clicks')\n", (7213, 7242), False, 'from dash.dependencies import Input, Output, State\n'), ((7250, 7280), 'dash.dependencies.State', 'State', (['"""country_code"""', '"""value"""'], {}), "('country_code', 'value')\n", (7255, 7280), False, 'from dash.dependencies import Input, Output, State\n'), ((7286, 7311), 'dash.dependencies.State', 'State', (['"""lo_year"""', '"""value"""'], {}), "('lo_year', 'value')\n", (7291, 7311), False, 'from dash.dependencies import Input, Output, State\n'), ((7317, 7342), 'dash.dependencies.State', 'State', (['"""hi_year"""', '"""value"""'], {}), "('hi_year', 'value')\n", (7322, 7342), False, 'from dash.dependencies import Input, Output, State\n'), ((8729, 8758), 'dash.dependencies.Input', 'Input', (['"""global_year"""', '"""value"""'], {}), "('global_year', 'value')\n", (8734, 8758), False, 'from dash.dependencies import Input, Output, State\n'), ((2719, 2775), 'dash_html_components.H1', 'html.H1', (['"""Global Historical Climatology Network-Monthly"""'], {}), "('Global Historical Climatology Network-Monthly')\n", (2726, 2775), True, 'import dash_html_components as html\n'), ((2785, 2861), 'dash_html_components.P', 'html.P', (['"""Compare low and high yearly average temperatures given the country"""'], {}), "('Compare low and high yearly average temperatures given the country')\n", (2791, 2861), True, 'import dash_html_components as html\n'), ((2871, 2963), 'dash_html_components.A', 'html.A', (['"""Code on Github"""'], {'href': '"""https://github.com/cliffwhitworth/environment_explorer"""'}), "('Code on Github', href=\n 'https://github.com/cliffwhitworth/environment_explorer')\n", (2877, 2963), True, 'import dash_html_components as html\n'), ((2968, 2977), 'dash_html_components.Br', 
'html.Br', ([], {}), '()\n', (2975, 2977), True, 'import dash_html_components as html\n'), ((2987, 3054), 'dash_html_components.A', 'html.A', (['"""GHCNM site"""'], {'href': '"""https://www.ncdc.noaa.gov/ghcnm/v3.php"""'}), "('GHCNM site', href='https://www.ncdc.noaa.gov/ghcnm/v3.php')\n", (2993, 3054), True, 'import dash_html_components as html\n'), ((3064, 3073), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (3071, 3073), True, 'import dash_html_components as html\n'), ((3083, 3191), 'dash_html_components.A', 'html.A', (['"""Observing stations"""'], {'href': '"""https://www.wmo.int/cpdb/volume_a_observing_stations/list_stations"""'}), "('Observing stations', href=\n 'https://www.wmo.int/cpdb/volume_a_observing_stations/list_stations')\n", (3089, 3191), True, 'import dash_html_components as html\n'), ((5276, 5299), 'dash_html_components.H1', 'html.H1', (['"""Global Means"""'], {}), "('Global Means')\n", (5283, 5299), True, 'import dash_html_components as html\n'), ((5331, 5354), 'dash_html_components.H3', 'html.H3', (['"""Select Year:"""'], {}), "('Select Year:')\n", (5338, 5354), True, 'import dash_html_components as html\n'), ((5364, 5473), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""global_year"""', 'options': "[{'label': i, 'value': i} for i in available_years]", 'value': '"""2016"""'}), "(id='global_year', options=[{'label': i, 'value': i} for i in\n available_years], value='2016')\n", (5376, 5473), True, 'import dash_core_components as dcc\n'), ((8281, 8300), 'numpy.arange', 'np.arange', (['(0)', '(12)', '(1)'], {}), '(0, 12, 1)\n', (8290, 8300), True, 'import numpy as np\n'), ((8394, 8413), 'numpy.arange', 'np.arange', (['(0)', '(12)', '(1)'], {}), '(0, 12, 1)\n', (8403, 8413), True, 'import numpy as np\n'), ((3256, 3282), 'dash_html_components.H3', 'html.H3', (['"""Select Country:"""'], {}), "('Select Country:')\n", (3263, 3282), True, 'import dash_html_components as html\n'), ((3296, 3355), 'dash_core_components.Dropdown', 
'dcc.Dropdown', ([], {'id': '"""country_code"""', 'options': 'codes', 'value': '"""101"""'}), "(id='country_code', options=codes, value='101')\n", (3308, 3355), True, 'import dash_core_components as dcc\n'), ((3759, 3785), 'dash_html_components.H3', 'html.H3', (['"""Select Lo Year:"""'], {}), "('Select Lo Year:')\n", (3766, 3785), True, 'import dash_html_components as html\n'), ((3799, 3904), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""lo_year"""', 'options': "[{'label': i, 'value': i} for i in available_years]", 'value': '"""1856"""'}), "(id='lo_year', options=[{'label': i, 'value': i} for i in\n available_years], value='1856')\n", (3811, 3904), True, 'import dash_core_components as dcc\n'), ((4065, 4091), 'dash_html_components.H3', 'html.H3', (['"""Select Hi Year:"""'], {}), "('Select Hi Year:')\n", (4072, 4091), True, 'import dash_html_components as html\n'), ((4105, 4210), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""hi_year"""', 'options': "[{'label': i, 'value': i} for i in available_years]", 'value': '"""2016"""'}), "(id='hi_year', options=[{'label': i, 'value': i} for i in\n available_years], value='2016')\n", (4117, 4210), True, 'import dash_core_components as dcc\n'), ((4346, 4408), 'dash_html_components.Button', 'html.Button', ([], {'id': '"""submit-button"""', 'n_clicks': '(0)', 'children': '"""Submit"""'}), "(id='submit-button', n_clicks=0, children='Submit')\n", (4357, 4408), True, 'import dash_html_components as html\n'), ((5023, 5042), 'numpy.arange', 'np.arange', (['(0)', '(12)', '(1)'], {}), '(0, 12, 1)\n', (5032, 5042), True, 'import numpy as np\n'), ((5140, 5159), 'numpy.arange', 'np.arange', (['(0)', '(12)', '(1)'], {}), '(0, 12, 1)\n', (5149, 5159), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
''' Fingerprinting algorithm module
This module contains the fingerprinting algorithm for daily PV power signals
'''
import numpy as np
from scipy import optimize
from inspect import signature
from solarfingerprinting.pulsefit.pulses import gaussian, gpow, glin, gquad, gatan, g2
from solarfingerprinting.pulsefit.transform import forward_transform
# Registry of fittable pulse shapes, keyed by the `function` argument of
# fingerprint() below.
FUNCTIONS = {
    'gauss': gaussian,
    'gauss_power': gpow,
    'gauss_linear': glin,
    'gauss_quad': gquad,
    'gauss_atan2': gatan,
    'gaus_lin_mixture': g2
}
def fingerprint(data, function='gauss_quad', residuals=None, reweight=False,
                normalize=True):
    """Fit a parametric pulse shape to one daily PV power signal.

    Parameters
    ----------
    data : 1-D array of power measurements covering a single day; the
        number of samples per hour is inferred from its length.
    function : key into FUNCTIONS selecting the pulse shape to fit.
    residuals : optional per-sample weights passed to curve_fit as
        ``sigma``; defaults to uniform weighting.
    reweight : if True, refit once using the first fit's residuals
        (plus a small epsilon) as weights.
    normalize : if True and the shape is 'gauss_quad', map the fitted
        parameters through forward_transform before returning.

    Returns
    -------
    (encoding, fit) : the fitted parameter vector with the mean absolute
        error appended, and the fitted curve on the day's grid — or
        (None, None) if curve_fit fails to converge.
    """
    max_val = np.max(data)
    num_meas_per_hour = len(data) / 24
    x = np.arange(0, 24, 1. / num_meas_per_hour)
    f = FUNCTIONS[function]
    # Number of fit parameters = pulse function arity minus the x argument.
    n_args = len(signature(f).parameters) - 1
    init = np.zeros(n_args)
    if residuals is None:
        residuals = np.ones_like(data)
    try:
        # Skip the first sample (x == 0) and fit on the peak-normalized signal.
        optimal_params, _ = optimize.curve_fit(f, x[1:],
                                              data[1:] / max_val,
                                              p0=init,
                                              sigma=residuals[1:],
                                              maxfev=100000)
    except RuntimeError:
        # curve_fit raises RuntimeError when it fails to converge.
        encoding = None
        fit = None
    else:
        fit = np.zeros_like(x)
        fit[1:] = f(x[1:], *optimal_params) * max_val
        residual = data - fit
        # Using mean absolute error on normalized residual rather than RMSE
        # rmse = np.linalg.norm(residual / max_val) / np.sqrt(len(data))
        # inf_error = np.max(np.abs(residual / max_val))
        mae = np.sum(np.abs(residual / max_val)) / len(data)
        encoding = np.r_[optimal_params, mae]
        if reweight:
            # One reweighted pass: residual magnitudes (plus epsilon) as sigma.
            encoding, fit = fingerprint(data, function=function,
                                        residuals=(residual + 1e-3),
                                        normalize=False)
    if normalize and function == 'gauss_quad':
        encoding = forward_transform(encoding)
return encoding, fit | [
"scipy.optimize.curve_fit",
"numpy.ones_like",
"numpy.abs",
"solarfingerprinting.pulsefit.transform.forward_transform",
"inspect.signature",
"numpy.max",
"numpy.zeros",
"numpy.zeros_like",
"numpy.arange"
] | [((668, 680), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (674, 680), True, 'import numpy as np\n'), ((728, 769), 'numpy.arange', 'np.arange', (['(0)', '(24)', '(1.0 / num_meas_per_hour)'], {}), '(0, 24, 1.0 / num_meas_per_hour)\n', (737, 769), True, 'import numpy as np\n'), ((854, 870), 'numpy.zeros', 'np.zeros', (['n_args'], {}), '(n_args)\n', (862, 870), True, 'import numpy as np\n'), ((917, 935), 'numpy.ones_like', 'np.ones_like', (['data'], {}), '(data)\n', (929, 935), True, 'import numpy as np\n'), ((973, 1071), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['f', 'x[1:]', '(data[1:] / max_val)'], {'p0': 'init', 'sigma': 'residuals[1:]', 'maxfev': '(100000)'}), '(f, x[1:], data[1:] / max_val, p0=init, sigma=residuals[1\n :], maxfev=100000)\n', (991, 1071), False, 'from scipy import optimize\n'), ((1347, 1363), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (1360, 1363), True, 'import numpy as np\n'), ((2047, 2074), 'solarfingerprinting.pulsefit.transform.forward_transform', 'forward_transform', (['encoding'], {}), '(encoding)\n', (2064, 2074), False, 'from solarfingerprinting.pulsefit.transform import forward_transform\n'), ((814, 826), 'inspect.signature', 'signature', (['f'], {}), '(f)\n', (823, 826), False, 'from inspect import signature\n'), ((1675, 1701), 'numpy.abs', 'np.abs', (['(residual / max_val)'], {}), '(residual / max_val)\n', (1681, 1701), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 21 14:51:28 2018
@author: yujika
"""
import glob
import os
import pickle

import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np

#%matplotlib qt
import util
def corners_unwarp(img, nx, ny, mtx, dist):
    """Undistort ``img`` and warp the detected chessboard to a top-down view.

    Parameters
    ----------
    img : BGR image containing an nx-by-ny inner-corner chessboard.
    nx, ny : inner-corner counts of the chessboard pattern.
    mtx, dist : camera matrix and distortion coefficients from calibration.

    Returns
    -------
    (warped, M) on success, where M is the perspective matrix;
    (undistorted_image, None) when no chessboard is found.
    """
    img_und = cv2.undistort(img, mtx, dist)
    gray = cv2.cvtColor(img_und, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny))
    if ret:
        # Use (nx, ny) instead of the previously hard-coded (9, 6) so the
        # helper works for any board size passed in.
        img_corner = cv2.drawChessboardCorners(img_und, (nx, ny), corners, ret)
        # Corners 0, 1, nx, nx+1 form the top-left 2x2 cell of the grid.
        src = np.float32([[corners[0,0,:]],[corners[1,0,:]],[corners[nx,0,:]],[corners[nx+1,0,:]]])
        h, w, c = img.shape
        dst = np.float32([[0.5*w/nx,0.5*h/ny],[1.5*w/nx,0.5*h/ny],[0.5*w/nx,1.5*h/ny],[1.5*w/nx,1.5*h/ny]])
        M = cv2.getPerspectiveTransform(src, dst)
        warped = cv2.warpPerspective(img_corner, M, (w, h))
        return warped, M
    # BUG FIX: the original returned `img_und, M` here, but M is undefined
    # on this path (NameError). Return None explicitly instead.
    return img_und, None
def corners_undist_unwarp(img, corners, nx, ny ):
    """Warp an already-undistorted chessboard image to a top-down view."""
    height, width, _channels = img.shape
    # The top-left 2x2 cell of detected corners maps onto one grid cell of
    # the output image.
    quad_src = np.float32([[corners[0, 0, :]],
                            [corners[1, 0, :]],
                            [corners[nx, 0, :]],
                            [corners[nx + 1, 0, :]]])
    quad_dst = np.float32([[0.5 * width / nx, 0.5 * height / ny],
                            [1.5 * width / nx, 0.5 * height / ny],
                            [0.5 * width / nx, 1.5 * height / ny],
                            [1.5 * width / nx, 1.5 * height / ny]])
    transform = cv2.getPerspectiveTransform(quad_src, quad_dst)
    return cv2.warpPerspective(img, transform, (width, height))
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
# (x, y) grid coordinates of the 9x6 inner chessboard corners; z stays 0.
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('../camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
        #cv2.imshow('img',img)
        #cv2.waitKey(500)
        plt.imshow(img)
        plt.show()
#cv2.destroyAllWindows()
#Calibrate camera
# NOTE(review): `gray` here is whatever image the loop read last; assumes at
# least one calibration image exists and all share one resolution — confirm.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
# Chessboard inner-corner counts used throughout this script.
nx=9
ny=6
# Sanity check: undistort one calibration image and save a side-by-side figure.
img = cv2.imread('../camera_cal/calibration1.jpg')
img_und = cv2.undistort(img, mtx, dist)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(img_und)
ax2.set_title('Undistorted Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.savefig('../output_images/undistort_output.png')
plt.show()
# Run distortion correction + thresholding + perspective warp on the
# straight-line test images, saving each intermediate result.
test_file_name = glob.glob('../test_images/straight_lines*.jpg')
# Perspective-transform quads (road trapezoid -> bird's-eye rectangle).
# Hoisted out of the loop: they are loop-invariant and are also pickled
# at the end of the script, so they stay defined even for an empty glob.
src_points = np.float32([[571,467],
                        [717,467],
                        [1105,720],
                        [205,720]])
dst_points = np.float32([[1280/4, 0 ],
                        [1280/4*3, 0 ],
                        [1280/4*3, 720],
                        [1280/4, 720]
                        ])
for image_file in test_file_name:
    # File stem used to name derived outputs (relies on the added `import os`).
    base_fn = os.path.basename(image_file).split('.')[0]
    image_org = mpimg.imread(image_file)
    image_undistort = cv2.undistort(image_org, mtx, dist, None, mtx)
    plt.imshow(image_undistort)
    plt.show()
    plt.imsave('../output_images/' + 'undistort_'+base_fn + '.png', image_undistort )
    image = image_undistort
    # Saturation threshold, then gradient/magnitude/direction masks on it.
    hls_binary = util.hls_select(image, thresh=(90, 255))
    ksize = 9 # Choose a larger odd number to smooth gradient measurements
    gradx = util.abs_sobel_thresh(hls_binary, orient='x', sobel_kernel=ksize, thresh=(40, 100))
    grady = util.abs_sobel_thresh(hls_binary, orient='y', sobel_kernel=ksize, thresh=(40, 100))
    mag_binary = util.mag_thresh(hls_binary, sobel_kernel=ksize, mag_thresh=(30, 100))
    dir_binary = util.dir_threshold(hls_binary, sobel_kernel=ksize, thresh=(0.7, 1.3))
    # BUG FIX: the original read `np.zeros_like(binary)`, but `binary` was
    # never defined (NameError); any of the masks has the right shape/dtype.
    combined = np.zeros_like(gradx)
    combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
    bird_view = util.warper(combined, src_points, dst_points)
    plt.title('COMBINED bird view ' + image_file)
    plt.imshow(bird_view,cmap='gray')
    plt.show()
    plt.imsave('../output_images/' + 'binary_combo_warped_' + base_fn + '.png', bird_view*255, cmap='gray' )
    # Visualize the source quad on the undistorted image and the destination
    # quad on the warped image, side by side.
    bird_view_rgb = util.warper(image_undistort, src_points, dst_points)
    bird_view_rgb = cv2.polylines(bird_view_rgb,np.array([dst_points],dtype=np.int32),True, ( 255, 0, 0) ,thickness=10)
    image_roi = cv2.polylines(image_undistort, np.array([src_points],dtype=np.int32),True, ( 255, 0, 0 ), thickness=10)
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
    f.tight_layout()
    ax1.imshow(image_roi)
    ax1.set_title('Original Image', fontsize=24)
    ax2.imshow(bird_view_rgb)
    ax2.set_title('bird view Image', fontsize=24)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.savefig('../output_images/' + 'warped_' + base_fn + '.jpg')
    plt.show()
# Persist the calibration and warp parameters for the lane-finding
# pipeline to reuse.
calibration_data = {
    'mtx': mtx,
    'dist': dist,
    'src_points': src_points,
    'dst_points': dst_points,
}
with open(util.camera_mtx_file_name, 'wb') as pkl_file:
    pickle.dump(calibration_data, pkl_file, pickle.HIGHEST_PROTOCOL)
| [
"matplotlib.image.imread",
"numpy.array",
"cv2.warpPerspective",
"util.warper",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"matplotlib.pyplot.imshow",
"cv2.undistort",
"matplotlib.pyplot.subplots",
"glob.glob",
"util.dir_threshold",
"matplotlib.pyplot.savefig",
"cv2.getPerspectiveTr... | [((1380, 1412), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (1388, 1412), True, 'import numpy as np\n'), ((1664, 1707), 'glob.glob', 'glob.glob', (['"""../camera_cal/calibration*.jpg"""'], {}), "('../camera_cal/calibration*.jpg')\n", (1673, 1707), False, 'import glob\n'), ((2378, 2449), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, gray.shape[::-1], None, None)\n', (2397, 2449), False, 'import cv2\n'), ((2465, 2509), 'cv2.imread', 'cv2.imread', (['"""../camera_cal/calibration1.jpg"""'], {}), "('../camera_cal/calibration1.jpg')\n", (2475, 2509), False, 'import cv2\n'), ((2520, 2549), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist'], {}), '(img, mtx, dist)\n', (2533, 2549), False, 'import cv2\n'), ((2567, 2602), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(24, 9)'}), '(1, 2, figsize=(24, 9))\n', (2579, 2602), True, 'import matplotlib.pyplot as plt\n'), ((2749, 2808), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.0)', 'right': '(1)', 'top': '(0.9)', 'bottom': '(0.0)'}), '(left=0.0, right=1, top=0.9, bottom=0.0)\n', (2768, 2808), True, 'import matplotlib.pyplot as plt\n'), ((2807, 2859), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../output_images/undistort_output.png"""'], {}), "('../output_images/undistort_output.png')\n", (2818, 2859), True, 'import matplotlib.pyplot as plt\n'), ((2860, 2870), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2868, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2890, 2937), 'glob.glob', 'glob.glob', (['"""../test_images/straight_lines*.jpg"""'], {}), "('../test_images/straight_lines*.jpg')\n", (2899, 2937), False, 'import glob\n'), ((295, 324), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist'], {}), '(img, mtx, dist)\n', (308, 324), False, 'import cv2\n'), ((336, 
377), 'cv2.cvtColor', 'cv2.cvtColor', (['img_und', 'cv2.COLOR_BGR2GRAY'], {}), '(img_und, cv2.COLOR_BGR2GRAY)\n', (348, 377), False, 'import cv2\n'), ((399, 440), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(nx, ny)'], {}), '(gray, (nx, ny))\n', (424, 440), False, 'import cv2\n'), ((981, 1084), 'numpy.float32', 'np.float32', (['[[corners[0, 0, :]], [corners[1, 0, :]], [corners[nx, 0, :]], [corners[nx +\n 1, 0, :]]]'], {}), '([[corners[0, 0, :]], [corners[1, 0, :]], [corners[nx, 0, :]], [\n corners[nx + 1, 0, :]]])\n', (991, 1084), True, 'import numpy as np\n'), ((1099, 1236), 'numpy.float32', 'np.float32', (['[[0.5 * w / nx, 0.5 * h / ny], [1.5 * w / nx, 0.5 * h / ny], [0.5 * w / nx,\n 1.5 * h / ny], [1.5 * w / nx, 1.5 * h / ny]]'], {}), '([[0.5 * w / nx, 0.5 * h / ny], [1.5 * w / nx, 0.5 * h / ny], [\n 0.5 * w / nx, 1.5 * h / ny], [1.5 * w / nx, 1.5 * h / ny]])\n', (1109, 1236), True, 'import numpy as np\n'), ((1201, 1238), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (1228, 1238), False, 'import cv2\n'), ((1251, 1286), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(w, h)'], {}), '(img, M, (w, h))\n', (1270, 1286), False, 'import cv2\n'), ((1798, 1815), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (1808, 1815), False, 'import cv2\n'), ((1827, 1864), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1839, 1864), False, 'import cv2\n'), ((1918, 1963), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (1943, 1963), False, 'import cv2\n'), ((3045, 3069), 'matplotlib.image.imread', 'mpimg.imread', (['image_file'], {}), '(image_file)\n', (3057, 3069), True, 'import matplotlib.image as mpimg\n'), ((3092, 3138), 'cv2.undistort', 'cv2.undistort', (['image_org', 'mtx', 'dist', 'None', 'mtx'], {}), '(image_org, mtx, dist, None, mtx)\n', (3105, 3138), 
False, 'import cv2\n'), ((3143, 3170), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_undistort'], {}), '(image_undistort)\n', (3153, 3170), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3185), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3183, 3185), True, 'import matplotlib.pyplot as plt\n'), ((3190, 3276), 'matplotlib.pyplot.imsave', 'plt.imsave', (["('../output_images/' + 'undistort_' + base_fn + '.png')", 'image_undistort'], {}), "('../output_images/' + 'undistort_' + base_fn + '.png',\n image_undistort)\n", (3200, 3276), True, 'import matplotlib.pyplot as plt\n'), ((3317, 3357), 'util.hls_select', 'util.hls_select', (['image'], {'thresh': '(90, 255)'}), '(image, thresh=(90, 255))\n', (3332, 3357), False, 'import util\n'), ((3445, 3533), 'util.abs_sobel_thresh', 'util.abs_sobel_thresh', (['hls_binary'], {'orient': '"""x"""', 'sobel_kernel': 'ksize', 'thresh': '(40, 100)'}), "(hls_binary, orient='x', sobel_kernel=ksize, thresh=(\n 40, 100))\n", (3466, 3533), False, 'import util\n'), ((3541, 3629), 'util.abs_sobel_thresh', 'util.abs_sobel_thresh', (['hls_binary'], {'orient': '"""y"""', 'sobel_kernel': 'ksize', 'thresh': '(40, 100)'}), "(hls_binary, orient='y', sobel_kernel=ksize, thresh=(\n 40, 100))\n", (3562, 3629), False, 'import util\n'), ((3642, 3711), 'util.mag_thresh', 'util.mag_thresh', (['hls_binary'], {'sobel_kernel': 'ksize', 'mag_thresh': '(30, 100)'}), '(hls_binary, sobel_kernel=ksize, mag_thresh=(30, 100))\n', (3657, 3711), False, 'import util\n'), ((3729, 3798), 'util.dir_threshold', 'util.dir_threshold', (['hls_binary'], {'sobel_kernel': 'ksize', 'thresh': '(0.7, 1.3)'}), '(hls_binary, sobel_kernel=ksize, thresh=(0.7, 1.3))\n', (3747, 3798), False, 'import util\n'), ((3814, 3835), 'numpy.zeros_like', 'np.zeros_like', (['binary'], {}), '(binary)\n', (3827, 3835), True, 'import numpy as np\n'), ((3953, 4014), 'numpy.float32', 'np.float32', (['[[571, 467], [717, 467], [1105, 720], [205, 720]]'], {}), '([[571, 467], [717, 467], 
[1105, 720], [205, 720]])\n', (3963, 4014), True, 'import numpy as np\n'), ((4115, 4204), 'numpy.float32', 'np.float32', (['[[1280 / 4, 0], [1280 / 4 * 3, 0], [1280 / 4 * 3, 720], [1280 / 4, 720]]'], {}), '([[1280 / 4, 0], [1280 / 4 * 3, 0], [1280 / 4 * 3, 720], [1280 / \n 4, 720]])\n', (4125, 4204), True, 'import numpy as np\n'), ((4345, 4390), 'util.warper', 'util.warper', (['combined', 'src_points', 'dst_points'], {}), '(combined, src_points, dst_points)\n', (4356, 4390), False, 'import util\n'), ((4395, 4440), 'matplotlib.pyplot.title', 'plt.title', (["('COMBINED bird view ' + image_file)"], {}), "('COMBINED bird view ' + image_file)\n", (4404, 4440), True, 'import matplotlib.pyplot as plt\n'), ((4445, 4479), 'matplotlib.pyplot.imshow', 'plt.imshow', (['bird_view'], {'cmap': '"""gray"""'}), "(bird_view, cmap='gray')\n", (4455, 4479), True, 'import matplotlib.pyplot as plt\n'), ((4483, 4493), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4491, 4493), True, 'import matplotlib.pyplot as plt\n'), ((4498, 4607), 'matplotlib.pyplot.imsave', 'plt.imsave', (["('../output_images/' + 'binary_combo_warped_' + base_fn + '.png')", '(bird_view * 255)'], {'cmap': '"""gray"""'}), "('../output_images/' + 'binary_combo_warped_' + base_fn + '.png',\n bird_view * 255, cmap='gray')\n", (4508, 4607), True, 'import matplotlib.pyplot as plt\n'), ((4634, 4686), 'util.warper', 'util.warper', (['image_undistort', 'src_points', 'dst_points'], {}), '(image_undistort, src_points, dst_points)\n', (4645, 4686), False, 'import util\n'), ((4947, 4982), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 4)'}), '(1, 2, figsize=(12, 4))\n', (4959, 4982), True, 'import matplotlib.pyplot as plt\n'), ((5163, 5222), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.0)', 'right': '(1)', 'top': '(0.9)', 'bottom': '(0.0)'}), '(left=0.0, right=1, top=0.9, bottom=0.0)\n', (5182, 5222), True, 'import matplotlib.pyplot as plt\n'), ((5225, 
5288), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../output_images/' + 'warped_' + base_fn + '.jpg')"], {}), "('../output_images/' + 'warped_' + base_fn + '.jpg')\n", (5236, 5288), True, 'import matplotlib.pyplot as plt\n'), ((5293, 5303), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5301, 5303), True, 'import matplotlib.pyplot as plt\n'), ((5510, 5562), 'pickle.dump', 'pickle.dump', (['save_pickle', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(save_pickle, f, pickle.HIGHEST_PROTOCOL)\n', (5521, 5562), False, 'import pickle\n'), ((479, 535), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['img_und', '(9, 6)', 'corners', 'ret'], {}), '(img_und, (9, 6), corners, ret)\n', (504, 535), False, 'import cv2\n'), ((548, 651), 'numpy.float32', 'np.float32', (['[[corners[0, 0, :]], [corners[1, 0, :]], [corners[nx, 0, :]], [corners[nx +\n 1, 0, :]]]'], {}), '([[corners[0, 0, :]], [corners[1, 0, :]], [corners[nx, 0, :]], [\n corners[nx + 1, 0, :]]])\n', (558, 651), True, 'import numpy as np\n'), ((674, 811), 'numpy.float32', 'np.float32', (['[[0.5 * w / nx, 0.5 * h / ny], [1.5 * w / nx, 0.5 * h / ny], [0.5 * w / nx,\n 1.5 * h / ny], [1.5 * w / nx, 1.5 * h / ny]]'], {}), '([[0.5 * w / nx, 0.5 * h / ny], [1.5 * w / nx, 0.5 * h / ny], [\n 0.5 * w / nx, 1.5 * h / ny], [1.5 * w / nx, 1.5 * h / ny]])\n', (684, 811), True, 'import numpy as np\n'), ((780, 817), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['src', 'dst'], {}), '(src, dst)\n', (807, 817), False, 'import cv2\n'), ((834, 876), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img_corner', 'M', '(w, h)'], {}), '(img_corner, M, (w, h))\n', (853, 876), False, 'import cv2\n'), ((2150, 2202), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['img', '(9, 6)', 'corners', 'ret'], {}), '(img, (9, 6), corners, ret)\n', (2175, 2202), False, 'import cv2\n'), ((2267, 2282), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2277, 2282), True, 'import 
matplotlib.pyplot as plt\n'), ((2291, 2301), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2299, 2301), True, 'import matplotlib.pyplot as plt\n'), ((4735, 4773), 'numpy.array', 'np.array', (['[dst_points]'], {'dtype': 'np.int32'}), '([dst_points], dtype=np.int32)\n', (4743, 4773), True, 'import numpy as np\n'), ((4854, 4892), 'numpy.array', 'np.array', (['[src_points]'], {'dtype': 'np.int32'}), '([src_points], dtype=np.int32)\n', (4862, 4892), True, 'import numpy as np\n')] |
import numpy as np
from pokerlib.EvaluationStateMachine import FlushStateDevice, \
StraightStateDevice, CardValueStateDevice
ROYAL_FLUSH = 9
STRAIGHT_FLUSH = 8
FOUR_OF_A_KIND = 7
FULL_HOUSE = 6
FLUSH = 5
STRAIGHT = 4
THREE_OF_A_KIND = 3
TWO_PAIR = 2
PAIR = 1
HIGH_CARD = 0
def evaluate_cards(cards):
    """Evaluate a seven-card poker hand.

    Parameters
    ----------
    cards : sequence of card objects exposing ``value`` and ``suit``
        Exactly seven cards; an ``AssertionError`` is raised otherwise.

    Returns
    -------
    list
        ``[rank, made_cards, kickers]`` where ``rank`` is one of the
        module-level hand constants (ROYAL_FLUSH .. HIGH_CARD).
    """
    # Fix: the original used "is not 7", which tests object identity
    # instead of equality; also validate before doing any work.
    if len(cards) != 7:
        raise AssertionError("Need seven cards to evaluate")
    cards = np.asarray(cards)
    cards.sort()
    cards_values = np.asarray([cards[i].value for i in range(len(cards))])
    cards_value_diff = [cards[i+1].value-cards[i].value for i in range(len(cards)-1)]
    suits = np.asarray([cards[i].suit for i in range(len(cards))])
    suits_indexes = np.argsort(suits)
    suits.sort()
    cards_suit_diff = [suits[i + 1] - suits[i] for i in range(len(suits) - 1)]
    # State machines that detect flush / straight / repeated-value patterns
    flush_state_machine = FlushStateDevice()
    straight_state_machine = StraightStateDevice()
    value_state_machine = CardValueStateDevice()
    for index in range(len(cards_suit_diff)):
        flush_state_machine.on_event(cards_suit_diff[index], index)
    flush_card_indexes = suits_indexes[flush_state_machine.used_card_indices]
    is_flush = flush_state_machine.state.evaluate()
    for index in range(len(cards_value_diff)):
        straight_state_machine.on_event(cards_value_diff[index], index)
        value_state_machine.on_event(cards_value_diff[index], index)
    is_straight = straight_state_machine.state.evaluate()
    straight_indexes = straight_state_machine.used_card_indices
    is_ace_high_straight = False
    # Special-case the wheel (A-2-3-4-5): the ace (value 12) sorts high,
    # so the diff-based state machine cannot see it as the low end.
    if not is_straight and \
            (12 in cards_values and
             0 in cards_values and
             1 in cards_values and
             2 in cards_values and
             3 in cards_values):
        is_straight = True
        is_ace_high_straight = True
        ace_straight_values = [12, 0, 1, 2, 3]
        straight_indexes = np.asarray([np.where(cards_values == ace_straight_values[i])[0][0]
                                        for i in range(len(ace_straight_values))])
    value_result = value_state_machine.state.evaluate()
    value_indexes = value_state_machine.used_card_indices
    # Cards that belong to both the flush and the straight candidate
    sf_indexes = np.intersect1d(flush_card_indexes, straight_indexes)
    index_diffs = np.abs([sf_indexes[i]-sf_indexes[i+1] for i in range(len(sf_indexes)-1)])
    high_cards = list(np.copy(cards))
    if is_flush \
            and is_straight \
            and (np.sum(index_diffs) == len(sf_indexes)-1 or is_ace_high_straight) \
            and len(sf_indexes) > 4:
        if (cards[sf_indexes][-1].value == 12 and
                cards[sf_indexes][-2].value == 11):  # king present -> ace-high, not the wheel
            return [ROYAL_FLUSH, cards[sf_indexes], []]
        if cards[sf_indexes][-1].value == 12:
            # Wheel straight flush: rotate the ace to the low end
            sf_indexes = list(sf_indexes)
            sf_indexes.insert(0, sf_indexes.pop(-1))
            sf_indexes = np.asarray(sf_indexes)
        return [STRAIGHT_FLUSH, cards[sf_indexes], []]
    if value_result == FOUR_OF_A_KIND:
        return_cards = cards[value_indexes]
        if len(return_cards) > 4:
            # More than four marked cards: keep only the quad (median value)
            card_values = np.asarray([cards[index].value for index in value_indexes])
            return_cards = return_cards[np.where(card_values == np.median(card_values))]
        for card in return_cards:
            high_cards.remove(card)
        return [FOUR_OF_A_KIND, return_cards, get_high_cards(high_cards, 1)]
    if value_result == FULL_HOUSE:
        return_cards = cards[value_indexes]
        return [FULL_HOUSE, return_cards[:], []]
    if is_flush:
        return_cards = cards[flush_card_indexes]
        return [FLUSH, return_cards[-5:], []]
    if is_straight:
        return_cards = cards[straight_indexes]
        return [STRAIGHT, return_cards[-5:], []]
    if value_result == THREE_OF_A_KIND:
        for card in cards[value_indexes]:
            high_cards.remove(card)
        return [THREE_OF_A_KIND, cards[value_indexes], get_high_cards(high_cards, 2)]
    if value_result == TWO_PAIR:
        return_cards = cards[value_indexes][-4:]
        for card in return_cards:
            high_cards.remove(card)
        return [TWO_PAIR, return_cards, get_high_cards(high_cards, 1)]
    if value_result == PAIR:
        for card in cards[value_indexes]:
            high_cards.remove(card)
        return [PAIR, cards[value_indexes], get_high_cards(high_cards, 3)]
    return [HIGH_CARD, [], cards[2:]]
def get_high_cards(cards, number):
    """Return the *number* highest cards from *cards*.

    Sorts *cards* in place (ascending) and returns the top slice.
    Raises AssertionError when fewer than *number* cards are available.
    """
    if len(cards) < number:
        raise AssertionError("cannot get so many high cards")
    cards.sort()
    # Fix: the original returned cards[(-1*number):]; with number == 0
    # that is cards[0:] and returned every card instead of none.
    return cards[len(cards) - number:]
| [
"numpy.intersect1d",
"numpy.copy",
"numpy.median",
"numpy.where",
"pokerlib.EvaluationStateMachine.CardValueStateDevice",
"numpy.asarray",
"numpy.argsort",
"numpy.sum",
"pokerlib.EvaluationStateMachine.FlushStateDevice",
"pokerlib.EvaluationStateMachine.StraightStateDevice"
] | [((319, 336), 'numpy.asarray', 'np.asarray', (['cards'], {}), '(cards)\n', (329, 336), True, 'import numpy as np\n'), ((603, 620), 'numpy.argsort', 'np.argsort', (['suits'], {}), '(suits)\n', (613, 620), True, 'import numpy as np\n'), ((744, 762), 'pokerlib.EvaluationStateMachine.FlushStateDevice', 'FlushStateDevice', ([], {}), '()\n', (760, 762), False, 'from pokerlib.EvaluationStateMachine import FlushStateDevice, StraightStateDevice, CardValueStateDevice\n'), ((792, 813), 'pokerlib.EvaluationStateMachine.StraightStateDevice', 'StraightStateDevice', ([], {}), '()\n', (811, 813), False, 'from pokerlib.EvaluationStateMachine import FlushStateDevice, StraightStateDevice, CardValueStateDevice\n'), ((840, 862), 'pokerlib.EvaluationStateMachine.CardValueStateDevice', 'CardValueStateDevice', ([], {}), '()\n', (860, 862), False, 'from pokerlib.EvaluationStateMachine import FlushStateDevice, StraightStateDevice, CardValueStateDevice\n'), ((2153, 2205), 'numpy.intersect1d', 'np.intersect1d', (['flush_card_indexes', 'straight_indexes'], {}), '(flush_card_indexes, straight_indexes)\n', (2167, 2205), True, 'import numpy as np\n'), ((2320, 2334), 'numpy.copy', 'np.copy', (['cards'], {}), '(cards)\n', (2327, 2334), True, 'import numpy as np\n'), ((2891, 2913), 'numpy.asarray', 'np.asarray', (['sf_indexes'], {}), '(sf_indexes)\n', (2901, 2913), True, 'import numpy as np\n'), ((3113, 3172), 'numpy.asarray', 'np.asarray', (['[cards[index].value for index in value_indexes]'], {}), '([cards[index].value for index in value_indexes])\n', (3123, 3172), True, 'import numpy as np\n'), ((2401, 2420), 'numpy.sum', 'np.sum', (['index_diffs'], {}), '(index_diffs)\n', (2407, 2420), True, 'import numpy as np\n'), ((1895, 1943), 'numpy.where', 'np.where', (['(cards_values == ace_straight_values[i])'], {}), '(cards_values == ace_straight_values[i])\n', (1903, 1943), True, 'import numpy as np\n'), ((3237, 3259), 'numpy.median', 'np.median', (['card_values'], {}), '(card_values)\n', (3246, 
3259), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
## Use the trained yNet to perform large-scale grain growth simulation
import numpy as np
import matplotlib.pyplot as plt
from model import *

# Simulation grid size (pixels)
nx = 1600
ny = 1600

# Available time-step increments, normalised to [0, 1] for the network input
deltaT = [1,3,4,5,7,9,11,12,13,15,17,18,19,21,22,24,25,27,29,30,2,6,14,20,28,8,10,16,23,26]
deltaT = deltaT/np.max(deltaT)

# Build the trained yNet and restore its weights
MICNN = yNet(nx, ny)
MICNN.summary()
MICNN.load_weights("weights_yNet.h5")


def run_simulation(seed_file, dt_index, dt_label, n_steps=65):
    """Recurrently roll out the network from an initial seeding.

    seed_file : path of the .npy file holding the initial microstructure
    dt_index  : index into the normalised ``deltaT`` array fed to the net
    dt_label  : human-readable delta-t used in plot titles and file names
    n_steps   : number of recurrent prediction steps (one image per step)
    """
    eta = np.load(seed_file)
    x = np.reshape(eta, (1, nx, ny, 1))
    dt = np.reshape(deltaT[dt_index], (1, 1))
    for istep in range(n_steps):
        print(istep)
        ax = plt.imshow(x.reshape(nx, ny), cmap='coolwarm', vmin=0, vmax=1)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
        ax.axes.set_title(r'$\mathit{\Delta}t$' + ' = ' + dt_label,
                          loc='right', fontsize=10)
        ax.axes.set_title(r'$\mathit{t}$' + '_' + r'$\mathit{step}$' + ' = ' + str(istep),
                          loc='left', fontsize=10)
        plt.savefig('large_dt' + dt_label + '_' + 'eta_' + str(istep) + '.jpg',
                    dpi=200, bbox_inches="tight")
        plt.close()
        # Feed the prediction back in as the next input (recurrent rollout)
        x = MICNN.predict([x, dt])
    return x


# delta_T = 1, 5 and 30 runs (deltaT indices 0, 3 and 19 respectively);
# the three copy-pasted sections of the original are now a single helper.
run_simulation("data_seeding_1600x1600\\eta_initial_1.npy", 0, '1')
run_simulation("data_seeding_1600x1600\\eta_initial_5.npy", 3, '5')
run_simulation("data_seeding_1600x1600\\eta_initial_30.npy", 19, '30')
"numpy.reshape",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.load"
] | [((485, 521), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {'dtype': 'np.float32'}), '((nx, ny), dtype=np.float32)\n', (493, 521), True, 'import numpy as np\n'), ((529, 581), 'numpy.load', 'np.load', (['"""data_seeding_1600x1600\\\\eta_initial_1.npy"""'], {}), "('data_seeding_1600x1600\\\\eta_initial_1.npy')\n", (536, 581), True, 'import numpy as np\n'), ((620, 656), 'numpy.reshape', 'np.reshape', (['x_test_0', '(1, nx, ny, 1)'], {}), '(x_test_0, (1, nx, ny, 1))\n', (630, 656), True, 'import numpy as np\n'), ((732, 765), 'numpy.reshape', 'np.reshape', (['deltaT_test_0', '(1, 1)'], {}), '(deltaT_test_0, (1, 1))\n', (742, 765), True, 'import numpy as np\n'), ((1514, 1550), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {'dtype': 'np.float32'}), '((nx, ny), dtype=np.float32)\n', (1522, 1550), True, 'import numpy as np\n'), ((1558, 1610), 'numpy.load', 'np.load', (['"""data_seeding_1600x1600\\\\eta_initial_5.npy"""'], {}), "('data_seeding_1600x1600\\\\eta_initial_5.npy')\n", (1565, 1610), True, 'import numpy as np\n'), ((1649, 1685), 'numpy.reshape', 'np.reshape', (['x_test_0', '(1, nx, ny, 1)'], {}), '(x_test_0, (1, nx, ny, 1))\n', (1659, 1685), True, 'import numpy as np\n'), ((1761, 1794), 'numpy.reshape', 'np.reshape', (['deltaT_test_0', '(1, 1)'], {}), '(deltaT_test_0, (1, 1))\n', (1771, 1794), True, 'import numpy as np\n'), ((2544, 2580), 'numpy.zeros', 'np.zeros', (['(nx, ny)'], {'dtype': 'np.float32'}), '((nx, ny), dtype=np.float32)\n', (2552, 2580), True, 'import numpy as np\n'), ((2588, 2641), 'numpy.load', 'np.load', (['"""data_seeding_1600x1600\\\\eta_initial_30.npy"""'], {}), "('data_seeding_1600x1600\\\\eta_initial_30.npy')\n", (2595, 2641), True, 'import numpy as np\n'), ((2701, 2737), 'numpy.reshape', 'np.reshape', (['x_test_0', '(1, nx, ny, 1)'], {}), '(x_test_0, (1, nx, ny, 1))\n', (2711, 2737), True, 'import numpy as np\n'), ((2814, 2847), 'numpy.reshape', 'np.reshape', (['deltaT_test_0', '(1, 1)'], {}), '(deltaT_test_0, (1, 1))\n', (2824, 2847), True, 
'import numpy as np\n'), ((309, 323), 'numpy.max', 'np.max', (['deltaT'], {}), '(deltaT)\n', (315, 323), True, 'import numpy as np\n'), ((1289, 1300), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1298, 1300), True, 'import matplotlib.pyplot as plt\n'), ((2318, 2329), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2327, 2329), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3384), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3382, 3384), True, 'import matplotlib.pyplot as plt\n')] |
from __future__ import absolute_import
import numpy as np
from pyti import catch_errors
from pyti.function_helper import fill_for_noncomputable_vals
def williams_percent_r(high_data, low_data, close_data, period):
    """
    Williams %R.

    Formula:
    wr = ((HighestHigh - close) / (HighestHigh - LowestLow)) * -100

    Lookback period is typically 14 trading days.
    """
    catch_errors.check_for_period_error(close_data, period)
    catch_errors.check_for_period_error(high_data, period)
    catch_errors.check_for_period_error(low_data, period)

    wr = []
    for idx in range(period - 1, len(close_data)):
        # Hoist the rolling-window extrema: the original evaluated
        # np.max over the same slice twice per index.
        highest_high = np.max(high_data[idx + 1 - period:idx + 1])
        lowest_low = np.min(low_data[idx + 1 - period:idx + 1])
        wr.append(-100 * (highest_high - close_data[idx]) /
                  (highest_high - lowest_low))
    wr = fill_for_noncomputable_vals(close_data, wr)
    return wr
| [
"pyti.function_helper.fill_for_noncomputable_vals",
"pyti.catch_errors.check_for_period_error",
"numpy.min",
"numpy.max"
] | [((377, 432), 'pyti.catch_errors.check_for_period_error', 'catch_errors.check_for_period_error', (['close_data', 'period'], {}), '(close_data, period)\n', (412, 432), False, 'from pyti import catch_errors\n'), ((437, 491), 'pyti.catch_errors.check_for_period_error', 'catch_errors.check_for_period_error', (['high_data', 'period'], {}), '(high_data, period)\n', (472, 491), False, 'from pyti import catch_errors\n'), ((496, 549), 'pyti.catch_errors.check_for_period_error', 'catch_errors.check_for_period_error', (['low_data', 'period'], {}), '(low_data, period)\n', (531, 549), False, 'from pyti import catch_errors\n'), ((789, 832), 'pyti.function_helper.fill_for_noncomputable_vals', 'fill_for_noncomputable_vals', (['close_data', 'wr'], {}), '(close_data, wr)\n', (816, 832), False, 'from pyti.function_helper import fill_for_noncomputable_vals\n'), ((643, 686), 'numpy.max', 'np.max', (['high_data[idx + 1 - period:idx + 1]'], {}), '(high_data[idx + 1 - period:idx + 1])\n', (649, 686), True, 'import numpy as np\n'), ((696, 738), 'numpy.min', 'np.min', (['low_data[idx + 1 - period:idx + 1]'], {}), '(low_data[idx + 1 - period:idx + 1])\n', (702, 738), True, 'import numpy as np\n'), ((570, 613), 'numpy.max', 'np.max', (['high_data[idx + 1 - period:idx + 1]'], {}), '(high_data[idx + 1 - period:idx + 1])\n', (576, 613), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Support module generated by PAGE version 4.20
# in conjunction with Tcl version 8.6
# Feb 18, 2019 11:55:48 AM -03 platform: Windows NT
# Feb 19, 2019 09:09:42 AM -03 platform: Windows NT
"""
Created on Mon Feb 18 10:08:04 2019
@author: <NAME>
"""
import sys
import Controller as ctrl
import Model as md
import Estilos as es
import numpy as np
#from numpy import array, concatenate, ndarray, append, take, delete
import pandas as pd
from tkinter import filedialog, colorchooser, IntVar
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
def set_var():
    """Initialise the module-level state shared by the GUI callbacks."""
    global conn, qtdMin, curvePlot, curvesList, cBoxList, md_dpv, validation
    conn = 0                    # 1 while the potentiostat is connected (see btn_connect)
    qtdMin = 0                  # number of thumbnail panels currently drawn
    curvePlot = np.ndarray([])  # curve(s) currently shown in the main plot
    curvesList = np.ndarray([]) # every curve loaded or acquired so far
    cBoxList = np.ndarray([])   # IntVar of each thumbnail's checkbox
    md_dpv = md.dpv()           # DPV parameter model (holds entry defaults)
    validation = ctrl.validation(w, root)  # entry-field validator
def init(top, gui, *args, **kwargs):
    """Wire the generated GUI objects into module globals and show the DPV panel.

    top : the Tk top-level window; gui : the generated widget container.
    """
    global w, top_level, root, font9
    w = gui
    top_level = top
    root = top
    #font9 = "-family {Segoe UI} -size 9 -weight bold -slant roman " \
    #    "-underline 0 -overstrike 0"
    set_var()
    painelDPV()
def destroy_window():
    """Close the top-level window and clear the global reference."""
    global top_level
    top_level.destroy()
    top_level = None
if __name__ == '__main__':
    # Running this support module directly launches the full application.
    import VStat
    VStat.vp_start_gui()
########## Funções ###########
def createCanvas():
    """(Re)create the main plot canvas and the toolbar frame inside fr_mainView."""
    # Main plotting surface
    w.cv_curveGraph = tk.Canvas(w.fr_mainView)
    w.cv_curveGraph.place(relx=0.012, rely=0.119, relheight=0.857, relwidth=0.974)
    w.cv_curveGraph.configure(
        background="#ffffff",
        highlightbackground="#ffffff",
        highlightcolor="black",
        insertbackground="black",
        relief='ridge',
        selectbackground="#c4c4c4",
        selectforeground="black",
        width=823,
    )
    # Strip that hosts the matplotlib navigation toolbar
    w.fr_toolbar = tk.Frame(w.fr_mainView)
    w.fr_toolbar.place(relx=0.012, rely=0.02, height=38, relwidth=0.974)
    w.fr_toolbar.configure(
        relief='groove',
        borderwidth="2",
        background="#f9f9f9",
        highlightbackground="#f9f9f9",
        highlightcolor="black",
        width=823,
    )
def btn_import(p1):
    """Toolbar callback: import a curve from a CSV file and display it.

    p1 is the Tk event object (unused). Note: importing discards every
    previously loaded curve before appending the new one.
    """
    global curvesList, curvePlot, spAt, cnvAt
    curvePlot = np.ndarray([])
    curvesList = np.ndarray([])
    imp = filedialog.askopenfilename(initialdir = "C:/", title = "Importar CSV...",
                                     filetypes = (("Comma-separeted values", "*.csv"),
                                                 ("All files", "*.*")))
    if imp:  # empty string means the user cancelled the dialog
        csv = ctrl.file.importCsv(imp)
        top_level.title("VStat - " + csv.curveName + ".csv")
        curvePlot = np.append(curvePlot, csv, axis=None)
        spAt, cnvAt = drawCurve()
        cnvAt.draw()
        curvesList = np.append(curvesList, csv, axis=None)
        createMins()
def btn_export(p1):
    """Toolbar callback: export the currently plotted curve to a CSV file.

    p1 is the Tk event object (unused). curvePlot keeps a placeholder at
    index 0 (from np.ndarray([])), so size == 2 means exactly one curve.
    """
    global curvePlot
    if curvePlot.size == 2:
        csvName = filedialog.asksaveasfilename(title='Exportar CSV...', defaultextension = 'csv', initialdir = "C:/", filetypes = (("Comma-separeted values", "*.csv"), ("All files", "*.*")))
        ctrl.file.exportCsv(np.take(curvePlot, 1), csvName)
    elif curvePlot.size > 2:
        # Merged plots cannot be exported yet
        w.lb_ConnInfo.configure(text="Ainda não é possível\nexportar curvas unidas")
    elif curvePlot.size < 2:
        # Nothing plotted
        w.lb_ConnInfo.configure(text="Sem curva para\nexportar")
def btn_connect(p1):
    """Toggle the serial connection to the VStat potentiostat.

    p1 is the Tk event object (unused). Updates the button label/colour
    and the status label according to the new connection state.
    """
    global conn
    if not conn:
        # Currently disconnected: try to establish a connection
        vstat = ctrl.connection.connect()
        if not vstat:
            w.lb_ConnInfo.configure(text="VStat não encontrado")
            return
        conn = 1
        w.lb_ConnInfo.configure(text="VStat conectado\nPorta " + vstat)
        w.btn_connect.configure(text="Desconectar", background="#00cccc")
        w.btn_connect.update()
        return
    # Currently connected: drop the connection
    ctrl.connection.disconnect()
    conn = 0
    w.btn_connect.configure(text="Conectar", background="#738c8c")
    w.btn_connect.update()
    w.lb_ConnInfo.configure(text="VStat desconectado")
def btn_iniciar(p1):
    """Start a DPV analysis: read the entry fields, transmit the parameters
    to the potentiostat and plot the incoming data live.

    p1 is the Tk event object (unused).
    """
    global curvePlot, curvesList, spAt, cnvAt, md_dpv
    # Snapshot the entry fields into the DPV parameter model
    md_dpv.pIni = w.et_PInicio.get()
    md_dpv.pFim = w.et_PFim.get()
    md_dpv.pPul = w.et_PPulso.get()
    md_dpv.pPas = w.et_PPasso.get()
    md_dpv.tPul = w.et_TPulso.get()
    md_dpv.tPas = w.et_tPasso.get()
    md_dpv.tEqu = w.et_tEquil.get()
    md_dpv.fEsc = w.cb_intCorrente.current()
    # Clear the thumbnail frame
    destroyChildren(w.fr_miniaturas)
    w.fr_miniaturas.update()
    # Check whether the potentiostat is connected and start the analysis
    ini = ctrl.connection.openPort()
    if ini:
        # openPort() returned an error: flag the connect button red
        w.lb_ConnInfo.configure(text="VStat não conectado")
        w.btn_connect.configure(background="#ff6666")
        w.btn_connect.update()
    else:
        """x = np.arange(float(w.et_PInicio.get()), float(w.et_PFim.get()), float(w.et_PPasso.get()))
        y = np.arange(0, x.size, 1)
        c = md.curve("live Plot", x, y)
        curvePlot = np.append(curvePlot, c)"""
        # Send the acquisition parameters to the device
        ctrl.transmition.transmit(str(w.cb_intCorrente.current()),
                                  w.et_PInicio.get(),
                                  w.et_PFim.get(),
                                  w.et_PPulso.get(),
                                  w.et_PPasso.get(),
                                  w.et_TPulso.get(),
                                  w.et_tPasso.get(),
                                  w.et_tEquil.get())
        destroyChildren(w.fr_mainView)
        # Full-scale current: combobox index 0 means automatic ranging;
        # otherwise the scale is parsed out of the combobox text
        if w.cb_intCorrente.current() == 0:
            fe = 5/(4096/3.3)
            print("Escala: Automática")
            print("fundo de escala(inicial): ", fe)
        else:
            fe = int(w.cb_intCorrente.get()[4:-2])/(4096/3.3)
            print("Escala: ", w.cb_intCorrente.get()[4:-2])
            print("fundo de escala: ", fe)
        # Start from an empty curve and let receive() fill it live
        curvePlot = np.ndarray([])
        curvePlot = np.append(curvePlot, md.curve("", np.array([]), np.array([])))
        spAt, cnvAt = drawCurve()
        curvesList = ctrl.transmition.receive(curvePlot, spAt, cnvAt, fe, float(w.et_PInicio.get()), float(w.et_PPasso.get()))#, canvas)
        #curvePlot = np.append(curvePlot, np.take(curvesList, 1))
        ctrl.connection.closePort()
        #if dpv:
        top_level.title("VStat - " + np.take(curvePlot, 1).curveName)#dpv.curveName)
        #createCanvas()
        #spAt, cnvAt = drawCurve()
        createMins()
def drawCurve():
    """Redraw the main plot from the global curvePlot array.

    Returns the (subplot, canvas) pair so callers can keep plotting into it.
    curvePlot keeps a placeholder at index 0, so size == 2 means a single
    curve and size > 2 a merged plot.
    """
    global curvePlot, sp, fig
    createCanvas()
    fig = Figure(figsize=(10, 8), dpi = 100)
    sp = fig.add_subplot(111, xlabel="Potencial em Volts (V)", ylabel="Corrente em Microampere (µA)")#, title=cv.curveName)
    canvas = FigureCanvasTkAgg(fig, master = w.cv_curveGraph)
    toolbar = NavigationToolbar2Tk(canvas, w.fr_toolbar)
    if curvePlot.size == 2:
        cv = np.take(curvePlot, 1)
        sp.set_title(cv.curveName)
        sp.plot(cv.curveX, cv.curveY, color=cv.color)
    elif curvePlot.size > 2:
        sp.set_title("untitle merge")
        for i in range(1, curvePlot.size):
            cv = np.take(curvePlot, i)
            sp.plot(cv.curveX, cv.curveY, color=cv.color)
        # NOTE(review): toolbar.update()/pack run only in this merge branch;
        # confirm whether the single-curve case should do the same.
        toolbar.update()
        canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    #canvas.draw()
    canvas.get_tk_widget().pack(side = tk.TOP, fill = tk.BOTH, expand = 1)
    return sp, canvas
def expandMin(curveIndex):
    """Show the thumbnail clicked at *curveIndex* alone in the main plot."""
    global curvesList, curvePlot, spAt, cnvAt
    # curvesList keeps a placeholder at position 0, hence the +1 offset
    selected = np.take(curvesList, curveIndex + 1)
    curvePlot = np.append(np.ndarray([]), selected, axis=None)
    spAt, cnvAt = drawCurve()
def createMins():
    """Rebuild the thumbnail strip: one mini-plot per loaded curve."""
    global cBoxList, curvesList, qtdMin
    # Drop any existing thumbnails first
    qtdMin = 0
    destroyChildren(w.fr_miniaturas)
    # Position 0 of curvesList is a placeholder, so real curves start at 1
    for idx in range(1, curvesList.size):
        createMin(np.take(curvesList, idx))
def createMin(curve):
    """Build one thumbnail panel (label, mini-plot, checkbox, colour swatch)
    for *curve* and append it to the thumbnail strip.
    """
    global qtdMin, cBoxList
    # One checkbox state variable per thumbnail
    cBoxList = np.append(cBoxList, IntVar(), axis=None)
    thisIndex = qtdMin
    # Each thumbnail is shifted 0.152 (relative width) to the right
    relX = 0.01
    if qtdMin == 0:
        qtdMin += 1
    elif qtdMin > 0:
        relX = (0.152 * qtdMin) + 0.01
        qtdMin += 1
    # Title label above the thumbnail
    w.lb_minCurve = tk.Label(w.fr_miniaturas)
    w.lb_minCurve.place(relx=relX, rely=0.058, height=21, width=133)
    w.lb_minCurve.configure(background="#d9d9d9")
    w.lb_minCurve.configure(disabledforeground="#a3a3a3")
    w.lb_minCurve.configure(foreground="#000000")
    w.lb_minCurve.configure(text=curve.curveName)
    w.lb_minCurve.configure(width=133)
    # Clicking the title expands this curve into the main plot
    w.lb_minCurve.bind("<Button-1>", lambda x:expandMin(thisIndex))
    # Canvas that hosts the miniature plot
    w.cv_minCurve = tk.Canvas(w.fr_miniaturas)
    w.cv_minCurve.place(relx=relX, rely=0.165, height=112, width=133)
    fig = Figure(figsize=(1, 1), dpi = 100)
    canvas = FigureCanvasTkAgg(fig, master = w.cv_minCurve)
    #toolbar = NavigationToolbar2Tk(canvas, w.fr_toolbar)
    sp = fig.add_subplot(111)
    sp.plot(curve.curveX, curve.curveY)
    #toolbar.update()
    #canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    canvas.draw()
    canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
    # Clicking the plot itself also expands the curve
    canvas.mpl_connect('button_press_event', lambda x: expandMin(thisIndex))
    # Checkbox used by curvesJoin()/removeCurve() to select this curve
    w.cb_chooser = tk.Checkbutton(w.cv_minCurve)
    w.cb_chooser.place(relx=0.075, rely=0.097, relheight=0.243, relwidth=0.211)
    w.cb_chooser.configure(activebackground="#ececec")
    w.cb_chooser.configure(activeforeground="#000000")
    w.cb_chooser.configure(background="#d9d9d9")
    w.cb_chooser.configure(disabledforeground="#a3a3a3")
    w.cb_chooser.configure(foreground="#000000")
    w.cb_chooser.configure(highlightbackground="#d9d9d9")
    w.cb_chooser.configure(highlightcolor="black")
    w.cb_chooser.configure(justify='left')
    w.cb_chooser.configure(variable=np.take(cBoxList, thisIndex+1))
    # Colour swatch; clicking it opens the colour chooser for this curve
    w.fr_color = tk.Frame(w.cv_minCurve)
    w.fr_color.place(relx=0.752, rely=0.097, relheight=0.243, relwidth=0.188)
    w.fr_color.configure(relief='groove')
    w.fr_color.configure(borderwidth="2")
    w.fr_color.configure(relief='groove')
    w.fr_color.configure(background="#1559c6")
    w.fr_color.configure(width=25)
    w.fr_color.bind("<Button-1>", lambda e:changeColor(e, thisIndex, sp, canvas))
def destroyChildren(frame):
    """Recursively destroy every descendant widget of *frame*."""
    for widget in frame.winfo_children():
        destroyChildren(widget)  # no-op when *widget* has no children
        widget.destroy()
def changeColor(p1, curveIndex, sp, canvas):
    """Let the user pick a new colour for the thumbnail at *curveIndex*
    and redraw both the thumbnail and the main plot.

    p1 is the click event on the colour swatch frame.
    """
    global curvesList, curvePlot
    color = colorchooser.askcolor()
    # askcolor() returns ((r, g, b), '#rrggbb'); slicing the tuple's repr
    # at [-9:-2] extracts the '#rrggbb' hex code
    c = str(color)
    c = c[-9:-2]
    cv = np.take(curvesList, curveIndex+1)  # +1 skips the placeholder at 0
    cv.color = c
    sp.plot(cv.curveX, cv.curveY, c)
    canvas.draw()
    # Recolour the swatch that was clicked
    p1.widget.configure(background=cv.color)
    p1.widget.update()
    drawCurve()
def curvesJoin():
    """Merge every checkbox-selected curve into the main plot.

    Requires at least two selections; otherwise a status message is shown.
    All checkboxes are cleared regardless of selection.
    """
    global curvePlot
    count = 0
    for idx in range(1, cBoxList.size):
        checkbox = np.take(cBoxList, idx)
        if checkbox.get():
            if count == 0:
                # First selection starts a fresh plot list
                curvePlot = np.ndarray([])
            curvePlot = np.append(curvePlot, np.take(curvesList, idx), axis=None)
            count += 1
        checkbox.set(0)  # always clear the checkbox, selected or not
    if count <= 1:
        w.lb_ConnInfo.configure(text="Selecione ao menos\nduas curvas")
    else:
        drawCurve()
def removeCurve():
    """Delete every checkbox-selected curve from the session and rebuild
    the thumbnail strip.

    Iterates with a manual index because np.delete shifts the remaining
    elements left; the index only advances when nothing was removed.
    """
    global curvesList, cBoxList, curvePlot
    i = 1  # position 0 of both arrays is a placeholder
    while i < cBoxList.size:
        checkbox = np.take(cBoxList, i)
        if checkbox.get():
            # Fix: removed the dead per-iteration np.take(curvePlot, 1)
            # probe (and the no-op identity check on it), which raised
            # IndexError whenever nothing had been plotted yet.
            curvesList = np.delete(curvesList, i)
            cBoxList = np.delete(cBoxList, i)
        else:
            i += 1
    createMins()
#-----------------------------------------------------#
# PAINEIS #
#-----------------------------------------------------#
#---- Painel DPV ----#
def painelDPV():
global md_dpv
destroyChildren(w.fr_analise)
w.fr_analise.configure(text='''DPV''')
vcmd = w.fr_analise.register(validation.entryValidate)
# Inicializa entradas que serão manipuladas
w.et_PInicio = tk.Entry(w.fr_analise)
w.et_PFim = tk.Entry(w.fr_analise)
w.et_PPasso = tk.Entry(w.fr_analise)
w.et_tPasso = tk.Entry(w.fr_analise)
w.lb_PInicio = tk.Label(w.fr_analise, anchor="w")
w.lb_PInicio.place(relx=0.053, y=17, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_PInicio)
w.lb_PInicio.configure(text='''Pot. Inicial (V)''')
w.et_PInicio.configure(validate="key")
w.et_PInicio.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_PInicio.place(relx=0.59, y=18, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_PInicio)
ctrl.validation.entryInsert(w.et_PInicio, md_dpv.pIni)
w.lb_PFim = tk.Label(w.fr_analise, anchor="w")
w.lb_PFim.place(relx=0.053, y=43, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_PFim)
w.lb_PFim.configure(text='''Pot. Final (V)''')
w.lb_PFim.configure(width=71)
w.et_PFim.configure(validate="key")
w.et_PFim.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_PFim.place(relx=0.59, y=44, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_PFim)
ctrl.validation.entryInsert(w.et_PFim, md_dpv.pFim)
w.lb_PPasso = tk.Label(w.fr_analise, anchor="w")
w.lb_PPasso.place(relx=0.053, y=69, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_PPasso)
w.lb_PPasso.configure(text='''Pot. Passo (V)''')
w.lb_PPasso.configure(width=81)
w.et_PPasso.configure(validate="key")
w.et_PPasso.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_PPasso.place(relx=0.59, y=70, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_PPasso)
ctrl.validation.entryInsert(w.et_PPasso, md_dpv.pPas)
w.lb_PPulso = tk.Label(w.fr_analise, anchor="w")
w.lb_PPulso.place(relx=0.053, y=95, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_PPulso)
w.lb_PPulso.configure(text='''Pot. Pulso (V)''')
w.et_PPulso = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_PPulso.place(relx=0.59, y=96, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_PPulso)
ctrl.validation.entryInsert(w.et_PPulso, md_dpv.pPul)
w.lb_TPulso = tk.Label(w.fr_analise, anchor="w")
w.lb_TPulso.place(relx=0.053, y=121, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_TPulso)
w.lb_TPulso.configure(text='''Tem. Pulso (s)''')
w.lb_TPulso.configure(width=91)
w.et_TPulso = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_TPulso.place(relx=0.59, y=122, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_TPulso)
ctrl.validation.entryInsert(w.et_TPulso, md_dpv.tPul)
w.lb_tPasso = tk.Label(w.fr_analise, anchor="w")
w.lb_tPasso.place(relx=0.053, y=147, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_tPasso)
w.lb_tPasso.configure(text='''Tem. Passo (s)''')
w.et_tPasso.configure(validate="key")
w.et_tPasso.configure(validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_tPasso.place(relx=0.59, y=148, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_tPasso)
ctrl.validation.entryInsert(w.et_tPasso, md_dpv.tPas)
w.lb_tEquil = tk.Label(w.fr_analise, anchor="w")
w.lb_tEquil.place(relx=0.053, y=173, height=21, width=110
, bordermode='ignore')
es.lbStyle(w.lb_tEquil)
w.lb_tEquil.configure(text='''Tem. equilíbrio (s)''')
w.et_tEquil = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_tEquil.place(relx=0.59, y=174, height=20, width=77
, bordermode='ignore')
es.etStyle(w.et_tEquil)
ctrl.validation.entryInsert(w.et_tEquil, md_dpv.tEqu)
w.lb_currentRange = tk.Label(w.fr_analise, anchor="w")
w.lb_currentRange.place(relx=0.053, y=199, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_currentRange)
w.lb_currentRange.configure(text='''Int. Corrente''')
w.cb_intCorrente = ttk.Combobox(w.fr_analise)
w.cb_intCorrente.place(relx=0.59, y=183, height=20, width=77)
w.cb_intCorrente.configure(values=["auto","+/- 5uA","+/- 10uA","+/- 20uA", "+/- 50uA"])
w.cb_intCorrente.current(md_dpv.fEsc)
w.lb_sRate = tk.Label(w.fr_analise, anchor="w")
w.lb_sRate.place(relx=0.053, y=225, height=21, width=110
, bordermode='ignore')
es.lbStyle(w.lb_sRate)
w.lb_sRate.configure(text='''SRate (V/s)''')
w.et_sRate = tk.Entry(w.fr_analise, state="disabled")
w.et_sRate.place(relx=0.59, y=226, height=20, width=77
, bordermode='ignore')
w.lb_tEstimado = tk.Label(w.fr_analise, anchor="w")
w.lb_tEstimado.place(relx=0.053, y=251, height=21, width=110
, bordermode='ignore')
es.lbStyle(w.lb_tEstimado)
w.lb_tEstimado.configure(text='''Tem. Estimado (s)''')
w.et_tEstimado = tk.Entry(w.fr_analise, state="disabled")
w.et_tEstimado.place(relx=0.59, y=252, height=20, width=77
, bordermode='ignore')
w.lb_nPontos = tk.Label(w.fr_analise, anchor="w")
w.lb_nPontos.place(relx=0.053, y=277, height=21, width=110
, bordermode='ignore')
es.lbStyle(w.lb_nPontos)
w.lb_nPontos.configure(text='''Nº Pontos''')
w.et_nPontos = tk.Entry(w.fr_analise, state="disabled")
w.et_nPontos.place(relx=0.59, y=278, height=20, width=77
, bordermode='ignore')
w.btn_dpv = tk.Button(w.fr_analise)
w.btn_dpv.place(relx=0.063, y=315, height=24, relwidth=0.88
, bordermode='ignore')
es.btnStyle(w.btn_dpv)
w.btn_dpv.configure(text='''Iniciar''')
w.btn_dpv.configure(width=167)
w.btn_dpv.bind('<ButtonRelease-1>',lambda e:btn_iniciar(e))
validation.updateInfo(float(w.et_PInicio.get()), float(w.et_PFim.get()), float(w.et_PPasso.get()), float(w.et_tPasso.get()))
#---- Operações de tratamento da curva ----#
def op_frame2param(frName, p1Name, p1Value, p2Name, p2Value, callback):
destroyChildren(w.fr_analise)
w.fr_analise.configure(text=frName)
vcmd = w.fr_analise.register(validation.entryValidate)
w.lb_Param1 = tk.Label(w.fr_analise, anchor="w")
w.lb_Param1.place(relx=0.053, y=17, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_Param1)
w.lb_Param1.configure(text=p1Name)
w.et_Param1 = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_Param1.place(relx=0.59, y=18, height=20, width=74
, bordermode='ignore')
es.etStyle(w.et_Param1)
w.et_Param1.configure(width=74)
ctrl.validation.entryInsert(w.et_Param1, p1Value)
w.lb_Param2 = tk.Label(w.fr_analise, anchor="w")
w.lb_Param2.place(relx=0.053, y=43, height=21, width=91
, bordermode='ignore')
es.lbStyle(w.lb_Param2)
w.lb_Param2.configure(text=p2Name)
w.et_Param2 = tk.Entry(w.fr_analise, validate="key", validatecommand=(vcmd, '%d', '%i', '%P', '%S', '%W'))
w.et_Param2.place(relx=0.59, y=44, height=20, width=74
, bordermode='ignore')
es.etStyle(w.et_Param2)
ctrl.validation.entryInsert(w.et_Param2, p2Value)
w.btn_Apply = tk.Button(w.fr_analise)
w.btn_Apply.place(relx=0.063, y=315, height=24, relwidth=0.88
, bordermode='ignore')
es.btnStyle(w.btn_Apply)
w.btn_Apply.configure(text='''Aplicar''')
w.btn_Apply.configure(width=167)
w.btn_Apply.bind('<ButtonRelease-1>',lambda x:aplicar(callback, (w.et_Param1.get(), w.et_Param2.get())))
def fd_PEAK():
global curvePlot, spAt, cnvAt
if curvePlot.size == 2:
cv = np.take(curvePlot, 1)
i = ctrl.operations.findPeak(cv.curveY)
spAt.scatter(cv.curveX[i],cv.curveY[i])
cnvAt.draw()
w.lb_ConnInfo.configure(text="PICO\nPotencial = "+str(float("{0:.4f}".format(cv.curveX[i])))+"V\nCorrente = "+str(float("{0:.3f}".format(cv.curveY[i])))+"uA")
elif curvePlot.size > 2:
w.lb_ConnInfo.configure(text="Ainda não é possível\nanalisar curvas unidas")
elif curvePlot.size < 2:
w.lb_ConnInfo.configure(text="Selecione uma curva")
def aplicar(callback, args):
global curvePlot, curvesList
if curvePlot.size == 2:
c = np.take(curvePlot, 1)
y = callback(c.curveY, *args)
c2 = md.curve(c.curveName+"_"+callback.__name__, c.curveX, y)
c2.curveX = c.curveX
c2.curveY = y
curvePlot = np.ndarray([])
curvePlot = np.append(curvePlot, c2, axis=None)
drawCurve()
curvesList = np.append(curvesList, c2, axis=None)
createMins()
elif curvePlot.size > 2:
w.lb_ConnInfo.configure(text="Ainda não é possível\nanalisar curvas unidas")
elif curvePlot.size < 2:
w.lb_ConnInfo.configure(text="Selecione uma curva")
""" apagar filhos de um frame
for child in infoFrame.winfo_children():
child.destroy()
""" | [
"tkinter.Button",
"tkinter.Canvas",
"numpy.array",
"tkinter.Label",
"Controller.operations.findPeak",
"tkinter.colorchooser.askcolor",
"tkinter.Frame",
"Controller.validation.entryInsert",
"Controller.connection.closePort",
"Model.curve",
"tkinter.Entry",
"numpy.delete",
"Controller.file.imp... | [((993, 1007), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (1003, 1007), True, 'import numpy as np\n'), ((1025, 1039), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (1035, 1039), True, 'import numpy as np\n'), ((1055, 1069), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (1065, 1069), True, 'import numpy as np\n'), ((1083, 1091), 'Model.dpv', 'md.dpv', ([], {}), '()\n', (1089, 1091), True, 'import Model as md\n'), ((1109, 1133), 'Controller.validation', 'ctrl.validation', (['w', 'root'], {}), '(w, root)\n', (1124, 1133), True, 'import Controller as ctrl\n'), ((1583, 1603), 'VStat.vp_start_gui', 'VStat.vp_start_gui', ([], {}), '()\n', (1601, 1603), False, 'import VStat\n'), ((1683, 1707), 'tkinter.Canvas', 'tk.Canvas', (['w.fr_mainView'], {}), '(w.fr_mainView)\n', (1692, 1707), True, 'import tkinter as tk\n'), ((2235, 2258), 'tkinter.Frame', 'tk.Frame', (['w.fr_mainView'], {}), '(w.fr_mainView)\n', (2243, 2258), True, 'import tkinter as tk\n'), ((2747, 2761), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (2757, 2761), True, 'import numpy as np\n'), ((2779, 2793), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (2789, 2793), True, 'import numpy as np\n'), ((2809, 2953), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'initialdir': '"""C:/"""', 'title': '"""Importar CSV..."""', 'filetypes': "(('Comma-separeted values', '*.csv'), ('All files', '*.*'))"}), "(initialdir='C:/', title='Importar CSV...',\n filetypes=(('Comma-separeted values', '*.csv'), ('All files', '*.*')))\n", (2835, 2953), False, 'from tkinter import filedialog, colorchooser, IntVar\n'), ((5179, 5205), 'Controller.connection.openPort', 'ctrl.connection.openPort', ([], {}), '()\n', (5203, 5205), True, 'import Controller as ctrl\n'), ((7212, 7244), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(10, 8)', 'dpi': '(100)'}), '(figsize=(10, 8), dpi=100)\n', (7218, 7244), False, 'from matplotlib.figure 
import Figure\n'), ((7384, 7430), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig'], {'master': 'w.cv_curveGraph'}), '(fig, master=w.cv_curveGraph)\n', (7401, 7430), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((7447, 7489), 'matplotlib.backends.backend_tkagg.NavigationToolbar2Tk', 'NavigationToolbar2Tk', (['canvas', 'w.fr_toolbar'], {}), '(canvas, w.fr_toolbar)\n', (7467, 7489), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((8202, 8237), 'numpy.take', 'np.take', (['curvesList', '(curveIndex + 1)'], {}), '(curvesList, curveIndex + 1)\n', (8209, 8237), True, 'import numpy as np\n'), ((8252, 8266), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (8262, 8266), True, 'import numpy as np\n'), ((8283, 8318), 'numpy.append', 'np.append', (['curvePlot', 'cv'], {'axis': 'None'}), '(curvePlot, cv, axis=None)\n', (8292, 8318), True, 'import numpy as np\n'), ((9008, 9033), 'tkinter.Label', 'tk.Label', (['w.fr_miniaturas'], {}), '(w.fr_miniaturas)\n', (9016, 9033), True, 'import tkinter as tk\n'), ((9482, 9508), 'tkinter.Canvas', 'tk.Canvas', (['w.fr_miniaturas'], {}), '(w.fr_miniaturas)\n', (9491, 9508), True, 'import tkinter as tk\n'), ((9594, 9625), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(1, 1)', 'dpi': '(100)'}), '(figsize=(1, 1), dpi=100)\n', (9600, 9625), False, 'from matplotlib.figure import Figure\n'), ((9641, 9685), 'matplotlib.backends.backend_tkagg.FigureCanvasTkAgg', 'FigureCanvasTkAgg', (['fig'], {'master': 'w.cv_minCurve'}), '(fig, master=w.cv_minCurve)\n', (9658, 9685), False, 'from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n'), ((10140, 10169), 'tkinter.Checkbutton', 'tk.Checkbutton', (['w.cv_minCurve'], {}), '(w.cv_minCurve)\n', (10154, 10169), True, 'import tkinter as tk\n'), ((10753, 10776), 'tkinter.Frame', 'tk.Frame', (['w.cv_minCurve'], {}), 
'(w.cv_minCurve)\n', (10761, 10776), True, 'import tkinter as tk\n'), ((11401, 11424), 'tkinter.colorchooser.askcolor', 'colorchooser.askcolor', ([], {}), '()\n', (11422, 11424), False, 'from tkinter import filedialog, colorchooser, IntVar\n'), ((11470, 11505), 'numpy.take', 'np.take', (['curvesList', '(curveIndex + 1)'], {}), '(curvesList, curveIndex + 1)\n', (11477, 11505), True, 'import numpy as np\n'), ((13290, 13312), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {}), '(w.fr_analise)\n', (13298, 13312), True, 'import tkinter as tk\n'), ((13329, 13351), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {}), '(w.fr_analise)\n', (13337, 13351), True, 'import tkinter as tk\n'), ((13370, 13392), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {}), '(w.fr_analise)\n', (13378, 13392), True, 'import tkinter as tk\n'), ((13411, 13433), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {}), '(w.fr_analise)\n', (13419, 13433), True, 'import tkinter as tk\n'), ((13458, 13492), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (13466, 13492), True, 'import tkinter as tk\n'), ((13593, 13617), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_PInicio'], {}), '(w.lb_PInicio)\n', (13603, 13617), True, 'import Estilos as es\n'), ((13902, 13926), 'Estilos.etStyle', 'es.etStyle', (['w.et_PInicio'], {}), '(w.et_PInicio)\n', (13912, 13926), True, 'import Estilos as es\n'), ((13931, 13985), 'Controller.validation.entryInsert', 'ctrl.validation.entryInsert', (['w.et_PInicio', 'md_dpv.pIni'], {}), '(w.et_PInicio, md_dpv.pIni)\n', (13958, 13985), True, 'import Controller as ctrl\n'), ((14003, 14037), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (14011, 14037), True, 'import tkinter as tk\n'), ((14135, 14156), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_PFim'], {}), '(w.lb_PFim)\n', (14145, 14156), True, 'import Estilos as es\n'), ((14462, 14483), 'Estilos.etStyle', 'es.etStyle', 
(['w.et_PFim'], {}), '(w.et_PFim)\n', (14472, 14483), True, 'import Estilos as es\n'), ((14488, 14539), 'Controller.validation.entryInsert', 'ctrl.validation.entryInsert', (['w.et_PFim', 'md_dpv.pFim'], {}), '(w.et_PFim, md_dpv.pFim)\n', (14515, 14539), True, 'import Controller as ctrl\n'), ((14559, 14593), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (14567, 14593), True, 'import tkinter as tk\n'), ((14693, 14716), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_PPasso'], {}), '(w.lb_PPasso)\n', (14703, 14716), True, 'import Estilos as es\n'), ((15036, 15059), 'Estilos.etStyle', 'es.etStyle', (['w.et_PPasso'], {}), '(w.et_PPasso)\n', (15046, 15059), True, 'import Estilos as es\n'), ((15064, 15117), 'Controller.validation.entryInsert', 'ctrl.validation.entryInsert', (['w.et_PPasso', 'md_dpv.pPas'], {}), '(w.et_PPasso, md_dpv.pPas)\n', (15091, 15117), True, 'import Controller as ctrl\n'), ((15137, 15171), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (15145, 15171), True, 'import tkinter as tk\n'), ((15271, 15294), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_PPulso'], {}), '(w.lb_PPulso)\n', (15281, 15294), True, 'import Estilos as es\n'), ((15367, 15463), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {'validate': '"""key"""', 'validatecommand': "(vcmd, '%d', '%i', '%P', '%S', '%W')"}), "(w.fr_analise, validate='key', validatecommand=(vcmd, '%d', '%i',\n '%P', '%S', '%W'))\n", (15375, 15463), True, 'import tkinter as tk\n'), ((15563, 15586), 'Estilos.etStyle', 'es.etStyle', (['w.et_PPulso'], {}), '(w.et_PPulso)\n', (15573, 15586), True, 'import Estilos as es\n'), ((15591, 15644), 'Controller.validation.entryInsert', 'ctrl.validation.entryInsert', (['w.et_PPulso', 'md_dpv.pPul'], {}), '(w.et_PPulso, md_dpv.pPul)\n', (15618, 15644), True, 'import Controller as ctrl\n'), ((15668, 15702), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), 
"(w.fr_analise, anchor='w')\n", (15676, 15702), True, 'import tkinter as tk\n'), ((15803, 15826), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_TPulso'], {}), '(w.lb_TPulso)\n', (15813, 15826), True, 'import Estilos as es\n'), ((15935, 16031), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {'validate': '"""key"""', 'validatecommand': "(vcmd, '%d', '%i', '%P', '%S', '%W')"}), "(w.fr_analise, validate='key', validatecommand=(vcmd, '%d', '%i',\n '%P', '%S', '%W'))\n", (15943, 16031), True, 'import tkinter as tk\n'), ((16127, 16150), 'Estilos.etStyle', 'es.etStyle', (['w.et_TPulso'], {}), '(w.et_TPulso)\n', (16137, 16150), True, 'import Estilos as es\n'), ((16155, 16208), 'Controller.validation.entryInsert', 'ctrl.validation.entryInsert', (['w.et_TPulso', 'md_dpv.tPul'], {}), '(w.et_TPulso, md_dpv.tPul)\n', (16182, 16208), True, 'import Controller as ctrl\n'), ((16228, 16262), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (16236, 16262), True, 'import tkinter as tk\n'), ((16363, 16386), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_tPasso'], {}), '(w.lb_tPasso)\n', (16373, 16386), True, 'import Estilos as es\n'), ((16666, 16689), 'Estilos.etStyle', 'es.etStyle', (['w.et_tPasso'], {}), '(w.et_tPasso)\n', (16676, 16689), True, 'import Estilos as es\n'), ((16694, 16747), 'Controller.validation.entryInsert', 'ctrl.validation.entryInsert', (['w.et_tPasso', 'md_dpv.tPas'], {}), '(w.et_tPasso, md_dpv.tPas)\n', (16721, 16747), True, 'import Controller as ctrl\n'), ((16771, 16805), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (16779, 16805), True, 'import tkinter as tk\n'), ((16907, 16930), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_tEquil'], {}), '(w.lb_tEquil)\n', (16917, 16930), True, 'import Estilos as es\n'), ((17008, 17104), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {'validate': '"""key"""', 'validatecommand': "(vcmd, '%d', '%i', '%P', '%S', '%W')"}), 
"(w.fr_analise, validate='key', validatecommand=(vcmd, '%d', '%i',\n '%P', '%S', '%W'))\n", (17016, 17104), True, 'import tkinter as tk\n'), ((17200, 17223), 'Estilos.etStyle', 'es.etStyle', (['w.et_tEquil'], {}), '(w.et_tEquil)\n', (17210, 17223), True, 'import Estilos as es\n'), ((17228, 17281), 'Controller.validation.entryInsert', 'ctrl.validation.entryInsert', (['w.et_tEquil', 'md_dpv.tEqu'], {}), '(w.et_tEquil, md_dpv.tEqu)\n', (17255, 17281), True, 'import Controller as ctrl\n'), ((17311, 17345), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (17319, 17345), True, 'import tkinter as tk\n'), ((17452, 17481), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_currentRange'], {}), '(w.lb_currentRange)\n', (17462, 17481), True, 'import Estilos as es\n'), ((17568, 17594), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['w.fr_analise'], {}), '(w.fr_analise)\n', (17580, 17594), True, 'import tkinter.ttk as ttk\n'), ((17817, 17851), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (17825, 17851), True, 'import tkinter as tk\n'), ((17952, 17974), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_sRate'], {}), '(w.lb_sRate)\n', (17962, 17974), True, 'import Estilos as es\n'), ((18042, 18082), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {'state': '"""disabled"""'}), "(w.fr_analise, state='disabled')\n", (18050, 18082), True, 'import tkinter as tk\n'), ((18203, 18237), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (18211, 18237), True, 'import tkinter as tk\n'), ((18342, 18368), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_tEstimado'], {}), '(w.lb_tEstimado)\n', (18352, 18368), True, 'import Estilos as es\n'), ((18450, 18490), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {'state': '"""disabled"""'}), "(w.fr_analise, state='disabled')\n", (18458, 18490), True, 'import tkinter as tk\n'), ((18613, 18647), 'tkinter.Label', 
'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (18621, 18647), True, 'import tkinter as tk\n'), ((18750, 18774), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_nPontos'], {}), '(w.lb_nPontos)\n', (18760, 18774), True, 'import Estilos as es\n'), ((18844, 18884), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {'state': '"""disabled"""'}), "(w.fr_analise, state='disabled')\n", (18852, 18884), True, 'import tkinter as tk\n'), ((19002, 19025), 'tkinter.Button', 'tk.Button', (['w.fr_analise'], {}), '(w.fr_analise)\n', (19011, 19025), True, 'import tkinter as tk\n'), ((19129, 19151), 'Estilos.btnStyle', 'es.btnStyle', (['w.btn_dpv'], {}), '(w.btn_dpv)\n', (19140, 19151), True, 'import Estilos as es\n'), ((19717, 19751), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (19725, 19751), True, 'import tkinter as tk\n'), ((19851, 19874), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_Param1'], {}), '(w.lb_Param1)\n', (19861, 19874), True, 'import Estilos as es\n'), ((19933, 20029), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {'validate': '"""key"""', 'validatecommand': "(vcmd, '%d', '%i', '%P', '%S', '%W')"}), "(w.fr_analise, validate='key', validatecommand=(vcmd, '%d', '%i',\n '%P', '%S', '%W'))\n", (19941, 20029), True, 'import tkinter as tk\n'), ((20124, 20147), 'Estilos.etStyle', 'es.etStyle', (['w.et_Param1'], {}), '(w.et_Param1)\n', (20134, 20147), True, 'import Estilos as es\n'), ((20188, 20237), 'Controller.validation.entryInsert', 'ctrl.validation.entryInsert', (['w.et_Param1', 'p1Value'], {}), '(w.et_Param1, p1Value)\n', (20215, 20237), True, 'import Controller as ctrl\n'), ((20262, 20296), 'tkinter.Label', 'tk.Label', (['w.fr_analise'], {'anchor': '"""w"""'}), "(w.fr_analise, anchor='w')\n", (20270, 20296), True, 'import tkinter as tk\n'), ((20396, 20419), 'Estilos.lbStyle', 'es.lbStyle', (['w.lb_Param2'], {}), '(w.lb_Param2)\n', (20406, 20419), True, 'import Estilos as es\n'), 
((20478, 20574), 'tkinter.Entry', 'tk.Entry', (['w.fr_analise'], {'validate': '"""key"""', 'validatecommand': "(vcmd, '%d', '%i', '%P', '%S', '%W')"}), "(w.fr_analise, validate='key', validatecommand=(vcmd, '%d', '%i',\n '%P', '%S', '%W'))\n", (20486, 20574), True, 'import tkinter as tk\n'), ((20669, 20692), 'Estilos.etStyle', 'es.etStyle', (['w.et_Param2'], {}), '(w.et_Param2)\n', (20679, 20692), True, 'import Estilos as es\n'), ((20697, 20746), 'Controller.validation.entryInsert', 'ctrl.validation.entryInsert', (['w.et_Param2', 'p2Value'], {}), '(w.et_Param2, p2Value)\n', (20724, 20746), True, 'import Controller as ctrl\n'), ((20770, 20793), 'tkinter.Button', 'tk.Button', (['w.fr_analise'], {}), '(w.fr_analise)\n', (20779, 20793), True, 'import tkinter as tk\n'), ((20899, 20923), 'Estilos.btnStyle', 'es.btnStyle', (['w.btn_Apply'], {}), '(w.btn_Apply)\n', (20910, 20923), True, 'import Estilos as es\n'), ((3069, 3093), 'Controller.file.importCsv', 'ctrl.file.importCsv', (['imp'], {}), '(imp)\n', (3088, 3093), True, 'import Controller as ctrl\n'), ((3184, 3220), 'numpy.append', 'np.append', (['curvePlot', 'csv'], {'axis': 'None'}), '(curvePlot, csv, axis=None)\n', (3193, 3220), True, 'import numpy as np\n'), ((3315, 3352), 'numpy.append', 'np.append', (['curvesList', 'csv'], {'axis': 'None'}), '(curvesList, csv, axis=None)\n', (3324, 3352), True, 'import numpy as np\n'), ((3485, 3660), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {'title': '"""Exportar CSV..."""', 'defaultextension': '"""csv"""', 'initialdir': '"""C:/"""', 'filetypes': "(('Comma-separeted values', '*.csv'), ('All files', '*.*'))"}), "(title='Exportar CSV...', defaultextension=\n 'csv', initialdir='C:/', filetypes=(('Comma-separeted values', '*.csv'),\n ('All files', '*.*')))\n", (3513, 3660), False, 'from tkinter import filedialog, colorchooser, IntVar\n'), ((4030, 4058), 'Controller.connection.disconnect', 'ctrl.connection.disconnect', ([], {}), '()\n', (4056, 
4058), True, 'import Controller as ctrl\n'), ((4267, 4292), 'Controller.connection.connect', 'ctrl.connection.connect', ([], {}), '()\n', (4290, 4292), True, 'import Controller as ctrl\n'), ((6510, 6524), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (6520, 6524), True, 'import numpy as np\n'), ((6880, 6907), 'Controller.connection.closePort', 'ctrl.connection.closePort', ([], {}), '()\n', (6905, 6907), True, 'import Controller as ctrl\n'), ((7536, 7557), 'numpy.take', 'np.take', (['curvePlot', '(1)'], {}), '(curvePlot, 1)\n', (7543, 7557), True, 'import numpy as np\n'), ((8612, 8634), 'numpy.take', 'np.take', (['curvesList', 'i'], {}), '(curvesList, i)\n', (8619, 8634), True, 'import numpy as np\n'), ((8751, 8759), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (8757, 8759), False, 'from tkinter import filedialog, colorchooser, IntVar\n'), ((11768, 11788), 'numpy.take', 'np.take', (['cBoxList', 'i'], {}), '(cBoxList, i)\n', (11775, 11788), True, 'import numpy as np\n'), ((12448, 12468), 'numpy.take', 'np.take', (['cBoxList', 'i'], {}), '(cBoxList, i)\n', (12455, 12468), True, 'import numpy as np\n'), ((12481, 12503), 'numpy.take', 'np.take', (['curvesList', 'i'], {}), '(curvesList, i)\n', (12488, 12503), True, 'import numpy as np\n'), ((12516, 12537), 'numpy.take', 'np.take', (['curvePlot', '(1)'], {}), '(curvePlot, 1)\n', (12523, 12537), True, 'import numpy as np\n'), ((21216, 21237), 'numpy.take', 'np.take', (['curvePlot', '(1)'], {}), '(curvePlot, 1)\n', (21223, 21237), True, 'import numpy as np\n'), ((21250, 21285), 'Controller.operations.findPeak', 'ctrl.operations.findPeak', (['cv.curveY'], {}), '(cv.curveY)\n', (21274, 21285), True, 'import Controller as ctrl\n'), ((21851, 21872), 'numpy.take', 'np.take', (['curvePlot', '(1)'], {}), '(curvePlot, 1)\n', (21858, 21872), True, 'import numpy as np\n'), ((21924, 21984), 'Model.curve', 'md.curve', (["(c.curveName + '_' + callback.__name__)", 'c.curveX', 'y'], {}), "(c.curveName + '_' + callback.__name__, 
c.curveX, y)\n", (21932, 21984), True, 'import Model as md\n'), ((22061, 22075), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (22071, 22075), True, 'import numpy as np\n'), ((22096, 22131), 'numpy.append', 'np.append', (['curvePlot', 'c2'], {'axis': 'None'}), '(curvePlot, c2, axis=None)\n', (22105, 22131), True, 'import numpy as np\n'), ((22182, 22218), 'numpy.append', 'np.append', (['curvesList', 'c2'], {'axis': 'None'}), '(curvesList, c2, axis=None)\n', (22191, 22218), True, 'import numpy as np\n'), ((3695, 3716), 'numpy.take', 'np.take', (['curvePlot', '(1)'], {}), '(curvePlot, 1)\n', (3702, 3716), True, 'import numpy as np\n'), ((10703, 10735), 'numpy.take', 'np.take', (['cBoxList', '(thisIndex + 1)'], {}), '(cBoxList, thisIndex + 1)\n', (10710, 10735), True, 'import numpy as np\n'), ((12724, 12748), 'numpy.delete', 'np.delete', (['curvesList', 'i'], {}), '(curvesList, i)\n', (12733, 12748), True, 'import numpy as np\n'), ((12772, 12794), 'numpy.delete', 'np.delete', (['cBoxList', 'i'], {}), '(cBoxList, i)\n', (12781, 12794), True, 'import numpy as np\n'), ((6579, 6591), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6587, 6591), True, 'import numpy as np\n'), ((6593, 6605), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6601, 6605), True, 'import numpy as np\n'), ((7793, 7814), 'numpy.take', 'np.take', (['curvePlot', 'i'], {}), '(curvePlot, i)\n', (7800, 7814), True, 'import numpy as np\n'), ((11856, 11878), 'numpy.take', 'np.take', (['curvesList', 'i'], {}), '(curvesList, i)\n', (11863, 11878), True, 'import numpy as np\n'), ((11907, 11921), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (11917, 11921), True, 'import numpy as np\n'), ((11950, 11985), 'numpy.append', 'np.append', (['curvePlot', 'cv'], {'axis': 'None'}), '(curvePlot, cv, axis=None)\n', (11959, 11985), True, 'import numpy as np\n'), ((12052, 12074), 'numpy.take', 'np.take', (['curvesList', 'i'], {}), '(curvesList, i)\n', (12059, 12074), True, 'import numpy as np\n'), 
((12103, 12138), 'numpy.append', 'np.append', (['curvePlot', 'cv'], {'axis': 'None'}), '(curvePlot, cv, axis=None)\n', (12112, 12138), True, 'import numpy as np\n'), ((6971, 6992), 'numpy.take', 'np.take', (['curvePlot', '(1)'], {}), '(curvePlot, 1)\n', (6978, 6992), True, 'import numpy as np\n')] |
from __future__ import division
from pyfmmlib import fmm_part
import numpy as np
import numpy.linalg as la
def test_fmm():
for dims in [2, 3]:
for kernel in [0, 5]:
sources = np.random.randn(4000, dims)
dipvec = np.random.randn(4000, dims)
fmm_part("pg", iprec=1, kernel=kernel, sources=sources,
dip_charge=1, dipvec=dipvec,
debug=True)
targ_def = (slice(-3, 3, 20j),)
targets = np.mgrid[targ_def*dims]
targets = targets.reshape(dims, -1)
fmm_part("PG", iprec=1, kernel=kernel,
sources=sources, mop_charge=1, target=targets.T,
debug=True)
def test_translations():
nterms = 15
zk = 3
rscale = 1
n = 40
# centered at the origin, extent [-.5,.5]
sources = np.random.uniform(size=(n, 2)) - 0.5
charges = np.random.uniform(size=n)
targets_center = np.array([10, 0])
targets = np.random.uniform(size=(n, 2)) - 0.5 + targets_center
from pyfmmlib import (h2dformmp, h2dmpmp_vec, h2dmploc_vec,
h2dlocloc_vec, h2dtaeval_vec, hpotgrad2dall_vec, h2dmpeval_vec)
ref_value, _, _ = hpotgrad2dall_vec(ifgrad=False, ifhess=False,
sources=sources.T, charge=charges,
targets=targets.T, zk=zk)
# {{{ multipole 1
mp1_center = np.array([0, 0])
ier, mp1 = h2dformmp(zk, rscale, sources.T, charges, mp1_center, nterms)
assert ier == 0
mp1_value, _, _ = h2dmpeval_vec(zk, rscale, mp1_center, mp1, ztarg=targets.T,
ifgrad=False, ifhess=False)
assert la.norm(mp1_value - ref_value) / la.norm(ref_value) < 1e-12
# }}}
# {{{ multipole 2
mp2_center = np.array([2, 0])
mp2 = h2dmpmp_vec(zk, rscale, mp1_center, mp1, rscale, mp2_center, nterms)
mp2_value, _, _ = h2dmpeval_vec(zk, rscale, mp2_center, mp2, ztarg=targets.T,
ifgrad=False, ifhess=False)
assert la.norm(mp2_value - ref_value) / la.norm(ref_value) < 3e-5
# }}}
# {{{ local 1
loc1_center = targets_center - np.array([1, 0])
loc1 = h2dmploc_vec(zk, rscale, mp2_center, mp2, rscale, loc1_center, nterms)
loc1_value, _, _ = h2dtaeval_vec(zk, rscale, loc1_center, loc1,
ztarg=targets.T, ifgrad=False, ifhess=False)
assert la.norm(loc1_value - ref_value) / la.norm(ref_value) < 3e-5
# }}}
# {{{ local 2
loc2_center = targets_center
loc2 = h2dlocloc_vec(zk, rscale, loc1_center, loc1, rscale, loc2_center, nterms)
loc2_value, _, _ = h2dtaeval_vec(zk, rscale, loc2_center, loc2, ztarg=targets.T,
ifgrad=False, ifhess=False)
assert la.norm(loc2_value - ref_value) / la.norm(ref_value) < 1e-4
# }}}
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
exec(sys.argv[1])
else:
from pytest import main
main([__file__])
# vim: fdm=marker
| [
"pyfmmlib.hpotgrad2dall_vec",
"pyfmmlib.h2dmpmp_vec",
"pyfmmlib.h2dlocloc_vec",
"pyfmmlib.h2dmploc_vec",
"numpy.linalg.norm",
"pytest.main",
"numpy.array",
"pyfmmlib.h2dtaeval_vec",
"pyfmmlib.h2dmpeval_vec",
"numpy.random.uniform",
"pyfmmlib.h2dformmp",
"numpy.random.randn",
"pyfmmlib.fmm_pa... | [((912, 937), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (929, 937), True, 'import numpy as np\n'), ((960, 977), 'numpy.array', 'np.array', (['[10, 0]'], {}), '([10, 0])\n', (968, 977), True, 'import numpy as np\n'), ((1210, 1321), 'pyfmmlib.hpotgrad2dall_vec', 'hpotgrad2dall_vec', ([], {'ifgrad': '(False)', 'ifhess': '(False)', 'sources': 'sources.T', 'charge': 'charges', 'targets': 'targets.T', 'zk': 'zk'}), '(ifgrad=False, ifhess=False, sources=sources.T, charge=\n charges, targets=targets.T, zk=zk)\n', (1227, 1321), False, 'from pyfmmlib import h2dformmp, h2dmpmp_vec, h2dmploc_vec, h2dlocloc_vec, h2dtaeval_vec, hpotgrad2dall_vec, h2dmpeval_vec\n'), ((1382, 1398), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1390, 1398), True, 'import numpy as np\n'), ((1414, 1475), 'pyfmmlib.h2dformmp', 'h2dformmp', (['zk', 'rscale', 'sources.T', 'charges', 'mp1_center', 'nterms'], {}), '(zk, rscale, sources.T, charges, mp1_center, nterms)\n', (1423, 1475), False, 'from pyfmmlib import h2dformmp, h2dmpmp_vec, h2dmploc_vec, h2dlocloc_vec, h2dtaeval_vec, hpotgrad2dall_vec, h2dmpeval_vec\n'), ((1519, 1610), 'pyfmmlib.h2dmpeval_vec', 'h2dmpeval_vec', (['zk', 'rscale', 'mp1_center', 'mp1'], {'ztarg': 'targets.T', 'ifgrad': '(False)', 'ifhess': '(False)'}), '(zk, rscale, mp1_center, mp1, ztarg=targets.T, ifgrad=False,\n ifhess=False)\n', (1532, 1610), False, 'from pyfmmlib import h2dformmp, h2dmpmp_vec, h2dmploc_vec, h2dlocloc_vec, h2dtaeval_vec, hpotgrad2dall_vec, h2dmpeval_vec\n'), ((1743, 1759), 'numpy.array', 'np.array', (['[2, 0]'], {}), '([2, 0])\n', (1751, 1759), True, 'import numpy as np\n'), ((1770, 1838), 'pyfmmlib.h2dmpmp_vec', 'h2dmpmp_vec', (['zk', 'rscale', 'mp1_center', 'mp1', 'rscale', 'mp2_center', 'nterms'], {}), '(zk, rscale, mp1_center, mp1, rscale, mp2_center, nterms)\n', (1781, 1838), False, 'from pyfmmlib import h2dformmp, h2dmpmp_vec, h2dmploc_vec, h2dlocloc_vec, h2dtaeval_vec, 
hpotgrad2dall_vec, h2dmpeval_vec\n'), ((1862, 1953), 'pyfmmlib.h2dmpeval_vec', 'h2dmpeval_vec', (['zk', 'rscale', 'mp2_center', 'mp2'], {'ztarg': 'targets.T', 'ifgrad': '(False)', 'ifhess': '(False)'}), '(zk, rscale, mp2_center, mp2, ztarg=targets.T, ifgrad=False,\n ifhess=False)\n', (1875, 1953), False, 'from pyfmmlib import h2dformmp, h2dmpmp_vec, h2dmploc_vec, h2dlocloc_vec, h2dtaeval_vec, hpotgrad2dall_vec, h2dmpeval_vec\n'), ((2127, 2197), 'pyfmmlib.h2dmploc_vec', 'h2dmploc_vec', (['zk', 'rscale', 'mp2_center', 'mp2', 'rscale', 'loc1_center', 'nterms'], {}), '(zk, rscale, mp2_center, mp2, rscale, loc1_center, nterms)\n', (2139, 2197), False, 'from pyfmmlib import h2dformmp, h2dmpmp_vec, h2dmploc_vec, h2dlocloc_vec, h2dtaeval_vec, hpotgrad2dall_vec, h2dmpeval_vec\n'), ((2222, 2315), 'pyfmmlib.h2dtaeval_vec', 'h2dtaeval_vec', (['zk', 'rscale', 'loc1_center', 'loc1'], {'ztarg': 'targets.T', 'ifgrad': '(False)', 'ifhess': '(False)'}), '(zk, rscale, loc1_center, loc1, ztarg=targets.T, ifgrad=False,\n ifhess=False)\n', (2235, 2315), False, 'from pyfmmlib import h2dformmp, h2dmpmp_vec, h2dmploc_vec, h2dlocloc_vec, h2dtaeval_vec, hpotgrad2dall_vec, h2dmpeval_vec\n'), ((2471, 2544), 'pyfmmlib.h2dlocloc_vec', 'h2dlocloc_vec', (['zk', 'rscale', 'loc1_center', 'loc1', 'rscale', 'loc2_center', 'nterms'], {}), '(zk, rscale, loc1_center, loc1, rscale, loc2_center, nterms)\n', (2484, 2544), False, 'from pyfmmlib import h2dformmp, h2dmpmp_vec, h2dmploc_vec, h2dlocloc_vec, h2dtaeval_vec, hpotgrad2dall_vec, h2dmpeval_vec\n'), ((2569, 2662), 'pyfmmlib.h2dtaeval_vec', 'h2dtaeval_vec', (['zk', 'rscale', 'loc2_center', 'loc2'], {'ztarg': 'targets.T', 'ifgrad': '(False)', 'ifhess': '(False)'}), '(zk, rscale, loc2_center, loc2, ztarg=targets.T, ifgrad=False,\n ifhess=False)\n', (2582, 2662), False, 'from pyfmmlib import h2dformmp, h2dmpmp_vec, h2dmploc_vec, h2dlocloc_vec, h2dtaeval_vec, hpotgrad2dall_vec, h2dmpeval_vec\n'), ((861, 891), 'numpy.random.uniform', 'np.random.uniform', 
([], {'size': '(n, 2)'}), '(size=(n, 2))\n', (878, 891), True, 'import numpy as np\n'), ((2099, 2115), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2107, 2115), True, 'import numpy as np\n'), ((2900, 2916), 'pytest.main', 'main', (['[__file__]'], {}), '([__file__])\n', (2904, 2916), False, 'from pytest import main\n'), ((202, 229), 'numpy.random.randn', 'np.random.randn', (['(4000)', 'dims'], {}), '(4000, dims)\n', (217, 229), True, 'import numpy as np\n'), ((251, 278), 'numpy.random.randn', 'np.random.randn', (['(4000)', 'dims'], {}), '(4000, dims)\n', (266, 278), True, 'import numpy as np\n'), ((291, 391), 'pyfmmlib.fmm_part', 'fmm_part', (['"""pg"""'], {'iprec': '(1)', 'kernel': 'kernel', 'sources': 'sources', 'dip_charge': '(1)', 'dipvec': 'dipvec', 'debug': '(True)'}), "('pg', iprec=1, kernel=kernel, sources=sources, dip_charge=1,\n dipvec=dipvec, debug=True)\n", (299, 391), False, 'from pyfmmlib import fmm_part\n'), ((580, 683), 'pyfmmlib.fmm_part', 'fmm_part', (['"""PG"""'], {'iprec': '(1)', 'kernel': 'kernel', 'sources': 'sources', 'mop_charge': '(1)', 'target': 'targets.T', 'debug': '(True)'}), "('PG', iprec=1, kernel=kernel, sources=sources, mop_charge=1,\n target=targets.T, debug=True)\n", (588, 683), False, 'from pyfmmlib import fmm_part\n'), ((992, 1022), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, 2)'}), '(size=(n, 2))\n', (1009, 1022), True, 'import numpy as np\n'), ((1631, 1661), 'numpy.linalg.norm', 'la.norm', (['(mp1_value - ref_value)'], {}), '(mp1_value - ref_value)\n', (1638, 1661), True, 'import numpy.linalg as la\n'), ((1664, 1682), 'numpy.linalg.norm', 'la.norm', (['ref_value'], {}), '(ref_value)\n', (1671, 1682), True, 'import numpy.linalg as la\n'), ((1974, 2004), 'numpy.linalg.norm', 'la.norm', (['(mp2_value - ref_value)'], {}), '(mp2_value - ref_value)\n', (1981, 2004), True, 'import numpy.linalg as la\n'), ((2007, 2025), 'numpy.linalg.norm', 'la.norm', (['ref_value'], {}), '(ref_value)\n', (2014, 
2025), True, 'import numpy.linalg as la\n'), ((2336, 2367), 'numpy.linalg.norm', 'la.norm', (['(loc1_value - ref_value)'], {}), '(loc1_value - ref_value)\n', (2343, 2367), True, 'import numpy.linalg as la\n'), ((2370, 2388), 'numpy.linalg.norm', 'la.norm', (['ref_value'], {}), '(ref_value)\n', (2377, 2388), True, 'import numpy.linalg as la\n'), ((2683, 2714), 'numpy.linalg.norm', 'la.norm', (['(loc2_value - ref_value)'], {}), '(loc2_value - ref_value)\n', (2690, 2714), True, 'import numpy.linalg as la\n'), ((2717, 2735), 'numpy.linalg.norm', 'la.norm', (['ref_value'], {}), '(ref_value)\n', (2724, 2735), True, 'import numpy.linalg as la\n')] |
from equadratures import *
import numpy as np
from scipy.optimize import minimize
try:
import pymanopt
manopt = True
except ImportError as e:
manopt = False
if manopt:
from pymanopt.manifolds import Stiefel
from pymanopt import Problem
from pymanopt.solvers import ConjugateGradient
class LogisticPoly(object):
    """
    Class for defining a logistic subspace polynomial, used for classification tasks.

    The fitted model predicts class-1 probabilities as ``sigmoid(p(X @ M))``,
    where ``M`` is a tall matrix with orthonormal columns (optimised over the
    Stiefel manifold) and ``p`` is a polynomial of maximum degree ``order``
    over the ``n``-dimensional subspace coordinates.

    Parameters
    ----------
    n : optional, int
        Dimension of subspace (should be smaller than ambient input dimension d). Defaults to 2.
    M_init : optional, numpy array of dimensions (d, n)
        Initial guess for subspace matrix. Defaults to a random projection.
    tol : optional, float
        Optimisation terminates when cost function on training data falls below ``tol``. Defaults to 1e-7.
    cauchy_tol : optional, float
        Optimisation terminates when the difference between the average of the last ``cauchy_length`` cost function evaluations and the current cost is below ``cauchy_tol`` times the current evaluation. Defaults to 1e-5.
    cauchy_length : optional, int
        Length of comparison history for Cauchy convergence. Defaults to 3.
    verbosity : optional, one of (0, 1, 2)
        Print debug messages during optimisation. 0 for no messages, 1 for printing final residual every restart, 2 for printing residuals at every iteration. Defaults to 2.
    order : optional, int
        Maximum order of subspace polynomial used. Defaults to 2.
    C : optional, float
        L2 penalty on coefficients. Defaults to 1.0.
    max_M_iters : optional, int
        Maximum optimisation iterations per restart. Defaults to 10.
    restarts : optional, int
        Number of times to restart optimisation. The result with lowest training error is taken at the end. Defaults to 1.

    Examples
    --------
    Fitting and testing a logistic polynomial on a dataset.

    >>> log_quad = eq.LogisticPoly(n=1, cauchy_tol=1e-5, verbosity=0, order=p_order, max_M_iters=100, C=0.001)
    >>> log_quad.fit(X_train, y_train)
    >>> prediction = log_quad.predict(X_test)
    >>> error_rate = np.sum(np.abs(np.round(prediction) - y_test)) / y_test.shape[0]
    >>> print(error_rate)
    """
    def __init__(self, n=2, M_init=None, tol=1e-7, cauchy_tol=1e-5,
                 cauchy_length=3, verbosity=2, order=2, C=1.0, max_M_iters=10, restarts=1):
        # pymanopt is an optional dependency of the package; fail fast here
        # rather than deep inside fit().
        if not manopt:
            raise ModuleNotFoundError('pymanopt is required for logistic_poly module.')
        self.n = n
        self.tol = tol
        self.cauchy_tol = cauchy_tol
        self.verbosity = verbosity
        self.cauchy_length = cauchy_length
        self.C = C
        self.max_M_iters = max_M_iters
        self.restarts = restarts
        self.order = order
        self.M_init = M_init
        # Set to True by fit(); predict() refuses to run before then.
        self.fitted = False
    @staticmethod
    def _sigmoid(U):
        # Elementwise logistic function 1 / (1 + exp(-U)).
        return 1.0 / (1.0 + np.exp(-U))
    def _p(self, X, M, c):
        # Evaluate the subspace polynomial with coefficients c at the
        # projected points X @ M. Note: mutates self.dummy_poly in place.
        self.dummy_poly.coefficients = c
        return self.dummy_poly.get_polyfit(X @ M).reshape(-1)
    def _phi(self, X, M, c):
        # Predicted class-1 probabilities: sigmoid of the subspace polynomial.
        pW = self._p(X, M, c)
        return self._sigmoid(pW)
    def _cost(self, f, X, M, c):
        # Negative Bernoulli log-likelihood plus L2 (ridge) penalty on the
        # coefficients; the 1e-15 offsets guard against log(0).
        this_phi = self._phi(X, M, c)
        return -np.sum(f * np.log(this_phi + 1e-15) + (1.0 - f) * np.log(1 - this_phi + 1e-15)) \
               + 0.5 * self.C * np.linalg.norm(c)**2
    def _dcostdc(self, f, X, M, c):
        # Gradient of _cost with respect to the polynomial coefficients c.
        W = X @ M
        self.dummy_poly.coefficients = c
        V = self.dummy_poly.get_poly(W)
        diff = f - self._phi(X, M, c)
        return -np.dot(V, diff) + self.C * c
    def _dcostdM(self, f, X, M, c):
        # Euclidean gradient of _cost with respect to the subspace matrix M
        # (pymanopt converts it to the Riemannian gradient on the Stiefel
        # manifold internally).
        self.dummy_poly.coefficients = c
        W = X @ M
        J = np.array(self.dummy_poly.get_poly_grad(W))
        if len(J.shape) == 2:
            # Single subspace dimension: add the leading axis expected below.
            J = J[np.newaxis,:,:]
        diff = f - self._phi(X, M, c)
        Jt = J.transpose((2,0,1))
        XJ = X[:, :, np.newaxis] * np.dot(Jt[:, np.newaxis, :, :], c)
        result = -np.dot(XJ.transpose((1,2,0)), diff)
        return result
    def fit(self, X_train, f_train):
        """ Method to fit logistic polynomial.

        Alternates between optimising the subspace matrix ``M`` over the
        Stiefel manifold (conjugate gradient via pymanopt) and optimising the
        polynomial coefficients ``c`` (scipy CG), restarting ``restarts``
        times and keeping the result with the lowest training cost.

        Parameters
        ----------
        X_train : numpy array, shape (N, d)
            Training input points.
        f_train : numpy array, shape (N)
            Training output targets.
        """
        f = f_train
        X = X_train
        tol = self.tol
        d = X_train.shape[1]
        n = self.n
        current_best_residual = np.inf
        # Uniform parameters on [-sqrt(d), sqrt(d)]: the range of a unit-norm
        # projection of points whose coordinates lie in [-1, 1].
        my_params = [Parameter(order=self.order, distribution='uniform',
                    lower=-np.sqrt(d), upper=np.sqrt(d)) for _ in range(n)]
        my_basis = Basis('total-order')
        self.dummy_poly = Poly(parameters=my_params, basis=my_basis, method='least-squares')
        for r in range(self.restarts):
            if self.M_init is None:
                # Random orthonormal initial subspace via QR of a Gaussian matrix.
                M0 = np.linalg.qr(np.random.randn(d, self.n))[0]
            else:
                M0 = self.M_init.copy()
            # Initialise the coefficients by a least-squares polynomial fit of
            # the targets over the initial projected points.
            my_poly_init = Poly(parameters=my_params, basis=my_basis, method='least-squares',
                           sampling_args={'mesh': 'user-defined',
                                          'sample-points': X @ M0,
                                          'sample-outputs': f})
            my_poly_init.set_model()
            c0 = my_poly_init.coefficients.copy()
            residual = self._cost(f, X, M0, c0)
            cauchy_length = self.cauchy_length
            residual_history = []
            iter_ind = 0
            M = M0.copy()
            c = c0.copy()
            # Alternating minimisation until the cost drops below tol or the
            # Cauchy stagnation criterion triggers.
            while residual > tol:
                if self.verbosity == 2:
                    print('residual = %f' % residual)
                residual_history.append(residual)
                # Minimize over M
                func_M = lambda M_var: self._cost(f, X, M_var, c)
                grad_M = lambda M_var: self._dcostdM(f, X, M_var, c)
                manifold = Stiefel(d, n)
                solver = ConjugateGradient(maxiter=self.max_M_iters)
                problem = Problem(manifold=manifold, cost=func_M, egrad=grad_M, verbosity=0)
                M = solver.solve(problem, x=M)
                # Minimize over c
                func_c = lambda c_var: self._cost(f, X, M, c_var)
                grad_c = lambda c_var: self._dcostdc(f, X, M, c_var)
                res = minimize(func_c, x0=c, method='CG', jac=grad_c)
                c = res.x
                residual = self._cost(f, X, M, c)
                if iter_ind < cauchy_length:
                    iter_ind += 1
                elif np.abs(np.mean(residual_history[-cauchy_length:]) - residual)/residual < self.cauchy_tol:
                    # Relative change over the trailing window has stagnated.
                    break
            if self.verbosity > 0:
                print('Final residual on training data: %f' % self._cost(f, X, M, c))
            # Keep the best (M, c) pair across restarts.
            if residual < current_best_residual:
                self.M = M
                self.c = c
                current_best_residual = residual
        self.fitted = True
    def predict(self, X):
        """ Method to predict from input test points.

        Parameters
        ----------
        X : numpy array, shape (N, d)
            Test input points.

        Returns
        ----------
        numpy array, shape (N)
            Predicted class-1 probabilities at the specified test points.
        """
        if not self.fitted:
            raise ValueError('Call fit() to fit logistic polynomial first.')
        return self._phi(X, self.M, self.c)
| [
"numpy.mean",
"numpy.sqrt",
"pymanopt.Problem",
"scipy.optimize.minimize",
"numpy.log",
"pymanopt.manifolds.Stiefel",
"numpy.exp",
"numpy.dot",
"numpy.linalg.norm",
"numpy.random.randn",
"pymanopt.solvers.ConjugateGradient"
] | [((4069, 4103), 'numpy.dot', 'np.dot', (['Jt[:, np.newaxis, :, :]', 'c'], {}), '(Jt[:, np.newaxis, :, :], c)\n', (4075, 4103), True, 'import numpy as np\n'), ((2950, 2960), 'numpy.exp', 'np.exp', (['(-U)'], {}), '(-U)\n', (2956, 2960), True, 'import numpy as np\n'), ((3658, 3673), 'numpy.dot', 'np.dot', (['V', 'diff'], {}), '(V, diff)\n', (3664, 3673), True, 'import numpy as np\n'), ((6096, 6109), 'pymanopt.manifolds.Stiefel', 'Stiefel', (['d', 'n'], {}), '(d, n)\n', (6103, 6109), False, 'from pymanopt.manifolds import Stiefel\n'), ((6135, 6178), 'pymanopt.solvers.ConjugateGradient', 'ConjugateGradient', ([], {'maxiter': 'self.max_M_iters'}), '(maxiter=self.max_M_iters)\n', (6152, 6178), False, 'from pymanopt.solvers import ConjugateGradient\n'), ((6206, 6272), 'pymanopt.Problem', 'Problem', ([], {'manifold': 'manifold', 'cost': 'func_M', 'egrad': 'grad_M', 'verbosity': '(0)'}), '(manifold=manifold, cost=func_M, egrad=grad_M, verbosity=0)\n', (6213, 6272), False, 'from pymanopt import Problem\n'), ((6514, 6561), 'scipy.optimize.minimize', 'minimize', (['func_c'], {'x0': 'c', 'method': '"""CG"""', 'jac': 'grad_c'}), "(func_c, x0=c, method='CG', jac=grad_c)\n", (6522, 6561), False, 'from scipy.optimize import minimize\n'), ((3388, 3405), 'numpy.linalg.norm', 'np.linalg.norm', (['c'], {}), '(c)\n', (3402, 3405), True, 'import numpy as np\n'), ((4755, 4765), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (4762, 4765), True, 'import numpy as np\n'), ((4737, 4747), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (4744, 4747), True, 'import numpy as np\n'), ((5030, 5056), 'numpy.random.randn', 'np.random.randn', (['d', 'self.n'], {}), '(d, self.n)\n', (5045, 5056), True, 'import numpy as np\n'), ((3285, 3309), 'numpy.log', 'np.log', (['(this_phi + 1e-15)'], {}), '(this_phi + 1e-15)\n', (3291, 3309), True, 'import numpy as np\n'), ((3324, 3352), 'numpy.log', 'np.log', (['(1 - this_phi + 1e-15)'], {}), '(1 - this_phi + 1e-15)\n', (3330, 3352), True, 'import numpy as 
np\n'), ((6745, 6787), 'numpy.mean', 'np.mean', (['residual_history[-cauchy_length:]'], {}), '(residual_history[-cauchy_length:])\n', (6752, 6787), True, 'import numpy as np\n')] |
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import networkx as nx
import numpy as np
from suspect.convexity.rules import QuadraticRule
from suspect.expression import ExpressionType
from suspect.fbbt.propagation.rules import (
QuadraticRule as QuadraticBoundPropagationRule
)
from galini.core import (
QuadraticExpression,
LinearExpression,
SumExpression,
Constraint,
Variable,
Domain,
ExpressionReference,
BilinearTermReference,
)
from galini.expression_relaxation.bilinear import (
McCormickExpressionRelaxation,
)
from galini.expression_relaxation.expression_relaxation import ExpressionRelaxation, \
ExpressionRelaxationResult, RelaxationSide
# Metadata key under which the (var1.idx, var2.idx) -> aux variable cache is
# stored in the relaxation context, so aux variables are shared across calls.
DISAGGREGATE_VAR_AUX_META = 'disaggregate_var_aux_meta'
class DisaggregateBilinearExpressionRelaxation(ExpressionRelaxation):
    """Relax a quadratic expression by disaggregating its bilinear-term graph.

    The expression's terms are grouped into connected components of a graph
    with one node per variable and one edge per bilinear term. Components
    whose curvature matches the requested relaxation side are kept exact and
    replaced by an auxiliary variable tied to the component via a new
    constraint; the remaining nonconvex terms are relaxed with McCormick
    envelopes.
    """
    def __init__(self):
        super().__init__()
        # Counter used to generate unique auxiliary variable/constraint names.
        self._call_count = 0
        self._quadratic_rule = QuadraticRule()
        self._quadratic_bound_propagation_rule = \
            QuadraticBoundPropagationRule()
        self._bilinear_underestimator = McCormickExpressionRelaxation(linear=True)
    def can_relax(self, problem, expr, ctx):
        """Return True only for quadratic expressions."""
        return expr.expression_type == ExpressionType.Quadratic
    def relax(self, problem, expr, ctx, **kwargs):
        """Relax ``expr``, returning an ExpressionRelaxationResult.

        ``kwargs`` must contain ``side`` (a RelaxationSide); remaining kwargs
        are forwarded to the McCormick relaxation of the nonconvex part.
        """
        assert expr.expression_type == ExpressionType.Quadratic
        side = kwargs.pop('side')
        # One node per variable, one edge per bilinear term.
        term_graph = nx.Graph()
        term_graph.add_nodes_from(ch.idx for ch in expr.children)
        term_graph.add_edges_from(
            (t.var1.idx, t.var2.idx, {'coefficient': t.coefficient})
            for t in expr.terms
        )
        # Check convexity of each connected subgraph
        convex_exprs = []
        nonconvex_exprs = []
        for connected_component in nx.connected_components(term_graph):
            connected_graph = term_graph.subgraph(connected_component)
            vars1 = []
            vars2 = []
            coefs = []
            for (idx1, idx2) in connected_graph.edges:
                coef = connected_graph.edges[idx1, idx2]['coefficient']
                v1 = problem.variable(idx1)
                v2 = problem.variable(idx2)
                vars1.append(v1)
                vars2.append(v2)
                coefs.append(coef)
            quadratic_expr = QuadraticExpression(vars1, vars2, coefs)
            cvx = self._quadratic_rule.apply(
                quadratic_expr, ctx.convexity, ctx.monotonicity, ctx.bounds
            )
            # A component is kept exact when its curvature is usable for the
            # requested side (convex for UNDER/BOTH, concave for OVER).
            if cvx.is_convex() and side == RelaxationSide.UNDER:
                convex_exprs.append(quadratic_expr)
            elif cvx.is_convex() and side == RelaxationSide.BOTH:
                convex_exprs.append(quadratic_expr)
            elif cvx.is_concave() and side == RelaxationSide.OVER:
                convex_exprs.append(quadratic_expr)
            else:
                nonconvex_exprs.append(quadratic_expr)
        aux_vars = []
        aux_coefs = []
        constraints = []
        if DISAGGREGATE_VAR_AUX_META not in ctx.metadata:
            ctx.metadata[DISAGGREGATE_VAR_AUX_META] = dict()
        bilinear_aux = ctx.metadata[DISAGGREGATE_VAR_AUX_META]
        for quadratic_expr in convex_exprs:
            if len(quadratic_expr.terms) == 1:
                # Reuse the auxiliary variable of a previously seen bilinear
                # term instead of creating a duplicate.
                term = quadratic_expr.terms[0]
                xy_idx = (term.var1.idx, term.var2.idx)
                aux_w = bilinear_aux.get(xy_idx, None)
                if aux_w is not None:
                    aux_vars.append(aux_w)
                    aux_coefs.append(term.coefficient)
                    continue
            quadratic_expr_bounds = \
                self._quadratic_bound_propagation_rule.apply(
                    quadratic_expr, ctx.bounds
                )
            aux_w = Variable(
                '_aux_{}'.format(self._call_count),
                quadratic_expr_bounds.lower_bound,
                quadratic_expr_bounds.upper_bound,
                Domain.REAL,
            )
            if len(quadratic_expr.terms) == 1:
                # Cache single-term aux variables for reuse across calls.
                term = quadratic_expr.terms[0]
                xy_idx = (term.var1.idx, term.var2.idx)
                bilinear_aux[xy_idx] = aux_w
            aux_w.reference = ExpressionReference(quadratic_expr)
            aux_vars.append(aux_w)
            aux_coefs.append(1.0)
            # NOTE(review): the original code computed side-dependent
            # (lower, upper) bounds here (None/0.0 for UNDER, 0.0/None for
            # OVER) and then unconditionally overwrote them with an equality;
            # that dead branch has been removed. Restore side-dependent
            # bounds only after confirming the equality was not a deliberate
            # tightening.
            lower_bound = upper_bound = 0.0
            # Constraint enforcing aux_w == quadratic_expr.
            constraint = Constraint(
                '_disaggregate_aux_{}'.format(self._call_count),
                SumExpression([
                    LinearExpression([aux_w], [-1.0], 0.0),
                    quadratic_expr,
                ]),
                lower_bound,
                upper_bound,
            )
            constraint.metadata['original_side'] = side
            constraints.append(constraint)
            self._call_count += 1
        # Relax whatever could not be kept exact with McCormick envelopes.
        nonconvex_quadratic_expr = QuadraticExpression(nonconvex_exprs)
        nonconvex_quadratic_under = \
            self._bilinear_underestimator.relax(
                problem, nonconvex_quadratic_expr, ctx, **kwargs
            )
        assert nonconvex_quadratic_under is not None
        # NOTE(review): aux_coefs is collected but never read -- the linear
        # aggregation below assumes unit coefficients, which looks suspicious
        # for the aux-variable reuse path above. TODO confirm intent.
        aux_vars_expr = LinearExpression(
            aux_vars,
            np.ones_like(aux_vars),
            0.0,
        )
        new_expr = LinearExpression(
            [aux_vars_expr, nonconvex_quadratic_under.expression]
        )
        constraints.extend(nonconvex_quadratic_under.constraints)
        return ExpressionRelaxationResult(new_expr, constraints)
| [
"suspect.fbbt.propagation.rules.QuadraticRule",
"numpy.ones_like",
"galini.core.QuadraticExpression",
"suspect.convexity.rules.QuadraticRule",
"galini.expression_relaxation.bilinear.McCormickExpressionRelaxation",
"networkx.Graph",
"networkx.connected_components",
"galini.core.LinearExpression",
"ga... | [((1458, 1473), 'suspect.convexity.rules.QuadraticRule', 'QuadraticRule', ([], {}), '()\n', (1471, 1473), False, 'from suspect.convexity.rules import QuadraticRule\n'), ((1537, 1568), 'suspect.fbbt.propagation.rules.QuadraticRule', 'QuadraticBoundPropagationRule', ([], {}), '()\n', (1566, 1568), True, 'from suspect.fbbt.propagation.rules import QuadraticRule as QuadraticBoundPropagationRule\n'), ((1609, 1651), 'galini.expression_relaxation.bilinear.McCormickExpressionRelaxation', 'McCormickExpressionRelaxation', ([], {'linear': '(True)'}), '(linear=True)\n', (1638, 1651), False, 'from galini.expression_relaxation.bilinear import McCormickExpressionRelaxation\n'), ((1935, 1945), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1943, 1945), True, 'import networkx as nx\n'), ((2302, 2337), 'networkx.connected_components', 'nx.connected_components', (['term_graph'], {}), '(term_graph)\n', (2325, 2337), True, 'import networkx as nx\n'), ((5654, 5690), 'galini.core.QuadraticExpression', 'QuadraticExpression', (['nonconvex_exprs'], {}), '(nonconvex_exprs)\n', (5673, 5690), False, 'from galini.core import QuadraticExpression, LinearExpression, SumExpression, Constraint, Variable, Domain, ExpressionReference, BilinearTermReference\n'), ((6058, 6129), 'galini.core.LinearExpression', 'LinearExpression', (['[aux_vars_expr, nonconvex_quadratic_under.expression]'], {}), '([aux_vars_expr, nonconvex_quadratic_under.expression])\n', (6074, 6129), False, 'from galini.core import QuadraticExpression, LinearExpression, SumExpression, Constraint, Variable, Domain, ExpressionReference, BilinearTermReference\n'), ((6235, 6284), 'galini.expression_relaxation.expression_relaxation.ExpressionRelaxationResult', 'ExpressionRelaxationResult', (['new_expr', 'constraints'], {}), '(new_expr, constraints)\n', (6261, 6284), False, 'from galini.expression_relaxation.expression_relaxation import ExpressionRelaxation, ExpressionRelaxationResult, RelaxationSide\n'), ((2824, 2864), 
'galini.core.QuadraticExpression', 'QuadraticExpression', (['vars1', 'vars2', 'coefs'], {}), '(vars1, vars2, coefs)\n', (2843, 2864), False, 'from galini.core import QuadraticExpression, LinearExpression, SumExpression, Constraint, Variable, Domain, ExpressionReference, BilinearTermReference\n'), ((4716, 4751), 'galini.core.ExpressionReference', 'ExpressionReference', (['quadratic_expr'], {}), '(quadratic_expr)\n', (4735, 4751), False, 'from galini.core import QuadraticExpression, LinearExpression, SumExpression, Constraint, Variable, Domain, ExpressionReference, BilinearTermReference\n'), ((5987, 6009), 'numpy.ones_like', 'np.ones_like', (['aux_vars'], {}), '(aux_vars)\n', (5999, 6009), True, 'import numpy as np\n'), ((5317, 5355), 'galini.core.LinearExpression', 'LinearExpression', (['[aux_w]', '[-1.0]', '(0.0)'], {}), '([aux_w], [-1.0], 0.0)\n', (5333, 5355), False, 'from galini.core import QuadraticExpression, LinearExpression, SumExpression, Constraint, Variable, Domain, ExpressionReference, BilinearTermReference\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.